cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

virtgpu_object.c (8124B)


/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

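/*
 * Allocate a host-visible resource id. Ids handed out are 1-based,
 * leaving 0 free to mean "no resource". With the virglrenderer
 * workaround enabled (the default), ids come from a monotonically
 * increasing counter and are never reused; otherwise they are
 * recycled through the device's resource_ida.
 */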
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

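/*
 * Release an id obtained from virtio_gpu_resource_id_get(). This is
 * a no-op while the virglrenderer workaround is active, since ids
 * are never reused in that mode.
 */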
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}

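/*
 * Final teardown of a buffer object: returns the resource id and
 * undoes the backing-store setup. For shmem objects that means the
 * DMA mapping, sg table and page pin; for vram objects the
 * host-visible mm node and the GEM object itself.
 */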
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base);
		}

		drm_gem_shmem_free(&bo->base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);

		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}

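/*
 * GEM .free callback. If the resource was created on the host, an
 * unref command is queued and cleanup is deferred to its completion
 * handler; otherwise the object can be torn down immediately.
 */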
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

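/*
 * GEM object vtable for shmem-backed objects. virtio_gpu_is_shmem()
 * identifies shmem objects by comparing against this table.
 */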
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

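/*
 * Allocate the driver-private shmem wrapper and install
 * virtio_gpu_shmem_funcs; the DRM shmem helpers complete the GEM
 * object initialization.
 */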
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return ERR_PTR(-ENOMEM);

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

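/*
 * Pin the BO's backing pages and build the mem-entry array the host
 * uses to locate them, DMA-mapping the sg table first when the
 * transport requires it. On failure the caller is expected to free
 * the BO, which releases the sg table and the pin.
 */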
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops. This is discouraged for other drivers, but should be fine
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base);
		return -EINVAL;
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

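/*
 * Create a shmem-backed BO together with its host resource: reserve
 * a resource id, set up the backing pages, then issue the matching
 * create command (blob, 3d or 2d). If @fence is non-NULL, the BO is
 * placed in an object array with its reservation locked so the
 * create command can be fenced.
 */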
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_array_put_free(objs);
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free(shmem_obj);
	return ret;
}