cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

virtgpu_gem.c (7504B)


/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

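/*
 * Allocate a virtio-gpu object for @params and create a GEM handle for
 * it.  On success the handle holds the only reference to the object;
 * the pointer returned through @obj_p stays valid for as long as the
 * handle exists.
 */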
static int virtio_gpu_gem_create(struct drm_file *file,
				 struct drm_device *dev,
				 struct virtio_gpu_object_params *params,
				 struct drm_gem_object **obj_p,
				 uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&obj->base.base);

	*handle_p = handle;
	return 0;
}

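/*
 * DRM_IOCTL_MODE_CREATE_DUMB implementation: create a 32bpp dumb buffer
 * sized to the requested width/height and page-aligned.  If the device
 * supports blob resources and 3D is not active, the buffer is created
 * as a shareable guest-memory blob.
 */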
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;

	if (vgdev->has_resource_blob && !vgdev->has_virgl_3d) {
		params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
		params.blob = true;
	}

	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}

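/*
 * DRM_IOCTL_MODE_MAP_DUMB implementation: look up the object behind
 * @handle and return the fake mmap offset userspace passes to mmap()
 * on the DRM fd.
 */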
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put(gobj);
	return 0;
}

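/*
 * GEM open hook, called whenever an object gains a handle in a DRM
 * file.  On 3D (virgl) devices the resource is attached to the file's
 * host context so the host knows it belongs to that context.
 */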
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		goto out_notify;

	/* the context might still be missing when the first ioctl is
	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
	 */
	virtio_gpu_create_context(obj->dev, file);

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
out_notify:
	virtio_gpu_notify(vgdev);
	return 0;
}

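/*
 * GEM close hook, the counterpart of virtio_gpu_gem_object_open(): on
 * 3D devices, detach the resource from the file's host context when
 * its last handle in that file goes away.
 */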
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	virtio_gpu_notify(vgdev);
}

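/*
 * Allocate an empty object array with room for @nents entries.
 * Entries are added afterwards with virtio_gpu_array_add_obj().
 */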
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;

	objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

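/* Free only the container; the object references must be gone already. */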
static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

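/*
 * Build an object array from an array of GEM handles, taking a
 * reference on each object.  Returns NULL if any handle fails to
 * resolve; references taken up to that point are dropped again.
 */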
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

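/*
 * Append @obj to the array, taking a reference that is dropped again
 * by virtio_gpu_array_put_free().
 */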
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

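/*
 * Lock the reservation objects of all entries (using a ww_acquire
 * ticket when there is more than one) and reserve one fence slot per
 * entry for the fence added later by virtio_gpu_array_add_fence().
 */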
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	unsigned int i;
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	if (ret)
		return ret;

	for (i = 0; i < objs->nents; ++i) {
		ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
		if (ret) {
			/* don't leak the reservation locks taken above */
			virtio_gpu_array_unlock_resv(objs);
			return ret;
		}
	}
	return ret;
}

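/* Undo virtio_gpu_array_lock_resv(). */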
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

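/* Attach @fence to every entry's reservation object as a write fence. */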
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_fence(objs->objs[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
}

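/*
 * Drop the reference held on each entry and free the array.  Safe to
 * call with a NULL @objs.
 */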
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	if (!objs)
		return;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put(objs->objs[i]);
	virtio_gpu_array_free(objs);
}

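/*
 * Queue the array on obj_free_list and kick the obj_free worker, so
 * the object references are dropped later from process context instead
 * of in the caller's context.
 */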
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}

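/*
 * Worker for vgdev->obj_free_work: drains obj_free_list, releasing
 * each queued array with the spinlock dropped around the actual put.
 */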
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}