cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

drm_gem.c (35039B)


      1/*
      2 * Copyright © 2008 Intel Corporation
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice (including the next
     12 * paragraph) shall be included in all copies or substantial portions of the
     13 * Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21 * IN THE SOFTWARE.
     22 *
     23 * Authors:
     24 *    Eric Anholt <eric@anholt.net>
     25 *
     26 */
     27
     28#include <linux/dma-buf.h>
     29#include <linux/file.h>
     30#include <linux/fs.h>
     31#include <linux/iosys-map.h>
     32#include <linux/mem_encrypt.h>
     33#include <linux/mm.h>
     34#include <linux/mman.h>
     35#include <linux/module.h>
     36#include <linux/pagemap.h>
     37#include <linux/pagevec.h>
     38#include <linux/shmem_fs.h>
     39#include <linux/slab.h>
     40#include <linux/string_helpers.h>
     41#include <linux/types.h>
     42#include <linux/uaccess.h>
     43
     44#include <drm/drm.h>
     45#include <drm/drm_device.h>
     46#include <drm/drm_drv.h>
     47#include <drm/drm_file.h>
     48#include <drm/drm_gem.h>
     49#include <drm/drm_managed.h>
     50#include <drm/drm_print.h>
     51#include <drm/drm_vma_manager.h>
     52
     53#include "drm_internal.h"
     54
     55/** @file drm_gem.c
     56 *
     57 * This file provides some of the base ioctls and library routines for
     58 * the graphics memory manager implemented by each device driver.
     59 *
     60 * Because various devices have different requirements in terms of
     61 * synchronization and migration strategies, implementing that is left up to
     62 * the driver, and all that the general API provides should be generic --
     63 * allocating objects, reading/writing data with the cpu, freeing objects.
     64 * Even there, platform-dependent optimizations for reading/writing data with
     65 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
     66 * the DRI2 implementation wants to have at least allocate/mmap be generic.
     67 *
     68 * The goal was to have swap-backed object allocation managed through
     69 * struct file.  However, file descriptors as handles to a struct file have
     70 * two major failings:
     71 * - Process limits prevent more than 1024 or so being used at a time by
     72 *   default.
     73 * - Inability to allocate high fds will aggravate the X Server's select()
     74 *   handling, and likely that of many GL client applications as well.
     75 *
     76 * This led to a plan of using our own integer IDs (called handles, following
     77 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
     78 * ioctls.  The objects themselves will still include the struct file so
     79 * that we can transition to fds if the required kernel infrastructure shows
     80 * up at a later date, and as our interface with shmfs for memory allocation.
     81 */
     82
     83static void
     84drm_gem_init_release(struct drm_device *dev, void *ptr)
     85{
     86	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
     87}
     88
     89/**
     90 * drm_gem_init - Initialize the GEM device fields
     91 * @dev: drm_device structure to initialize
     92 */
     93int
     94drm_gem_init(struct drm_device *dev)
     95{
     96	struct drm_vma_offset_manager *vma_offset_manager;
     97
     98	mutex_init(&dev->object_name_lock);
     99	idr_init_base(&dev->object_name_idr, 1);
    100
    101	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
    102					  GFP_KERNEL);
    103	if (!vma_offset_manager) {
    104		DRM_ERROR("out of memory\n");
    105		return -ENOMEM;
    106	}
    107
    108	dev->vma_offset_manager = vma_offset_manager;
    109	drm_vma_offset_manager_init(vma_offset_manager,
    110				    DRM_FILE_PAGE_OFFSET_START,
    111				    DRM_FILE_PAGE_OFFSET_SIZE);
    112
    113	return drmm_add_action(dev, drm_gem_init_release, NULL);
    114}
    115
    116/**
    117 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
    118 * @dev: drm_device the object should be initialized for
    119 * @obj: drm_gem_object to initialize
    120 * @size: object size
    121 *
    122 * Initialize an already allocated GEM object of the specified size with
    123 * shmfs backing store.
    124 */
    125int drm_gem_object_init(struct drm_device *dev,
    126			struct drm_gem_object *obj, size_t size)
    127{
    128	struct file *filp;
    129
    130	drm_gem_private_object_init(dev, obj, size);
    131
    132	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
    133	if (IS_ERR(filp))
    134		return PTR_ERR(filp);
    135
    136	obj->filp = filp;
    137
    138	return 0;
    139}
    140EXPORT_SYMBOL(drm_gem_object_init);
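/*
 * A minimal usage sketch, assuming a hypothetical "foo" driver (the
 * foo_gem_object type and foo_gem_funcs, its &drm_gem_object_funcs, are
 * illustrative and not part of this file): drivers typically embed
 * struct drm_gem_object in their own object and set up the shmem backing
 * at allocation time, roughly like this:
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
 *						     size_t size)
 *	{
 *		struct foo_gem_object *foo;
 *		int ret;
 *
 *		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		foo->base.funcs = &foo_gem_funcs;
 *
 *		ret = drm_gem_object_init(dev, &foo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(foo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return foo;
 *	}
 *
 * The PAGE_ALIGN() matters: drm_gem_private_object_init() below BUG()s on
 * sizes that are not page aligned.
 */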
    141
    142/**
    143 * drm_gem_private_object_init - initialize an allocated private GEM object
    144 * @dev: drm_device the object should be initialized for
    145 * @obj: drm_gem_object to initialize
    146 * @size: object size
    147 *
    148 * Initialize an already allocated GEM object of the specified size with
    149 * no GEM provided backing store. Instead the caller is responsible for
    150 * backing the object and handling it.
    151 */
    152void drm_gem_private_object_init(struct drm_device *dev,
    153				 struct drm_gem_object *obj, size_t size)
    154{
    155	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
    156
    157	obj->dev = dev;
    158	obj->filp = NULL;
    159
    160	kref_init(&obj->refcount);
    161	obj->handle_count = 0;
    162	obj->size = size;
    163	dma_resv_init(&obj->_resv);
    164	if (!obj->resv)
    165		obj->resv = &obj->_resv;
    166
    167	drm_vma_node_reset(&obj->vma_node);
    168}
    169EXPORT_SYMBOL(drm_gem_private_object_init);
    170
    171static void
    172drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
    173{
    174	/*
    175	 * Note: obj->dma_buf can't disappear as long as we still hold a
    176	 * handle reference in obj->handle_count.
    177	 */
    178	mutex_lock(&filp->prime.lock);
    179	if (obj->dma_buf) {
    180		drm_prime_remove_buf_handle_locked(&filp->prime,
    181						   obj->dma_buf);
    182	}
    183	mutex_unlock(&filp->prime.lock);
    184}
    185
    186/**
    187 * drm_gem_object_handle_free - release resources bound to userspace handles
    188 * @obj: GEM object to clean up.
    189 *
    190 * Called after the last handle to the object has been closed
    191 *
    192 * Removes any name for the object. Note that this must be
    193 * called before drm_gem_object_free or we'll be touching
    194 * freed memory
    195 */
    196static void drm_gem_object_handle_free(struct drm_gem_object *obj)
    197{
    198	struct drm_device *dev = obj->dev;
    199
    200	/* Remove any name for this object */
    201	if (obj->name) {
    202		idr_remove(&dev->object_name_idr, obj->name);
    203		obj->name = 0;
    204	}
    205}
    206
    207static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
    208{
    209	/* Unbreak the reference cycle if we have an exported dma_buf. */
    210	if (obj->dma_buf) {
    211		dma_buf_put(obj->dma_buf);
    212		obj->dma_buf = NULL;
    213	}
    214}
    215
    216static void
    217drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
    218{
    219	struct drm_device *dev = obj->dev;
    220	bool final = false;
    221
    222	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
    223		return;
    224
    225	/*
    226	 * Must bump handle count first as this may be the last
    227	 * ref, in which case the object would disappear before we
    228	 * checked for a name
    229	 */
    230
    231	mutex_lock(&dev->object_name_lock);
    232	if (--obj->handle_count == 0) {
    233		drm_gem_object_handle_free(obj);
    234		drm_gem_object_exported_dma_buf_free(obj);
    235		final = true;
    236	}
    237	mutex_unlock(&dev->object_name_lock);
    238
    239	if (final)
    240		drm_gem_object_put(obj);
    241}
    242
    243/*
    244 * Called at device or object close to release the file's
    245 * handle references on objects.
    246 */
    247static int
    248drm_gem_object_release_handle(int id, void *ptr, void *data)
    249{
    250	struct drm_file *file_priv = data;
    251	struct drm_gem_object *obj = ptr;
    252
    253	if (obj->funcs->close)
    254		obj->funcs->close(obj, file_priv);
    255
    256	drm_gem_remove_prime_handles(obj, file_priv);
    257	drm_vma_node_revoke(&obj->vma_node, file_priv);
    258
    259	drm_gem_object_handle_put_unlocked(obj);
    260
    261	return 0;
    262}
    263
    264/**
    265 * drm_gem_handle_delete - deletes the given file-private handle
    266 * @filp: drm file-private structure to use for the handle look up
    267 * @handle: userspace handle to delete
    268 *
    269 * Removes the GEM handle from the @filp lookup table which has been added with
    270 * drm_gem_handle_create(). If this is the last handle also cleans up linked
    271 * resources like GEM names.
    272 */
    273int
    274drm_gem_handle_delete(struct drm_file *filp, u32 handle)
    275{
    276	struct drm_gem_object *obj;
    277
    278	spin_lock(&filp->table_lock);
    279
    280	/* Check if we currently have a reference on the object */
    281	obj = idr_replace(&filp->object_idr, NULL, handle);
    282	spin_unlock(&filp->table_lock);
    283	if (IS_ERR_OR_NULL(obj))
    284		return -EINVAL;
    285
    286	/* Release driver's reference and decrement refcount. */
    287	drm_gem_object_release_handle(handle, obj, filp);
    288
    289	/* And finally make the handle available for future allocations. */
    290	spin_lock(&filp->table_lock);
    291	idr_remove(&filp->object_idr, handle);
    292	spin_unlock(&filp->table_lock);
    293
    294	return 0;
    295}
    296EXPORT_SYMBOL(drm_gem_handle_delete);
    297
    298/**
    299 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
    300 * @file: drm file-private structure containing the gem object
    301 * @dev: corresponding drm_device
    302 * @handle: gem object handle
    303 * @offset: return location for the fake mmap offset
    304 *
    305 * This implements the &drm_driver.dumb_map_offset kms driver callback for
    306 * drivers which use gem to manage their backing storage.
    307 *
    308 * Returns:
    309 * 0 on success or a negative error code on failure.
    310 */
    311int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
    312			    u32 handle, u64 *offset)
    313{
    314	struct drm_gem_object *obj;
    315	int ret;
    316
    317	obj = drm_gem_object_lookup(file, handle);
    318	if (!obj)
    319		return -ENOENT;
    320
    321	/* Don't allow imported objects to be mapped */
    322	if (obj->import_attach) {
    323		ret = -EINVAL;
    324		goto out;
    325	}
    326
    327	ret = drm_gem_create_mmap_offset(obj);
    328	if (ret)
    329		goto out;
    330
    331	*offset = drm_vma_node_offset_addr(&obj->vma_node);
    332out:
    333	drm_gem_object_put(obj);
    334
    335	return ret;
    336}
    337EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
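/*
 * Usage sketch: a driver that is happy with this behaviour simply plugs the
 * helper into its &drm_driver (hypothetical foo driver; foo_dumb_create is
 * sketched further down next to drm_gem_handle_create()):
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		...
 *	};
 *
 * Userspace then passes the returned fake offset to mmap(2) on the DRM fd,
 * which ends up in drm_gem_mmap() below.
 */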
    338
    339int drm_gem_dumb_destroy(struct drm_file *file,
    340			 struct drm_device *dev,
    341			 u32 handle)
    342{
    343	return drm_gem_handle_delete(file, handle);
    344}
    345
    346/**
    347 * drm_gem_handle_create_tail - internal function to create a handle
    348 * @file_priv: drm file-private structure to register the handle for
    349 * @obj: object to register
    350 * @handlep: pointer to return the created handle to the caller
    351 *
    352 * This expects the &drm_device.object_name_lock to be held already and will
    353 * drop it before returning. Used to avoid races in establishing new handles
    354 * when importing an object from either a flink name or a dma-buf.
    355 *
    356 * Handles must be released again through drm_gem_handle_delete(). This is done
    357 * when userspace closes @file_priv for all attached handles, or through the
    358 * GEM_CLOSE ioctl for individual handles.
    359 */
    360int
    361drm_gem_handle_create_tail(struct drm_file *file_priv,
    362			   struct drm_gem_object *obj,
    363			   u32 *handlep)
    364{
    365	struct drm_device *dev = obj->dev;
    366	u32 handle;
    367	int ret;
    368
    369	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
    370	if (obj->handle_count++ == 0)
    371		drm_gem_object_get(obj);
    372
    373	/*
    374	 * Get the user-visible handle using idr.  Preload and perform
    375	 * allocation under our spinlock.
    376	 */
    377	idr_preload(GFP_KERNEL);
    378	spin_lock(&file_priv->table_lock);
    379
    380	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
    381
    382	spin_unlock(&file_priv->table_lock);
    383	idr_preload_end();
    384
    385	mutex_unlock(&dev->object_name_lock);
    386	if (ret < 0)
    387		goto err_unref;
    388
    389	handle = ret;
    390
    391	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
    392	if (ret)
    393		goto err_remove;
    394
    395	if (obj->funcs->open) {
    396		ret = obj->funcs->open(obj, file_priv);
    397		if (ret)
    398			goto err_revoke;
    399	}
    400
    401	*handlep = handle;
    402	return 0;
    403
    404err_revoke:
    405	drm_vma_node_revoke(&obj->vma_node, file_priv);
    406err_remove:
    407	spin_lock(&file_priv->table_lock);
    408	idr_remove(&file_priv->object_idr, handle);
    409	spin_unlock(&file_priv->table_lock);
    410err_unref:
    411	drm_gem_object_handle_put_unlocked(obj);
    412	return ret;
    413}
    414
    415/**
    416 * drm_gem_handle_create - create a gem handle for an object
    417 * @file_priv: drm file-private structure to register the handle for
    418 * @obj: object to register
    419 * @handlep: pointer to return the created handle to the caller
    420 *
    421 * Create a handle for this object. This adds a handle reference to the object,
    422 * which includes a regular reference count. Callers will likely want to
    423 * dereference the object afterwards.
    424 *
    425 * Since this publishes @obj to userspace it must be fully set up by this point,
    426 * drivers must call this last in their buffer object creation callbacks.
    427 */
    428int drm_gem_handle_create(struct drm_file *file_priv,
    429			  struct drm_gem_object *obj,
    430			  u32 *handlep)
    431{
    432	mutex_lock(&obj->dev->object_name_lock);
    433
    434	return drm_gem_handle_create_tail(file_priv, obj, handlep);
    435}
    436EXPORT_SYMBOL(drm_gem_handle_create);
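/*
 * Usage sketch, continuing the hypothetical foo driver: a
 * &drm_driver.dumb_create implementation publishes the object as its very
 * last step and then drops its local reference, since the new handle now
 * keeps the object alive:
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_gem_object *foo;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN((u64)args->pitch * args->height);
 *
 *		foo = foo_gem_create(dev, args->size);
 *		if (IS_ERR(foo))
 *			return PTR_ERR(foo);
 *
 *		ret = drm_gem_handle_create(file_priv, &foo->base,
 *					    &args->handle);
 *		drm_gem_object_put(&foo->base);
 *
 *		return ret;
 *	}
 */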
    437
    438
    439/**
    440 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
    441 * @obj: obj in question
    442 *
    443 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
    444 *
    445 * Note that drm_gem_object_release() already calls this function, so drivers
    446 * don't have to take care of releasing the mmap offset themselves when freeing
    447 * the GEM object.
    448 */
    449void
    450drm_gem_free_mmap_offset(struct drm_gem_object *obj)
    451{
    452	struct drm_device *dev = obj->dev;
    453
    454	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
    455}
    456EXPORT_SYMBOL(drm_gem_free_mmap_offset);
    457
    458/**
    459 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
    460 * @obj: obj in question
    461 * @size: the virtual size
    462 *
    463 * GEM memory mapping works by handing back to userspace a fake mmap offset
    464 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
    465 * up the object based on the offset and sets up the various memory mapping
    466 * structures.
    467 *
    468 * This routine allocates and attaches a fake offset for @obj, in cases where
    469 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
    470 * Otherwise just use drm_gem_create_mmap_offset().
    471 *
    472 * This function is idempotent and handles an already allocated mmap offset
    473 * transparently. Drivers do not need to check for this case.
    474 */
    475int
    476drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
    477{
    478	struct drm_device *dev = obj->dev;
    479
    480	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
    481				  size / PAGE_SIZE);
    482}
    483EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
    484
    485/**
    486 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
    487 * @obj: obj in question
    488 *
    489 * GEM memory mapping works by handing back to userspace a fake mmap offset
    490 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
    491 * up the object based on the offset and sets up the various memory mapping
    492 * structures.
    493 *
    494 * This routine allocates and attaches a fake offset for @obj.
    495 *
    496 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
    497 * the fake offset again.
    498 */
    499int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
    500{
    501	return drm_gem_create_mmap_offset_size(obj, obj->size);
    502}
    503EXPORT_SYMBOL(drm_gem_create_mmap_offset);
    504
    505/*
    506 * Move pages to the appropriate LRU and release the pagevec, decrementing the
    507 * ref count of those pages.
    508 */
    509static void drm_gem_check_release_pagevec(struct pagevec *pvec)
    510{
    511	check_move_unevictable_pages(pvec);
    512	__pagevec_release(pvec);
    513	cond_resched();
    514}
    515
    516/**
    517 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
    518 * from shmem
    519 * @obj: obj in question
    520 *
    521 * This reads the page-array of the shmem-backing storage of the given gem
    522 * object. An array of pages is returned. If a page is not allocated or
    523 * swapped-out, this will allocate/swap-in the required pages. Note that the
    524 * whole object is covered by the page-array and pinned in memory.
    525 *
    526 * Use drm_gem_put_pages() to release the array and unpin all pages.
    527 *
    528 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
    529 * If you require other GFP-masks, you have to do those allocations yourself.
    530 *
    531 * Note that you are not allowed to change gfp-zones during runtime. That is,
    532 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
    533 * set during initialization. If you have special zone constraints, set them
    534 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
    535 * to keep pages in the required zone during swap-in.
    536 *
    537 * This function is only valid on objects initialized with
    538 * drm_gem_object_init(), but not for those initialized with
    539 * drm_gem_private_object_init() only.
    540 */
    541struct page **drm_gem_get_pages(struct drm_gem_object *obj)
    542{
    543	struct address_space *mapping;
    544	struct page *p, **pages;
    545	struct pagevec pvec;
    546	int i, npages;
    547
    548
    549	if (WARN_ON(!obj->filp))
    550		return ERR_PTR(-EINVAL);
    551
    552	/* This is the shared memory object that backs the GEM resource */
    553	mapping = obj->filp->f_mapping;
    554
    555	/* We already BUG_ON() for non-page-aligned sizes in
    556	 * drm_gem_object_init(), so we should never hit this unless
    557	 * the driver author is doing something really wrong:
    558	 */
    559	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
    560
    561	npages = obj->size >> PAGE_SHIFT;
    562
    563	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
    564	if (pages == NULL)
    565		return ERR_PTR(-ENOMEM);
    566
    567	mapping_set_unevictable(mapping);
    568
    569	for (i = 0; i < npages; i++) {
    570		p = shmem_read_mapping_page(mapping, i);
    571		if (IS_ERR(p))
    572			goto fail;
    573		pages[i] = p;
    574
    575		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
    576		 * correct region during swapin. Note that this requires
    577		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
    578		 * so shmem can relocate pages during swapin if required.
    579		 */
    580		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
    581				(page_to_pfn(p) >= 0x00100000UL));
    582	}
    583
    584	return pages;
    585
    586fail:
    587	mapping_clear_unevictable(mapping);
    588	pagevec_init(&pvec);
    589	while (i--) {
    590		if (!pagevec_add(&pvec, pages[i]))
    591			drm_gem_check_release_pagevec(&pvec);
    592	}
    593	if (pagevec_count(&pvec))
    594		drm_gem_check_release_pagevec(&pvec);
    595
    596	kvfree(pages);
    597	return ERR_CAST(p);
    598}
    599EXPORT_SYMBOL(drm_gem_get_pages);
    600
    601/**
    602 * drm_gem_put_pages - helper to free backing pages for a GEM object
    603 * @obj: obj in question
    604 * @pages: pages to free
    605 * @dirty: if true, pages will be marked as dirty
    606 * @accessed: if true, the pages will be marked as accessed
    607 */
    608void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
    609		bool dirty, bool accessed)
    610{
    611	int i, npages;
    612	struct address_space *mapping;
    613	struct pagevec pvec;
    614
    615	mapping = file_inode(obj->filp)->i_mapping;
    616	mapping_clear_unevictable(mapping);
    617
    618	/* We already BUG_ON() for non-page-aligned sizes in
    619	 * drm_gem_object_init(), so we should never hit this unless
    620	 * the driver author is doing something really wrong:
    621	 */
    622	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
    623
    624	npages = obj->size >> PAGE_SHIFT;
    625
    626	pagevec_init(&pvec);
    627	for (i = 0; i < npages; i++) {
    628		if (!pages[i])
    629			continue;
    630
    631		if (dirty)
    632			set_page_dirty(pages[i]);
    633
    634		if (accessed)
    635			mark_page_accessed(pages[i]);
    636
    637		/* Undo the reference we took when populating the table */
    638		if (!pagevec_add(&pvec, pages[i]))
    639			drm_gem_check_release_pagevec(&pvec);
    640	}
    641	if (pagevec_count(&pvec))
    642		drm_gem_check_release_pagevec(&pvec);
    643
    644	kvfree(pages);
    645}
    646EXPORT_SYMBOL(drm_gem_put_pages);
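/*
 * Usage sketch (driver-side, names assumed): drm_gem_get_pages() and
 * drm_gem_put_pages() are used as a pair around whatever the driver does
 * with the backing pages, e.g. building an sg_table:
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	... map/use the pages, e.g. via drm_prime_pages_to_sg() ...
 *
 *	drm_gem_put_pages(obj, pages, true, true);
 */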
    647
    648static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
    649			  struct drm_gem_object **objs)
    650{
    651	int i, ret = 0;
    652	struct drm_gem_object *obj;
    653
    654	spin_lock(&filp->table_lock);
    655
    656	for (i = 0; i < count; i++) {
    657		/* Check if we currently have a reference on the object */
    658		obj = idr_find(&filp->object_idr, handle[i]);
    659		if (!obj) {
    660			ret = -ENOENT;
    661			break;
    662		}
    663		drm_gem_object_get(obj);
    664		objs[i] = obj;
    665	}
    666	spin_unlock(&filp->table_lock);
    667
    668	return ret;
    669}
    670
    671/**
    672 * drm_gem_objects_lookup - look up GEM objects from an array of handles
    673 * @filp: DRM file private data
    674 * @bo_handles: user pointer to array of userspace handles
    675 * @count: size of handle array
    676 * @objs_out: returned pointer to array of drm_gem_object pointers
    677 *
    678 * Takes an array of userspace handles and returns a newly allocated array of
    679 * GEM objects.
    680 *
    681 * For a single handle lookup, use drm_gem_object_lookup().
    682 *
    683 * Returns:
    684 *
    685 * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
    686 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
    687 * failure. 0 is returned on success.
    688 *
    689 */
    690int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
    691			   int count, struct drm_gem_object ***objs_out)
    692{
    693	int ret;
    694	u32 *handles;
    695	struct drm_gem_object **objs;
    696
    697	if (!count)
    698		return 0;
    699
    700	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
    701			     GFP_KERNEL | __GFP_ZERO);
    702	if (!objs)
    703		return -ENOMEM;
    704
    705	*objs_out = objs;
    706
    707	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
    708	if (!handles) {
    709		ret = -ENOMEM;
    710		goto out;
    711	}
    712
    713	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
    714		ret = -EFAULT;
    715		DRM_DEBUG("Failed to copy in GEM handles\n");
    716		goto out;
    717	}
    718
    719	ret = objects_lookup(filp, handles, count, objs);
    720out:
    721	kvfree(handles);
    722	return ret;
    723
    724}
    725EXPORT_SYMBOL(drm_gem_objects_lookup);
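/*
 * Usage sketch (hypothetical submit ioctl; the args layout is assumed): the
 * caller owns the returned array and the references it holds, and must clean
 * up even when the lookup fails part way through:
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		goto out;
 *
 *	... use objs[0] .. objs[args->bo_count - 1] ...
 *
 * out:
 *	if (objs) {
 *		for (i = 0; i < args->bo_count; i++)
 *			if (objs[i])
 *				drm_gem_object_put(objs[i]);
 *		kvfree(objs);
 *	}
 */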
    726
    727/**
    728 * drm_gem_object_lookup - look up a GEM object from its handle
    729 * @filp: DRM file private data
    730 * @handle: userspace handle
    731 *
    732 * Returns:
    733 *
    734 * A reference to the object named by the handle if such exists on @filp, NULL
    735 * otherwise.
    736 *
    737 * If looking up an array of handles, use drm_gem_objects_lookup().
    738 */
    739struct drm_gem_object *
    740drm_gem_object_lookup(struct drm_file *filp, u32 handle)
    741{
    742	struct drm_gem_object *obj = NULL;
    743
    744	objects_lookup(filp, &handle, 1, &obj);
    745	return obj;
    746}
    747EXPORT_SYMBOL(drm_gem_object_lookup);
    748
    749/**
    750 * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a GEM
    751 * object's reservation object.
    752 * @filep: DRM file private data
    753 * @handle: userspace handle
    754 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
    755 * @timeout: timeout value in jiffies or zero to return immediately
    756 *
    757 * Returns:
    758 *
    759 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
    760 * 0 on success.
    761 */
    762long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
    763				    bool wait_all, unsigned long timeout)
    764{
    765	long ret;
    766	struct drm_gem_object *obj;
    767
    768	obj = drm_gem_object_lookup(filep, handle);
    769	if (!obj) {
    770		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
    771		return -EINVAL;
    772	}
    773
    774	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
    775				    true, timeout);
    776	if (ret == 0)
    777		ret = -ETIME;
    778	else if (ret > 0)
    779		ret = 0;
    780
    781	drm_gem_object_put(obj);
    782
    783	return ret;
    784}
    785EXPORT_SYMBOL(drm_gem_dma_resv_wait);
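/*
 * Usage sketch (hypothetical driver ioctl; the drm_foo_wait payload is
 * assumed): a "wait for BO idle" ioctl reduces to a single call, after
 * converting the userspace timeout into jiffies, e.g. with
 * drm_timeout_abs_to_jiffies() for an absolute timeout in nanoseconds:
 *
 *	static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
 *				     struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait *args = data;
 *		unsigned long timeout =
 *			drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */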
    786
    787/**
    788 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
    789 * @dev: drm_device
    790 * @data: ioctl data
    791 * @file_priv: drm file-private structure
    792 *
    793 * Releases the handle to an mm object.
    794 */
    795int
    796drm_gem_close_ioctl(struct drm_device *dev, void *data,
    797		    struct drm_file *file_priv)
    798{
    799	struct drm_gem_close *args = data;
    800	int ret;
    801
    802	if (!drm_core_check_feature(dev, DRIVER_GEM))
    803		return -EOPNOTSUPP;
    804
    805	ret = drm_gem_handle_delete(file_priv, args->handle);
    806
    807	return ret;
    808}
    809
    810/**
    811 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
    812 * @dev: drm_device
    813 * @data: ioctl data
    814 * @file_priv: drm file-private structure
    815 *
    816 * Create a global name for an object, returning the name.
    817 *
    818 * Note that the name does not hold a reference; when the object
    819 * is freed, the name goes away.
    820 */
    821int
    822drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    823		    struct drm_file *file_priv)
    824{
    825	struct drm_gem_flink *args = data;
    826	struct drm_gem_object *obj;
    827	int ret;
    828
    829	if (!drm_core_check_feature(dev, DRIVER_GEM))
    830		return -EOPNOTSUPP;
    831
    832	obj = drm_gem_object_lookup(file_priv, args->handle);
    833	if (obj == NULL)
    834		return -ENOENT;
    835
    836	mutex_lock(&dev->object_name_lock);
    837	/* prevent races with concurrent gem_close. */
    838	if (obj->handle_count == 0) {
    839		ret = -ENOENT;
    840		goto err;
    841	}
    842
    843	if (!obj->name) {
    844		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
    845		if (ret < 0)
    846			goto err;
    847
    848		obj->name = ret;
    849	}
    850
    851	args->name = (uint64_t) obj->name;
    852	ret = 0;
    853
    854err:
    855	mutex_unlock(&dev->object_name_lock);
    856	drm_gem_object_put(obj);
    857	return ret;
    858}
    859
    860/**
    861 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
    862 * @dev: drm_device
    863 * @data: ioctl data
    864 * @file_priv: drm file-private structure
    865 *
    866 * Open an object using the global name, returning a handle and the size.
    867 *
    868 * This handle (of course) holds a reference to the object, so the object
    869 * will not go away until the handle is deleted.
    870 */
    871int
    872drm_gem_open_ioctl(struct drm_device *dev, void *data,
    873		   struct drm_file *file_priv)
    874{
    875	struct drm_gem_open *args = data;
    876	struct drm_gem_object *obj;
    877	int ret;
    878	u32 handle;
    879
    880	if (!drm_core_check_feature(dev, DRIVER_GEM))
    881		return -EOPNOTSUPP;
    882
    883	mutex_lock(&dev->object_name_lock);
    884	obj = idr_find(&dev->object_name_idr, (int) args->name);
    885	if (obj) {
    886		drm_gem_object_get(obj);
    887	} else {
    888		mutex_unlock(&dev->object_name_lock);
    889		return -ENOENT;
    890	}
    891
    892	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
    893	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
    894	if (ret)
    895		goto err;
    896
    897	args->handle = handle;
    898	args->size = obj->size;
    899
    900err:
    901	drm_gem_object_put(obj);
    902	return ret;
    903}
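/*
 * Userspace view of the two ioctls above (sketch, error handling omitted):
 * GEM_FLINK publishes a global name for a local handle, and GEM_OPEN turns
 * that name back into a handle on another DRM file description:
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open open = { .name = flink.name };
 *
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open);
 *	now open.handle and open.size are valid on fd_b
 *
 * flink names are global and unauthenticated, which is why dma-buf/PRIME
 * file descriptors are the preferred sharing mechanism today.
 */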
    904
    905/**
    906 * drm_gem_open - initializes GEM file-private structures at devnode open time
    907 * @dev: drm_device which is being opened by userspace
    908 * @file_private: drm file-private structure to set up
    909 *
    910 * Called at device open time, sets up the structure for handling refcounting
    911 * of mm objects.
    912 */
    913void
    914drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
    915{
    916	idr_init_base(&file_private->object_idr, 1);
    917	spin_lock_init(&file_private->table_lock);
    918}
    919
    920/**
    921 * drm_gem_release - release file-private GEM resources
    922 * @dev: drm_device which is being closed by userspace
    923 * @file_private: drm file-private structure to clean up
    924 *
    925 * Called at close time when the filp is going away.
    926 *
    927 * Releases any remaining references on objects by this filp.
    928 */
    929void
    930drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
    931{
    932	idr_for_each(&file_private->object_idr,
    933		     &drm_gem_object_release_handle, file_private);
    934	idr_destroy(&file_private->object_idr);
    935}
    936
    937/**
    938 * drm_gem_object_release - release GEM buffer object resources
    939 * @obj: GEM buffer object
    940 *
    941 * This releases any structures and resources used by @obj and is the inverse of
    942 * drm_gem_object_init().
    943 */
    944void
    945drm_gem_object_release(struct drm_gem_object *obj)
    946{
    947	WARN_ON(obj->dma_buf);
    948
    949	if (obj->filp)
    950		fput(obj->filp);
    951
    952	dma_resv_fini(&obj->_resv);
    953	drm_gem_free_mmap_offset(obj);
    954}
    955EXPORT_SYMBOL(drm_gem_object_release);
    956
    957/**
    958 * drm_gem_object_free - free a GEM object
    959 * @kref: kref of the object to free
    960 *
    961 * Called after the last reference to the object has been lost.
    962 *
    963 * Frees the object
    964 */
    965void
    966drm_gem_object_free(struct kref *kref)
    967{
    968	struct drm_gem_object *obj =
    969		container_of(kref, struct drm_gem_object, refcount);
    970
    971	if (WARN_ON(!obj->funcs->free))
    972		return;
    973
    974	obj->funcs->free(obj);
    975}
    976EXPORT_SYMBOL(drm_gem_object_free);
    977
    978/**
    979 * drm_gem_vm_open - vma->ops->open implementation for GEM
    980 * @vma: VM area structure
    981 *
    982 * This function implements the #vm_operations_struct open() callback for GEM
    983 * drivers. This must be used together with drm_gem_vm_close().
    984 */
    985void drm_gem_vm_open(struct vm_area_struct *vma)
    986{
    987	struct drm_gem_object *obj = vma->vm_private_data;
    988
    989	drm_gem_object_get(obj);
    990}
    991EXPORT_SYMBOL(drm_gem_vm_open);
    992
    993/**
    994 * drm_gem_vm_close - vma->ops->close implementation for GEM
    995 * @vma: VM area structure
    996 *
    997 * This function implements the #vm_operations_struct close() callback for GEM
    998 * drivers. This must be used together with drm_gem_vm_open().
    999 */
   1000void drm_gem_vm_close(struct vm_area_struct *vma)
   1001{
   1002	struct drm_gem_object *obj = vma->vm_private_data;
   1003
   1004	drm_gem_object_put(obj);
   1005}
   1006EXPORT_SYMBOL(drm_gem_vm_close);
   1007
   1008/**
   1009 * drm_gem_mmap_obj - memory map a GEM object
   1010 * @obj: the GEM object to map
   1011 * @obj_size: the object size to be mapped, in bytes
   1012 * @vma: VMA for the area to be mapped
   1013 *
   1014 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
   1015 * vm_ops. Depending on their requirements, GEM objects can either
   1016 * provide a fault handler in their vm_ops (in which case any accesses to
   1017 * the object will be trapped, to perform migration, GTT binding, surface
   1018 * register allocation, or performance monitoring), or mmap the buffer memory
   1019 * synchronously after calling drm_gem_mmap_obj.
   1020 *
   1021 * This function is mainly intended to implement the DMABUF mmap operation, when
   1022 * the GEM object is not looked up based on its fake offset. To implement the
   1023 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
   1024 *
   1025 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
   1026 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
   1027 * callers must verify access restrictions before calling this helper.
   1028 *
   1029 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
   1030 * size, or if no vm_ops are provided.
   1031 */
   1032int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
   1033		     struct vm_area_struct *vma)
   1034{
   1035	int ret;
   1036
   1037	/* Check for valid size. */
   1038	if (obj_size < vma->vm_end - vma->vm_start)
   1039		return -EINVAL;
   1040
   1041	/* Take a ref for this mapping of the object, so that the fault
   1042	 * handler can dereference the mmap offset's pointer to the object.
   1043	 * This reference is cleaned up by the corresponding vm_close
   1044	 * (which should happen whether the vma was created by this call, or
   1045	 * by a vm_open due to mremap or partial unmap or whatever).
   1046	 */
   1047	drm_gem_object_get(obj);
   1048
   1049	vma->vm_private_data = obj;
   1050	vma->vm_ops = obj->funcs->vm_ops;
   1051
   1052	if (obj->funcs->mmap) {
   1053		ret = obj->funcs->mmap(obj, vma);
   1054		if (ret)
   1055			goto err_drm_gem_object_put;
   1056		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
   1057	} else {
   1058		if (!vma->vm_ops) {
   1059			ret = -EINVAL;
   1060			goto err_drm_gem_object_put;
   1061		}
   1062
   1063		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
   1064		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
   1065		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
   1066	}
   1067
   1068	return 0;
   1069
   1070err_drm_gem_object_put:
   1071	drm_gem_object_put(obj);
   1072	return ret;
   1073}
   1074EXPORT_SYMBOL(drm_gem_mmap_obj);
   1075
   1076/**
   1077 * drm_gem_mmap - memory map routine for GEM objects
   1078 * @filp: DRM file pointer
   1079 * @vma: VMA for the area to be mapped
   1080 *
   1081 * If a driver supports GEM object mapping, mmap calls on the DRM file
   1082 * descriptor will end up here.
   1083 *
   1084 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
   1085 * contain the fake offset we created when the GTT map ioctl was called on
   1086 * the object) and map it with a call to drm_gem_mmap_obj().
   1087 *
   1088 * If the caller is not granted access to the buffer object, the mmap will fail
   1089 * with EACCES. Please see the vma manager for more information.
   1090 */
   1091int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
   1092{
   1093	struct drm_file *priv = filp->private_data;
   1094	struct drm_device *dev = priv->minor->dev;
   1095	struct drm_gem_object *obj = NULL;
   1096	struct drm_vma_offset_node *node;
   1097	int ret;
   1098
   1099	if (drm_dev_is_unplugged(dev))
   1100		return -ENODEV;
   1101
   1102	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
   1103	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
   1104						  vma->vm_pgoff,
   1105						  vma_pages(vma));
   1106	if (likely(node)) {
   1107		obj = container_of(node, struct drm_gem_object, vma_node);
   1108		/*
   1109		 * When the object is being freed, after it hits 0-refcnt it
   1110		 * proceeds to tear down the object. In the process it will
   1111		 * attempt to remove the VMA offset and so acquire this
   1112		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
   1113		 * that matches our range, we know it is in the process of being
   1114		 * destroyed and will be freed as soon as we release the lock -
   1115		 * so we have to check for the 0-refcnted object and treat it as
   1116		 * invalid.
   1117		 */
   1118		if (!kref_get_unless_zero(&obj->refcount))
   1119			obj = NULL;
   1120	}
   1121	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
   1122
   1123	if (!obj)
   1124		return -EINVAL;
   1125
   1126	if (!drm_vma_node_is_allowed(node, priv)) {
   1127		drm_gem_object_put(obj);
   1128		return -EACCES;
   1129	}
   1130
   1131	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
   1132			       vma);
   1133
   1134	drm_gem_object_put(obj);
   1135
   1136	return ret;
   1137}
   1138EXPORT_SYMBOL(drm_gem_mmap);
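/*
 * Usage sketch: drivers normally reach drm_gem_mmap() through the stock GEM
 * &file_operations, e.g. via the DEFINE_DRM_GEM_FOPS() helper, which among
 * other things sets .mmap = drm_gem_mmap (hypothetical foo driver shown):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.fops = &foo_fops,
 *		...
 *	};
 */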
   1139
   1140void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
   1141			const struct drm_gem_object *obj)
   1142{
   1143	drm_printf_indent(p, indent, "name=%d\n", obj->name);
   1144	drm_printf_indent(p, indent, "refcount=%u\n",
   1145			  kref_read(&obj->refcount));
   1146	drm_printf_indent(p, indent, "start=%08lx\n",
   1147			  drm_vma_node_start(&obj->vma_node));
   1148	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
   1149	drm_printf_indent(p, indent, "imported=%s\n",
   1150			  str_yes_no(obj->import_attach));
   1151
   1152	if (obj->funcs->print_info)
   1153		obj->funcs->print_info(p, indent, obj);
   1154}
   1155
   1156int drm_gem_pin(struct drm_gem_object *obj)
   1157{
   1158	if (obj->funcs->pin)
   1159		return obj->funcs->pin(obj);
   1160	else
   1161		return 0;
   1162}
   1163
   1164void drm_gem_unpin(struct drm_gem_object *obj)
   1165{
   1166	if (obj->funcs->unpin)
   1167		obj->funcs->unpin(obj);
   1168}
   1169
   1170int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
   1171{
   1172	int ret;
   1173
   1174	if (!obj->funcs->vmap)
   1175		return -EOPNOTSUPP;
   1176
   1177	ret = obj->funcs->vmap(obj, map);
   1178	if (ret)
   1179		return ret;
   1180	else if (iosys_map_is_null(map))
   1181		return -ENOMEM;
   1182
   1183	return 0;
   1184}
   1185EXPORT_SYMBOL(drm_gem_vmap);
   1186
   1187void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
   1188{
   1189	if (iosys_map_is_null(map))
   1190		return;
   1191
   1192	if (obj->funcs->vunmap)
   1193		obj->funcs->vunmap(obj, map);
   1194
   1195	/* Always set the mapping to NULL. Callers may rely on this. */
   1196	iosys_map_clear(map);
   1197}
   1198EXPORT_SYMBOL(drm_gem_vunmap);
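/*
 * Usage sketch: a kernel mapping obtained through drm_gem_vmap() is
 * described by a struct iosys_map and must always be balanced by
 * drm_gem_vunmap(), roughly (data/len being whatever the caller wants to
 * copy in):
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *
 *	drm_gem_vunmap(obj, &map);
 */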
   1199
   1200/**
   1201 * drm_gem_lock_reservations - Sets up the ww context and acquires
   1202 * the lock on an array of GEM objects.
   1203 *
   1204 * Once you've locked your reservations, you'll want to set up space
   1205 * for your shared fences (if applicable), submit your job, then
   1206 * drm_gem_unlock_reservations().
   1207 *
   1208 * @objs: drm_gem_objects to lock
   1209 * @count: Number of objects in @objs
   1210 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
   1211 * part of tracking this set of locked reservations.
   1212 */
   1213int
   1214drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
   1215			  struct ww_acquire_ctx *acquire_ctx)
   1216{
   1217	int contended = -1;
   1218	int i, ret;
   1219
   1220	ww_acquire_init(acquire_ctx, &reservation_ww_class);
   1221
   1222retry:
   1223	if (contended != -1) {
   1224		struct drm_gem_object *obj = objs[contended];
   1225
   1226		ret = dma_resv_lock_slow_interruptible(obj->resv,
   1227								 acquire_ctx);
   1228		if (ret) {
   1229			ww_acquire_done(acquire_ctx);
   1230			return ret;
   1231		}
   1232	}
   1233
   1234	for (i = 0; i < count; i++) {
   1235		if (i == contended)
   1236			continue;
   1237
   1238		ret = dma_resv_lock_interruptible(objs[i]->resv,
   1239							    acquire_ctx);
   1240		if (ret) {
   1241			int j;
   1242
   1243			for (j = 0; j < i; j++)
   1244				dma_resv_unlock(objs[j]->resv);
   1245
   1246			if (contended != -1 && contended >= i)
   1247				dma_resv_unlock(objs[contended]->resv);
   1248
   1249			if (ret == -EDEADLK) {
   1250				contended = i;
   1251				goto retry;
   1252			}
   1253
   1254			ww_acquire_done(acquire_ctx);
   1255			return ret;
   1256		}
   1257	}
   1258
   1259	ww_acquire_done(acquire_ctx);
   1260
   1261	return 0;
   1262}
   1263EXPORT_SYMBOL(drm_gem_lock_reservations);
   1264
   1265void
   1266drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
   1267			    struct ww_acquire_ctx *acquire_ctx)
   1268{
   1269	int i;
   1270
   1271	for (i = 0; i < count; i++)
   1272		dma_resv_unlock(objs[i]->resv);
   1273
   1274	ww_acquire_fini(acquire_ctx);
   1275}
   1276EXPORT_SYMBOL(drm_gem_unlock_reservations);
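/*
 * Usage sketch for the two helpers above (hypothetical job submission path;
 * "fence" is the job's dma_fence): lock every BO of the job, reserve fence
 * slots, push the job, publish its fence, then unlock:
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++) {
 *		ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *		if (ret)
 *			goto out_unlock;
 *	}
 *
 *	... hand the job to the hardware or scheduler here ...
 *
 *	for (i = 0; i < count; i++)
 *		dma_resv_add_fence(objs[i]->resv, fence,
 *				   DMA_RESV_USAGE_WRITE);
 *
 * out_unlock:
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 *	return ret;
 */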