cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

drm_vm.c (17836B)


/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

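/** Bookkeeping entry tying a mapped VMA to the owning pid, kept in drm_device::vmalist */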
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

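/*
 * Compute the page protection for an I/O map (registers or frame buffer),
 * applying the architecture-appropriate caching attributes: write-combining
 * where the map requests it, uncached otherwise.
 */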
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__) || defined(__loongarch__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

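/*
 * Compute the page protection for DMA-backed maps; only non-cache-coherent
 * PowerPC needs a non-default (uncached write-combining) protection here.
 */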
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

/*
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault information.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault information.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(dev->dev,
						  map->size,
						  map->handle,
						  map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault information.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault information.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

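/*
 * Track a new mapping: record \p vma and the current pid in
 * drm_device::vmalist. Caller must hold dev->struct_mutex.
 */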
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

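/*
 * Drop the drm_vma_entry for \p vma from drm_device::vmalist, if present.
 * Caller must hold dev->struct_mutex.
 */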
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

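/*
 * Return the architecture-specific base that must be added to a map offset
 * before remapping: the dense memory base on Alpha, zero everywhere else.
 */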
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/*
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise searches the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally calls drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

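/*
 * Entry point for mmap on legacy DRM device files: takes
 * dev->struct_mutex and delegates to drm_mmap_locked().
 */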
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
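
/*
 * Usage sketch (not part of this file): a legacy driver typically wires
 * drm_legacy_mmap into its file_operations; "foo" is a placeholder name.
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_legacy_mmap,
 *		.poll = drm_poll,
 *	};
 */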

#if IS_ENABLED(CONFIG_DRM_LEGACY)
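/*
 * Drop all drm_vma_entry bookkeeping for \p dev on teardown; only the
 * list entries are freed, the mappings themselves are left alone.
 */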
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif