cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

drm_bufs.c (43500B)


      1/*
      2 * Legacy: Generic DRM Buffer Management
      3 *
      4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
      5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
      6 * All Rights Reserved.
      7 *
      8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
      9 * Author: Gareth Hughes <gareth@valinux.com>
     10 *
     11 * Permission is hereby granted, free of charge, to any person obtaining a
     12 * copy of this software and associated documentation files (the "Software"),
     13 * to deal in the Software without restriction, including without limitation
     14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     15 * and/or sell copies of the Software, and to permit persons to whom the
     16 * Software is furnished to do so, subject to the following conditions:
     17 *
     18 * The above copyright notice and this permission notice (including the next
     19 * paragraph) shall be included in all copies or substantial portions of the
     20 * Software.
     21 *
     22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
     26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     28 * OTHER DEALINGS IN THE SOFTWARE.
     29 */
     30
     31#include <linux/export.h>
     32#include <linux/log2.h>
     33#include <linux/mm.h>
     34#include <linux/mman.h>
     35#include <linux/nospec.h>
     36#include <linux/pci.h>
     37#include <linux/slab.h>
     38#include <linux/uaccess.h>
     39#include <linux/vmalloc.h>
     40
     41#include <asm/shmparam.h>
     42
     43#include <drm/drm_device.h>
     44#include <drm/drm_drv.h>
     45#include <drm/drm_file.h>
     46#include <drm/drm_print.h>
     47
     48#include "drm_legacy.h"
     49
     50
     51static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
     52						  struct drm_local_map *map)
     53{
     54	struct drm_map_list *entry;
     55
     56	list_for_each_entry(entry, &dev->maplist, head) {
     57		/*
     58		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
     59		 * while PCI resources may live above that, we only compare the
     60		 * lower 32 bits of the map offset for maps of type
     61		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
      62		 * It is assumed that if a driver has more than one resource
      63		 * of each type, the lower 32 bits are different.
     64		 */
     65		if (!entry->map ||
     66		    map->type != entry->map->type ||
     67		    entry->master != dev->master)
     68			continue;
     69		switch (map->type) {
     70		case _DRM_SHM:
     71			if (map->flags != _DRM_CONTAINS_LOCK)
     72				break;
     73			return entry;
     74		case _DRM_REGISTERS:
     75		case _DRM_FRAME_BUFFER:
     76			if ((entry->map->offset & 0xffffffff) ==
     77			    (map->offset & 0xffffffff))
     78				return entry;
     79			break;
     80		default: /* Make gcc happy */
     81			break;
     82		}
     83		if (entry->map->offset == map->offset)
     84			return entry;
     85	}
     86
     87	return NULL;
     88}
     89
     90static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
     91			  unsigned long user_token, int hashed_handle, int shm)
     92{
     93	int use_hashed_handle, shift;
     94	unsigned long add;
     95
     96#if (BITS_PER_LONG == 64)
     97	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
     98#elif (BITS_PER_LONG == 32)
     99	use_hashed_handle = hashed_handle;
    100#else
    101#error Unsupported long size. Neither 64 nor 32 bits.
    102#endif
    103
    104	if (!use_hashed_handle) {
    105		int ret;
    106
    107		hash->key = user_token >> PAGE_SHIFT;
    108		ret = drm_ht_insert_item(&dev->map_hash, hash);
    109		if (ret != -EINVAL)
    110			return ret;
    111	}
    112
    113	shift = 0;
    114	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
    115	if (shm && (SHMLBA > PAGE_SIZE)) {
    116		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
    117
    118		/* For shared memory, we have to preserve the SHMLBA
    119		 * bits of the eventual vma->vm_pgoff value during
    120		 * mmap().  Otherwise we run into cache aliasing problems
    121		 * on some platforms.  On these platforms, the pgoff of
    122		 * a mmap() request is used to pick a suitable virtual
    123		 * address for the mmap() region such that it will not
    124		 * cause cache aliasing problems.
    125		 *
    126		 * Therefore, make sure the SHMLBA relevant bits of the
    127		 * hash value we use are equal to those in the original
    128		 * kernel virtual address.
    129		 */
    130		shift = bits;
    131		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
    132	}
    133
    134	return drm_ht_just_insert_please(&dev->map_hash, hash,
    135					 user_token, 32 - PAGE_SHIFT - 3,
    136					 shift, add);
    137}
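/*
 * A quick worked example of the SHMLBA handling above, assuming a platform
 * where SHMLBA is 16 KiB and PAGE_SIZE is 4 KiB (illustrative values, not
 * taken from this file):
 *
 *	SHMLBA >> PAGE_SHIFT               = 4
 *	bits = ilog2(4) + 1                = 3
 *	add |= (user_token >> PAGE_SHIFT) & 0x7
 *
 * drm_ht_just_insert_please() is then told to keep those low 3 bits fixed
 * (shift = 3), so the SHMLBA-relevant bits of the resulting hashed handle
 * match those of the original kernel virtual address, as the comment above
 * requires.
 */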
    138
    139/*
    140 * Core function to create a range of memory available for mapping by a
    141 * non-root process.
    142 *
    143 * Adjusts the memory offset to its absolute value according to the mapping
     144 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
    145 * applicable and if supported by the kernel.
    146 */
    147static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
    148			   unsigned int size, enum drm_map_type type,
    149			   enum drm_map_flags flags,
    150			   struct drm_map_list **maplist)
    151{
    152	struct drm_local_map *map;
    153	struct drm_map_list *list;
    154	unsigned long user_token;
    155	int ret;
    156
    157	map = kmalloc(sizeof(*map), GFP_KERNEL);
    158	if (!map)
    159		return -ENOMEM;
    160
    161	map->offset = offset;
    162	map->size = size;
    163	map->flags = flags;
    164	map->type = type;
    165
    166	/* Only allow shared memory to be removable since we only keep enough
     167	 * bookkeeping information about shared memory to allow for removal
    168	 * when processes fork.
    169	 */
    170	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
    171		kfree(map);
    172		return -EINVAL;
    173	}
    174	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
    175		  (unsigned long long)map->offset, map->size, map->type);
    176
     177	/* page-align _DRM_SHM maps. They are allocated here, so no security
     178	 * hole is created by that, and it works around various broken drivers
     179	 * that use a non-aligned quantity to map the SAREA. --BenH
     180	 */
    181	if (map->type == _DRM_SHM)
    182		map->size = PAGE_ALIGN(map->size);
    183
    184	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
    185		kfree(map);
    186		return -EINVAL;
    187	}
    188	map->mtrr = -1;
    189	map->handle = NULL;
    190
    191	switch (map->type) {
    192	case _DRM_REGISTERS:
    193	case _DRM_FRAME_BUFFER:
    194#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
    195		if (map->offset + (map->size-1) < map->offset ||
    196		    map->offset < virt_to_phys(high_memory)) {
    197			kfree(map);
    198			return -EINVAL;
    199		}
    200#endif
    201		/* Some drivers preinitialize some maps, without the X Server
    202		 * needing to be aware of it.  Therefore, we just return success
    203		 * when the server tries to create a duplicate map.
    204		 */
    205		list = drm_find_matching_map(dev, map);
    206		if (list != NULL) {
    207			if (list->map->size != map->size) {
    208				DRM_DEBUG("Matching maps of type %d with "
    209					  "mismatched sizes, (%ld vs %ld)\n",
    210					  map->type, map->size,
    211					  list->map->size);
    212				list->map->size = map->size;
    213			}
    214
    215			kfree(map);
    216			*maplist = list;
    217			return 0;
    218		}
    219
    220		if (map->type == _DRM_FRAME_BUFFER ||
    221		    (map->flags & _DRM_WRITE_COMBINING)) {
    222			map->mtrr =
    223				arch_phys_wc_add(map->offset, map->size);
    224		}
    225		if (map->type == _DRM_REGISTERS) {
    226			if (map->flags & _DRM_WRITE_COMBINING)
    227				map->handle = ioremap_wc(map->offset,
    228							 map->size);
    229			else
    230				map->handle = ioremap(map->offset, map->size);
    231			if (!map->handle) {
    232				kfree(map);
    233				return -ENOMEM;
    234			}
    235		}
    236
    237		break;
    238	case _DRM_SHM:
    239		list = drm_find_matching_map(dev, map);
    240		if (list != NULL) {
    241			if (list->map->size != map->size) {
    242				DRM_DEBUG("Matching maps of type %d with "
    243					  "mismatched sizes, (%ld vs %ld)\n",
    244					  map->type, map->size, list->map->size);
    245				list->map->size = map->size;
    246			}
    247
    248			kfree(map);
    249			*maplist = list;
    250			return 0;
    251		}
    252		map->handle = vmalloc_user(map->size);
    253		DRM_DEBUG("%lu %d %p\n",
    254			  map->size, order_base_2(map->size), map->handle);
    255		if (!map->handle) {
    256			kfree(map);
    257			return -ENOMEM;
    258		}
    259		map->offset = (unsigned long)map->handle;
    260		if (map->flags & _DRM_CONTAINS_LOCK) {
    261			/* Prevent a 2nd X Server from creating a 2nd lock */
    262			if (dev->master->lock.hw_lock != NULL) {
    263				vfree(map->handle);
    264				kfree(map);
    265				return -EBUSY;
    266			}
    267			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
    268		}
    269		break;
    270	case _DRM_AGP: {
    271		struct drm_agp_mem *entry;
    272		int valid = 0;
    273
    274		if (!dev->agp) {
    275			kfree(map);
    276			return -EINVAL;
    277		}
    278#ifdef __alpha__
    279		map->offset += dev->hose->mem_space->start;
    280#endif
    281		/* In some cases (i810 driver), user space may have already
    282		 * added the AGP base itself, because dev->agp->base previously
    283		 * only got set during AGP enable.  So, only add the base
    284		 * address if the map's offset isn't already within the
    285		 * aperture.
    286		 */
    287		if (map->offset < dev->agp->base ||
    288		    map->offset > dev->agp->base +
    289		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
    290			map->offset += dev->agp->base;
    291		}
    292		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
    293
     294		/* This assumes the DRM is in total control of AGP space.
     295		 * That's not always the case, as AGP can be under the control
     296		 * of user space (e.g. the i810 driver). In that case this loop
     297		 * gets skipped, and we double-check that dev->agp->memory is
     298		 * actually non-empty as well as the map being invalid before EPERM'ing.
     299		 */
    300		list_for_each_entry(entry, &dev->agp->memory, head) {
    301			if ((map->offset >= entry->bound) &&
    302			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
    303				valid = 1;
    304				break;
    305			}
    306		}
    307		if (!list_empty(&dev->agp->memory) && !valid) {
    308			kfree(map);
    309			return -EPERM;
    310		}
    311		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
    312			  (unsigned long long)map->offset, map->size);
    313
    314		break;
    315	}
    316	case _DRM_SCATTER_GATHER:
    317		if (!dev->sg) {
    318			kfree(map);
    319			return -EINVAL;
    320		}
    321		map->offset += (unsigned long)dev->sg->virtual;
    322		break;
    323	case _DRM_CONSISTENT:
     324		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
     325		 * As we're limiting the address to 2^32-1 (or less),
     326		 * casting it down to 32 bits is no problem, but we
     327		 * need to point to a 64-bit variable first.
    328		 */
    329		map->handle = dma_alloc_coherent(dev->dev,
    330						 map->size,
    331						 &map->offset,
    332						 GFP_KERNEL);
    333		if (!map->handle) {
    334			kfree(map);
    335			return -ENOMEM;
    336		}
    337		break;
    338	default:
    339		kfree(map);
    340		return -EINVAL;
    341	}
    342
    343	list = kzalloc(sizeof(*list), GFP_KERNEL);
    344	if (!list) {
    345		if (map->type == _DRM_REGISTERS)
    346			iounmap(map->handle);
    347		kfree(map);
    348		return -EINVAL;
    349	}
    350	list->map = map;
    351
    352	mutex_lock(&dev->struct_mutex);
    353	list_add(&list->head, &dev->maplist);
    354
    355	/* Assign a 32-bit handle */
    356	/* We do it here so that dev->struct_mutex protects the increment */
    357	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
    358		map->offset;
    359	ret = drm_map_handle(dev, &list->hash, user_token, 0,
    360			     (map->type == _DRM_SHM));
    361	if (ret) {
    362		if (map->type == _DRM_REGISTERS)
    363			iounmap(map->handle);
    364		kfree(map);
    365		kfree(list);
    366		mutex_unlock(&dev->struct_mutex);
    367		return ret;
    368	}
    369
    370	list->user_token = list->hash.key << PAGE_SHIFT;
    371	mutex_unlock(&dev->struct_mutex);
    372
    373	if (!(map->flags & _DRM_DRIVER))
    374		list->master = dev->master;
    375	*maplist = list;
    376	return 0;
    377}
    378
    379int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
    380		      unsigned int size, enum drm_map_type type,
    381		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
    382{
    383	struct drm_map_list *list;
    384	int rc;
    385
    386	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
    387	if (!rc)
    388		*map_ptr = list->map;
    389	return rc;
    390}
    391EXPORT_SYMBOL(drm_legacy_addmap);
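/*
 * A minimal sketch of how a legacy driver might call drm_legacy_addmap() for
 * its register BAR; the pci_dev pointer, BAR index and variable names are
 * illustrative assumptions, not taken from any particular driver:
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, 0, &regs);
 *	if (ret)
 *		return ret;
 *
 * On success regs->handle holds the ioremap()'d CPU address of the registers.
 */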
    392
    393struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
    394					 unsigned int token)
    395{
    396	struct drm_map_list *_entry;
    397
    398	list_for_each_entry(_entry, &dev->maplist, head)
    399		if (_entry->user_token == token)
    400			return _entry->map;
    401	return NULL;
    402}
    403EXPORT_SYMBOL(drm_legacy_findmap);
    404
    405/*
    406 * Ioctl to specify a range of memory that is available for mapping by a
    407 * non-root process.
    408 *
    409 * \param inode device inode.
    410 * \param file_priv DRM file private.
    411 * \param cmd command.
    412 * \param arg pointer to a drm_map structure.
    413 * \return zero on success or a negative value on error.
    414 *
    415 */
    416int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
    417			    struct drm_file *file_priv)
    418{
    419	struct drm_map *map = data;
    420	struct drm_map_list *maplist;
    421	int err;
    422
    423	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
    424		return -EPERM;
    425
    426	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
    427	    !drm_core_check_feature(dev, DRIVER_LEGACY))
    428		return -EOPNOTSUPP;
    429
    430	err = drm_addmap_core(dev, map->offset, map->size, map->type,
    431			      map->flags, &maplist);
    432
    433	if (err)
    434		return err;
    435
     436	/* avoid a warning on 64-bit; this casting isn't very nice, but the API is already set, so it's too late to change */
    437	map->handle = (void *)(unsigned long)maplist->user_token;
    438
    439	/*
    440	 * It appears that there are no users of this value whatsoever --
    441	 * drmAddMap just discards it.  Let's not encourage its use.
    442	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
    443	 *  it's not a real mtrr index anymore.)
    444	 */
    445	map->mtrr = -1;
    446
    447	return 0;
    448}
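/*
 * From user space the same path is reached through DRM_IOCTL_ADD_MAP (what
 * libdrm's drmAddMap() wraps). A rough sketch, assuming an already-open DRM
 * file descriptor "fd" and omitting error handling:
 *
 *	struct drm_map req = {
 *		.offset = 0,			(kernel picks the SHM address)
 *		.size   = 4096,
 *		.type   = _DRM_SHM,
 *		.flags  = _DRM_CONTAINS_LOCK,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &req);
 *
 * On return req.handle carries the 32-bit user_token described above, which
 * is what the client later passes as the mmap() offset.
 */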
    449
    450/*
     451 * Get mapping information.
    452 *
    453 * \param inode device inode.
    454 * \param file_priv DRM file private.
    455 * \param cmd command.
    456 * \param arg user argument, pointing to a drm_map structure.
    457 *
    458 * \return zero on success or a negative number on failure.
    459 *
    460 * Searches for the mapping with the specified offset and copies its information
    461 * into userspace
    462 */
    463int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
    464			    struct drm_file *file_priv)
    465{
    466	struct drm_map *map = data;
    467	struct drm_map_list *r_list = NULL;
    468	struct list_head *list;
    469	int idx;
    470	int i;
    471
    472	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
    473	    !drm_core_check_feature(dev, DRIVER_LEGACY))
    474		return -EOPNOTSUPP;
    475
    476	idx = map->offset;
    477	if (idx < 0)
    478		return -EINVAL;
    479
    480	i = 0;
    481	mutex_lock(&dev->struct_mutex);
    482	list_for_each(list, &dev->maplist) {
    483		if (i == idx) {
    484			r_list = list_entry(list, struct drm_map_list, head);
    485			break;
    486		}
    487		i++;
    488	}
    489	if (!r_list || !r_list->map) {
    490		mutex_unlock(&dev->struct_mutex);
    491		return -EINVAL;
    492	}
    493
    494	map->offset = r_list->map->offset;
    495	map->size = r_list->map->size;
    496	map->type = r_list->map->type;
    497	map->flags = r_list->map->flags;
    498	map->handle = (void *)(unsigned long) r_list->user_token;
    499	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
    500
    501	mutex_unlock(&dev->struct_mutex);
    502
    503	return 0;
    504}
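/*
 * Note that this ioctl treats the incoming offset field as a list index, not
 * a bus address, so a client can enumerate every map roughly as follows
 * (sketch only; "fd" is an assumed open DRM file descriptor and error
 * handling is omitted):
 *
 *	struct drm_map m;
 *	int idx;
 *
 *	for (idx = 0; ; idx++) {
 *		memset(&m, 0, sizeof(m));
 *		m.offset = idx;
 *		if (ioctl(fd, DRM_IOCTL_GET_MAP, &m))
 *			break;
 *		(m.offset, m.size, m.type, m.flags, m.handle describe map idx)
 *	}
 */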
    505
    506/*
     507 * Remove a map private from the list and deallocate resources if the mapping
    508 * isn't in use.
    509 *
     510 * Searches for the map on drm_device::maplist, removes it from the list, checks
     511 * whether it's being used, and frees any associated resources (such as MTRRs)
     512 * if it's not in use.
    513 *
    514 * \sa drm_legacy_addmap
    515 */
    516int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
    517{
    518	struct drm_map_list *r_list = NULL, *list_t;
    519	int found = 0;
    520	struct drm_master *master;
    521
    522	/* Find the list entry for the map and remove it */
    523	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
    524		if (r_list->map == map) {
    525			master = r_list->master;
    526			list_del(&r_list->head);
    527			drm_ht_remove_key(&dev->map_hash,
    528					  r_list->user_token >> PAGE_SHIFT);
    529			kfree(r_list);
    530			found = 1;
    531			break;
    532		}
    533	}
    534
    535	if (!found)
    536		return -EINVAL;
    537
    538	switch (map->type) {
    539	case _DRM_REGISTERS:
    540		iounmap(map->handle);
    541		fallthrough;
    542	case _DRM_FRAME_BUFFER:
    543		arch_phys_wc_del(map->mtrr);
    544		break;
    545	case _DRM_SHM:
    546		vfree(map->handle);
    547		if (master) {
    548			if (dev->sigdata.lock == master->lock.hw_lock)
    549				dev->sigdata.lock = NULL;
    550			master->lock.hw_lock = NULL;   /* SHM removed */
    551			master->lock.file_priv = NULL;
    552			wake_up_interruptible_all(&master->lock.lock_queue);
    553		}
    554		break;
    555	case _DRM_AGP:
    556	case _DRM_SCATTER_GATHER:
    557		break;
    558	case _DRM_CONSISTENT:
    559		dma_free_coherent(dev->dev,
    560				  map->size,
    561				  map->handle,
    562				  map->offset);
    563		break;
    564	}
    565	kfree(map);
    566
    567	return 0;
    568}
    569EXPORT_SYMBOL(drm_legacy_rmmap_locked);
    570
    571void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
    572{
    573	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
    574	    !drm_core_check_feature(dev, DRIVER_LEGACY))
    575		return;
    576
    577	mutex_lock(&dev->struct_mutex);
    578	drm_legacy_rmmap_locked(dev, map);
    579	mutex_unlock(&dev->struct_mutex);
    580}
    581EXPORT_SYMBOL(drm_legacy_rmmap);
    582
    583void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
    584{
    585	struct drm_map_list *r_list, *list_temp;
    586
    587	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
    588		return;
    589
    590	mutex_lock(&dev->struct_mutex);
    591	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
    592		if (r_list->master == master) {
    593			drm_legacy_rmmap_locked(dev, r_list->map);
    594			r_list = NULL;
    595		}
    596	}
    597	mutex_unlock(&dev->struct_mutex);
    598}
    599
    600void drm_legacy_rmmaps(struct drm_device *dev)
    601{
    602	struct drm_map_list *r_list, *list_temp;
    603
    604	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
    605		drm_legacy_rmmap(dev, r_list->map);
    606}
    607
    608/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
    609 * the last close of the device, and this is necessary for cleanup when things
    610 * exit uncleanly.  Therefore, having userland manually remove mappings seems
    611 * like a pointless exercise since they're going away anyway.
    612 *
    613 * One use case might be after addmap is allowed for normal users for SHM and
    614 * gets used by drivers that the server doesn't need to care about.  This seems
    615 * unlikely.
    616 *
    617 * \param inode device inode.
    618 * \param file_priv DRM file private.
    619 * \param cmd command.
    620 * \param arg pointer to a struct drm_map structure.
    621 * \return zero on success or a negative value on error.
    622 */
    623int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
    624			   struct drm_file *file_priv)
    625{
    626	struct drm_map *request = data;
    627	struct drm_local_map *map = NULL;
    628	struct drm_map_list *r_list;
    629	int ret;
    630
    631	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
    632	    !drm_core_check_feature(dev, DRIVER_LEGACY))
    633		return -EOPNOTSUPP;
    634
    635	mutex_lock(&dev->struct_mutex);
    636	list_for_each_entry(r_list, &dev->maplist, head) {
    637		if (r_list->map &&
    638		    r_list->user_token == (unsigned long)request->handle &&
    639		    r_list->map->flags & _DRM_REMOVABLE) {
    640			map = r_list->map;
    641			break;
    642		}
    643	}
    644
     645	/* List has wrapped around to the head pointer, or it's empty and we
     646	 * didn't find anything.
    647	 */
    648	if (list_empty(&dev->maplist) || !map) {
    649		mutex_unlock(&dev->struct_mutex);
    650		return -EINVAL;
    651	}
    652
    653	/* Register and framebuffer maps are permanent */
    654	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
    655		mutex_unlock(&dev->struct_mutex);
    656		return 0;
    657	}
    658
    659	ret = drm_legacy_rmmap_locked(dev, map);
    660
    661	mutex_unlock(&dev->struct_mutex);
    662
    663	return ret;
    664}
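/*
 * The matching removal from user space goes through DRM_IOCTL_RM_MAP (wrapped
 * by libdrm's drmRmMap()), passing back the handle returned by
 * DRM_IOCTL_ADD_MAP. As the code above shows, only maps created with
 * _DRM_REMOVABLE (in practice _DRM_SHM maps) can actually be removed this
 * way; register and framebuffer maps are treated as permanent. Sketch, with
 * "fd" and "handle" assumed from the addmap example earlier:
 *
 *	struct drm_map req = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_RM_MAP, &req);
 */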
    665
    666/*
    667 * Cleanup after an error on one of the addbufs() functions.
    668 *
    669 * \param dev DRM device.
    670 * \param entry buffer entry where the error occurred.
    671 *
    672 * Frees any pages and buffers associated with the given entry.
    673 */
    674static void drm_cleanup_buf_error(struct drm_device *dev,
    675				  struct drm_buf_entry *entry)
    676{
    677	drm_dma_handle_t *dmah;
    678	int i;
    679
    680	if (entry->seg_count) {
    681		for (i = 0; i < entry->seg_count; i++) {
    682			if (entry->seglist[i]) {
    683				dmah = entry->seglist[i];
    684				dma_free_coherent(dev->dev,
    685						  dmah->size,
    686						  dmah->vaddr,
    687						  dmah->busaddr);
    688				kfree(dmah);
    689			}
    690		}
    691		kfree(entry->seglist);
    692
    693		entry->seg_count = 0;
    694	}
    695
    696	if (entry->buf_count) {
    697		for (i = 0; i < entry->buf_count; i++) {
    698			kfree(entry->buflist[i].dev_private);
    699		}
    700		kfree(entry->buflist);
    701
    702		entry->buf_count = 0;
    703	}
    704}
    705
    706#if IS_ENABLED(CONFIG_AGP)
    707/*
    708 * Add AGP buffers for DMA transfers.
    709 *
    710 * \param dev struct drm_device to which the buffers are to be added.
    711 * \param request pointer to a struct drm_buf_desc describing the request.
    712 * \return zero on success or a negative number on failure.
    713 *
     714 * After some sanity checks, creates a drm_buf structure for each buffer and
    715 * reallocates the buffer list of the same size order to accommodate the new
    716 * buffers.
    717 */
    718int drm_legacy_addbufs_agp(struct drm_device *dev,
    719			   struct drm_buf_desc *request)
    720{
    721	struct drm_device_dma *dma = dev->dma;
    722	struct drm_buf_entry *entry;
    723	struct drm_agp_mem *agp_entry;
    724	struct drm_buf *buf;
    725	unsigned long offset;
    726	unsigned long agp_offset;
    727	int count;
    728	int order;
    729	int size;
    730	int alignment;
    731	int page_order;
    732	int total;
    733	int byte_count;
    734	int i, valid;
    735	struct drm_buf **temp_buflist;
    736
    737	if (!dma)
    738		return -EINVAL;
    739
    740	count = request->count;
    741	order = order_base_2(request->size);
    742	size = 1 << order;
    743
    744	alignment = (request->flags & _DRM_PAGE_ALIGN)
    745	    ? PAGE_ALIGN(size) : size;
    746	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
    747	total = PAGE_SIZE << page_order;
    748
    749	byte_count = 0;
    750	agp_offset = dev->agp->base + request->agp_start;
    751
    752	DRM_DEBUG("count:      %d\n", count);
    753	DRM_DEBUG("order:      %d\n", order);
    754	DRM_DEBUG("size:       %d\n", size);
    755	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
    756	DRM_DEBUG("alignment:  %d\n", alignment);
    757	DRM_DEBUG("page_order: %d\n", page_order);
    758	DRM_DEBUG("total:      %d\n", total);
    759
    760	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
    761		return -EINVAL;
    762
    763	/* Make sure buffers are located in AGP memory that we own */
    764	valid = 0;
    765	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
    766		if ((agp_offset >= agp_entry->bound) &&
    767		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
    768			valid = 1;
    769			break;
    770		}
    771	}
    772	if (!list_empty(&dev->agp->memory) && !valid) {
    773		DRM_DEBUG("zone invalid\n");
    774		return -EINVAL;
    775	}
    776	spin_lock(&dev->buf_lock);
    777	if (dev->buf_use) {
    778		spin_unlock(&dev->buf_lock);
    779		return -EBUSY;
    780	}
    781	atomic_inc(&dev->buf_alloc);
    782	spin_unlock(&dev->buf_lock);
    783
    784	mutex_lock(&dev->struct_mutex);
    785	entry = &dma->bufs[order];
    786	if (entry->buf_count) {
    787		mutex_unlock(&dev->struct_mutex);
    788		atomic_dec(&dev->buf_alloc);
    789		return -ENOMEM;	/* May only call once for each order */
    790	}
    791
    792	if (count < 0 || count > 4096) {
    793		mutex_unlock(&dev->struct_mutex);
    794		atomic_dec(&dev->buf_alloc);
    795		return -EINVAL;
    796	}
    797
    798	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
    799	if (!entry->buflist) {
    800		mutex_unlock(&dev->struct_mutex);
    801		atomic_dec(&dev->buf_alloc);
    802		return -ENOMEM;
    803	}
    804
    805	entry->buf_size = size;
    806	entry->page_order = page_order;
    807
    808	offset = 0;
    809
    810	while (entry->buf_count < count) {
    811		buf = &entry->buflist[entry->buf_count];
    812		buf->idx = dma->buf_count + entry->buf_count;
    813		buf->total = alignment;
    814		buf->order = order;
    815		buf->used = 0;
    816
    817		buf->offset = (dma->byte_count + offset);
    818		buf->bus_address = agp_offset + offset;
    819		buf->address = (void *)(agp_offset + offset);
    820		buf->next = NULL;
    821		buf->waiting = 0;
    822		buf->pending = 0;
    823		buf->file_priv = NULL;
    824
    825		buf->dev_priv_size = dev->driver->dev_priv_size;
    826		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
    827		if (!buf->dev_private) {
    828			/* Set count correctly so we free the proper amount. */
    829			entry->buf_count = count;
    830			drm_cleanup_buf_error(dev, entry);
    831			mutex_unlock(&dev->struct_mutex);
    832			atomic_dec(&dev->buf_alloc);
    833			return -ENOMEM;
    834		}
    835
    836		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
    837
    838		offset += alignment;
    839		entry->buf_count++;
    840		byte_count += PAGE_SIZE << page_order;
    841	}
    842
    843	DRM_DEBUG("byte_count: %d\n", byte_count);
    844
    845	temp_buflist = krealloc(dma->buflist,
    846				(dma->buf_count + entry->buf_count) *
    847				sizeof(*dma->buflist), GFP_KERNEL);
    848	if (!temp_buflist) {
    849		/* Free the entry because it isn't valid */
    850		drm_cleanup_buf_error(dev, entry);
    851		mutex_unlock(&dev->struct_mutex);
    852		atomic_dec(&dev->buf_alloc);
    853		return -ENOMEM;
    854	}
    855	dma->buflist = temp_buflist;
    856
    857	for (i = 0; i < entry->buf_count; i++) {
    858		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
    859	}
    860
    861	dma->buf_count += entry->buf_count;
    862	dma->seg_count += entry->seg_count;
    863	dma->page_count += byte_count >> PAGE_SHIFT;
    864	dma->byte_count += byte_count;
    865
    866	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
    867	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
    868
    869	mutex_unlock(&dev->struct_mutex);
    870
    871	request->count = entry->buf_count;
    872	request->size = size;
    873
    874	dma->flags = _DRM_DMA_USE_AGP;
    875
    876	atomic_dec(&dev->buf_alloc);
    877	return 0;
    878}
    879EXPORT_SYMBOL(drm_legacy_addbufs_agp);
    880#endif /* CONFIG_AGP */
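/*
 * The size/order bookkeeping above (shared by all of the addbufs variants) is
 * easiest to follow with concrete numbers. Assuming 4 KiB pages and a request
 * for 64 KiB page-aligned buffers (illustrative values only):
 *
 *	order      = order_base_2(65536)   = 16
 *	size       = 1 << 16               = 65536
 *	alignment  = PAGE_ALIGN(65536)     = 65536
 *	page_order = 16 - PAGE_SHIFT       = 4
 *	total      = PAGE_SIZE << 4        = 65536
 *
 * so each buffer occupies exactly one "total"-sized chunk, offset advances by
 * "alignment" per buffer, and byte_count grows by PAGE_SIZE << page_order.
 */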
    881
    882int drm_legacy_addbufs_pci(struct drm_device *dev,
    883			   struct drm_buf_desc *request)
    884{
    885	struct drm_device_dma *dma = dev->dma;
    886	int count;
    887	int order;
    888	int size;
    889	int total;
    890	int page_order;
    891	struct drm_buf_entry *entry;
    892	drm_dma_handle_t *dmah;
    893	struct drm_buf *buf;
    894	int alignment;
    895	unsigned long offset;
    896	int i;
    897	int byte_count;
    898	int page_count;
    899	unsigned long *temp_pagelist;
    900	struct drm_buf **temp_buflist;
    901
    902	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
    903		return -EOPNOTSUPP;
    904
    905	if (!dma)
    906		return -EINVAL;
    907
    908	if (!capable(CAP_SYS_ADMIN))
    909		return -EPERM;
    910
    911	count = request->count;
    912	order = order_base_2(request->size);
    913	size = 1 << order;
    914
    915	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
    916		  request->count, request->size, size, order);
    917
    918	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
    919		return -EINVAL;
    920
    921	alignment = (request->flags & _DRM_PAGE_ALIGN)
    922	    ? PAGE_ALIGN(size) : size;
    923	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
    924	total = PAGE_SIZE << page_order;
    925
    926	spin_lock(&dev->buf_lock);
    927	if (dev->buf_use) {
    928		spin_unlock(&dev->buf_lock);
    929		return -EBUSY;
    930	}
    931	atomic_inc(&dev->buf_alloc);
    932	spin_unlock(&dev->buf_lock);
    933
    934	mutex_lock(&dev->struct_mutex);
    935	entry = &dma->bufs[order];
    936	if (entry->buf_count) {
    937		mutex_unlock(&dev->struct_mutex);
    938		atomic_dec(&dev->buf_alloc);
    939		return -ENOMEM;	/* May only call once for each order */
    940	}
    941
    942	if (count < 0 || count > 4096) {
    943		mutex_unlock(&dev->struct_mutex);
    944		atomic_dec(&dev->buf_alloc);
    945		return -EINVAL;
    946	}
    947
    948	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
    949	if (!entry->buflist) {
    950		mutex_unlock(&dev->struct_mutex);
    951		atomic_dec(&dev->buf_alloc);
    952		return -ENOMEM;
    953	}
    954
    955	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
    956	if (!entry->seglist) {
    957		kfree(entry->buflist);
    958		mutex_unlock(&dev->struct_mutex);
    959		atomic_dec(&dev->buf_alloc);
    960		return -ENOMEM;
    961	}
    962
    963	/* Keep the original pagelist until we know all the allocations
    964	 * have succeeded
    965	 */
    966	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
    967				      sizeof(*dma->pagelist),
    968				      GFP_KERNEL);
    969	if (!temp_pagelist) {
    970		kfree(entry->buflist);
    971		kfree(entry->seglist);
    972		mutex_unlock(&dev->struct_mutex);
    973		atomic_dec(&dev->buf_alloc);
    974		return -ENOMEM;
    975	}
    976	memcpy(temp_pagelist,
    977	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
    978	DRM_DEBUG("pagelist: %d entries\n",
    979		  dma->page_count + (count << page_order));
    980
    981	entry->buf_size = size;
    982	entry->page_order = page_order;
    983	byte_count = 0;
    984	page_count = 0;
    985
    986	while (entry->buf_count < count) {
    987		dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
    988		if (!dmah) {
    989			/* Set count correctly so we free the proper amount. */
    990			entry->buf_count = count;
    991			entry->seg_count = count;
    992			drm_cleanup_buf_error(dev, entry);
    993			kfree(temp_pagelist);
    994			mutex_unlock(&dev->struct_mutex);
    995			atomic_dec(&dev->buf_alloc);
    996			return -ENOMEM;
    997		}
    998
    999		dmah->size = total;
   1000		dmah->vaddr = dma_alloc_coherent(dev->dev,
   1001						 dmah->size,
   1002						 &dmah->busaddr,
   1003						 GFP_KERNEL);
   1004		if (!dmah->vaddr) {
   1005			kfree(dmah);
   1006
   1007			/* Set count correctly so we free the proper amount. */
   1008			entry->buf_count = count;
   1009			entry->seg_count = count;
   1010			drm_cleanup_buf_error(dev, entry);
   1011			kfree(temp_pagelist);
   1012			mutex_unlock(&dev->struct_mutex);
   1013			atomic_dec(&dev->buf_alloc);
   1014			return -ENOMEM;
   1015		}
   1016		entry->seglist[entry->seg_count++] = dmah;
   1017		for (i = 0; i < (1 << page_order); i++) {
   1018			DRM_DEBUG("page %d @ 0x%08lx\n",
   1019				  dma->page_count + page_count,
   1020				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
   1021			temp_pagelist[dma->page_count + page_count++]
   1022				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
   1023		}
   1024		for (offset = 0;
   1025		     offset + size <= total && entry->buf_count < count;
   1026		     offset += alignment, ++entry->buf_count) {
   1027			buf = &entry->buflist[entry->buf_count];
   1028			buf->idx = dma->buf_count + entry->buf_count;
   1029			buf->total = alignment;
   1030			buf->order = order;
   1031			buf->used = 0;
   1032			buf->offset = (dma->byte_count + byte_count + offset);
   1033			buf->address = (void *)(dmah->vaddr + offset);
   1034			buf->bus_address = dmah->busaddr + offset;
   1035			buf->next = NULL;
   1036			buf->waiting = 0;
   1037			buf->pending = 0;
   1038			buf->file_priv = NULL;
   1039
   1040			buf->dev_priv_size = dev->driver->dev_priv_size;
   1041			buf->dev_private = kzalloc(buf->dev_priv_size,
   1042						GFP_KERNEL);
   1043			if (!buf->dev_private) {
   1044				/* Set count correctly so we free the proper amount. */
   1045				entry->buf_count = count;
   1046				entry->seg_count = count;
   1047				drm_cleanup_buf_error(dev, entry);
   1048				kfree(temp_pagelist);
   1049				mutex_unlock(&dev->struct_mutex);
   1050				atomic_dec(&dev->buf_alloc);
   1051				return -ENOMEM;
   1052			}
   1053
   1054			DRM_DEBUG("buffer %d @ %p\n",
   1055				  entry->buf_count, buf->address);
   1056		}
   1057		byte_count += PAGE_SIZE << page_order;
   1058	}
   1059
   1060	temp_buflist = krealloc(dma->buflist,
   1061				(dma->buf_count + entry->buf_count) *
   1062				sizeof(*dma->buflist), GFP_KERNEL);
   1063	if (!temp_buflist) {
   1064		/* Free the entry because it isn't valid */
   1065		drm_cleanup_buf_error(dev, entry);
   1066		kfree(temp_pagelist);
   1067		mutex_unlock(&dev->struct_mutex);
   1068		atomic_dec(&dev->buf_alloc);
   1069		return -ENOMEM;
   1070	}
   1071	dma->buflist = temp_buflist;
   1072
   1073	for (i = 0; i < entry->buf_count; i++) {
   1074		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
   1075	}
   1076
   1077	/* No allocations failed, so now we can replace the original pagelist
   1078	 * with the new one.
   1079	 */
   1080	if (dma->page_count) {
   1081		kfree(dma->pagelist);
   1082	}
   1083	dma->pagelist = temp_pagelist;
   1084
   1085	dma->buf_count += entry->buf_count;
   1086	dma->seg_count += entry->seg_count;
   1087	dma->page_count += entry->seg_count << page_order;
   1088	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
   1089
   1090	mutex_unlock(&dev->struct_mutex);
   1091
   1092	request->count = entry->buf_count;
   1093	request->size = size;
   1094
   1095	if (request->flags & _DRM_PCI_BUFFER_RO)
   1096		dma->flags = _DRM_DMA_USE_PCI_RO;
   1097
   1098	atomic_dec(&dev->buf_alloc);
   1099	return 0;
   1100
   1101}
   1102EXPORT_SYMBOL(drm_legacy_addbufs_pci);
   1103
   1104static int drm_legacy_addbufs_sg(struct drm_device *dev,
   1105				 struct drm_buf_desc *request)
   1106{
   1107	struct drm_device_dma *dma = dev->dma;
   1108	struct drm_buf_entry *entry;
   1109	struct drm_buf *buf;
   1110	unsigned long offset;
   1111	unsigned long agp_offset;
   1112	int count;
   1113	int order;
   1114	int size;
   1115	int alignment;
   1116	int page_order;
   1117	int total;
   1118	int byte_count;
   1119	int i;
   1120	struct drm_buf **temp_buflist;
   1121
   1122	if (!drm_core_check_feature(dev, DRIVER_SG))
   1123		return -EOPNOTSUPP;
   1124
   1125	if (!dma)
   1126		return -EINVAL;
   1127
   1128	if (!capable(CAP_SYS_ADMIN))
   1129		return -EPERM;
   1130
   1131	count = request->count;
   1132	order = order_base_2(request->size);
   1133	size = 1 << order;
   1134
   1135	alignment = (request->flags & _DRM_PAGE_ALIGN)
   1136	    ? PAGE_ALIGN(size) : size;
   1137	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
   1138	total = PAGE_SIZE << page_order;
   1139
   1140	byte_count = 0;
   1141	agp_offset = request->agp_start;
   1142
   1143	DRM_DEBUG("count:      %d\n", count);
   1144	DRM_DEBUG("order:      %d\n", order);
   1145	DRM_DEBUG("size:       %d\n", size);
   1146	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
   1147	DRM_DEBUG("alignment:  %d\n", alignment);
   1148	DRM_DEBUG("page_order: %d\n", page_order);
   1149	DRM_DEBUG("total:      %d\n", total);
   1150
   1151	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
   1152		return -EINVAL;
   1153
   1154	spin_lock(&dev->buf_lock);
   1155	if (dev->buf_use) {
   1156		spin_unlock(&dev->buf_lock);
   1157		return -EBUSY;
   1158	}
   1159	atomic_inc(&dev->buf_alloc);
   1160	spin_unlock(&dev->buf_lock);
   1161
   1162	mutex_lock(&dev->struct_mutex);
   1163	entry = &dma->bufs[order];
   1164	if (entry->buf_count) {
   1165		mutex_unlock(&dev->struct_mutex);
   1166		atomic_dec(&dev->buf_alloc);
   1167		return -ENOMEM;	/* May only call once for each order */
   1168	}
   1169
   1170	if (count < 0 || count > 4096) {
   1171		mutex_unlock(&dev->struct_mutex);
   1172		atomic_dec(&dev->buf_alloc);
   1173		return -EINVAL;
   1174	}
   1175
   1176	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
   1177	if (!entry->buflist) {
   1178		mutex_unlock(&dev->struct_mutex);
   1179		atomic_dec(&dev->buf_alloc);
   1180		return -ENOMEM;
   1181	}
   1182
   1183	entry->buf_size = size;
   1184	entry->page_order = page_order;
   1185
   1186	offset = 0;
   1187
   1188	while (entry->buf_count < count) {
   1189		buf = &entry->buflist[entry->buf_count];
   1190		buf->idx = dma->buf_count + entry->buf_count;
   1191		buf->total = alignment;
   1192		buf->order = order;
   1193		buf->used = 0;
   1194
   1195		buf->offset = (dma->byte_count + offset);
   1196		buf->bus_address = agp_offset + offset;
   1197		buf->address = (void *)(agp_offset + offset
   1198					+ (unsigned long)dev->sg->virtual);
   1199		buf->next = NULL;
   1200		buf->waiting = 0;
   1201		buf->pending = 0;
   1202		buf->file_priv = NULL;
   1203
   1204		buf->dev_priv_size = dev->driver->dev_priv_size;
   1205		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
   1206		if (!buf->dev_private) {
   1207			/* Set count correctly so we free the proper amount. */
   1208			entry->buf_count = count;
   1209			drm_cleanup_buf_error(dev, entry);
   1210			mutex_unlock(&dev->struct_mutex);
   1211			atomic_dec(&dev->buf_alloc);
   1212			return -ENOMEM;
   1213		}
   1214
   1215		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
   1216
   1217		offset += alignment;
   1218		entry->buf_count++;
   1219		byte_count += PAGE_SIZE << page_order;
   1220	}
   1221
   1222	DRM_DEBUG("byte_count: %d\n", byte_count);
   1223
   1224	temp_buflist = krealloc(dma->buflist,
   1225				(dma->buf_count + entry->buf_count) *
   1226				sizeof(*dma->buflist), GFP_KERNEL);
   1227	if (!temp_buflist) {
   1228		/* Free the entry because it isn't valid */
   1229		drm_cleanup_buf_error(dev, entry);
   1230		mutex_unlock(&dev->struct_mutex);
   1231		atomic_dec(&dev->buf_alloc);
   1232		return -ENOMEM;
   1233	}
   1234	dma->buflist = temp_buflist;
   1235
   1236	for (i = 0; i < entry->buf_count; i++) {
   1237		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
   1238	}
   1239
   1240	dma->buf_count += entry->buf_count;
   1241	dma->seg_count += entry->seg_count;
   1242	dma->page_count += byte_count >> PAGE_SHIFT;
   1243	dma->byte_count += byte_count;
   1244
   1245	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
   1246	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
   1247
   1248	mutex_unlock(&dev->struct_mutex);
   1249
   1250	request->count = entry->buf_count;
   1251	request->size = size;
   1252
   1253	dma->flags = _DRM_DMA_USE_SG;
   1254
   1255	atomic_dec(&dev->buf_alloc);
   1256	return 0;
   1257}
   1258
   1259/*
   1260 * Add buffers for DMA transfers (ioctl).
   1261 *
   1262 * \param inode device inode.
   1263 * \param file_priv DRM file private.
   1264 * \param cmd command.
   1265 * \param arg pointer to a struct drm_buf_desc request.
   1266 * \return zero on success or a negative number on failure.
   1267 *
    1268 * According to the memory type specified in drm_buf_desc::flags and the
   1269 * build options, it dispatches the call either to addbufs_agp(),
   1270 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
   1271 * PCI memory respectively.
   1272 */
   1273int drm_legacy_addbufs(struct drm_device *dev, void *data,
   1274		       struct drm_file *file_priv)
   1275{
   1276	struct drm_buf_desc *request = data;
   1277	int ret;
   1278
   1279	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
   1280		return -EOPNOTSUPP;
   1281
   1282	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1283		return -EOPNOTSUPP;
   1284
   1285#if IS_ENABLED(CONFIG_AGP)
   1286	if (request->flags & _DRM_AGP_BUFFER)
   1287		ret = drm_legacy_addbufs_agp(dev, request);
   1288	else
   1289#endif
   1290	if (request->flags & _DRM_SG_BUFFER)
   1291		ret = drm_legacy_addbufs_sg(dev, request);
   1292	else if (request->flags & _DRM_FB_BUFFER)
   1293		ret = -EINVAL;
   1294	else
   1295		ret = drm_legacy_addbufs_pci(dev, request);
   1296
   1297	return ret;
   1298}
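/*
 * From user space this dispatcher is driven by DRM_IOCTL_ADD_BUFS (libdrm's
 * drmAddBufs()). A rough sketch requesting plain PCI buffers, assuming a
 * sufficiently privileged open DRM file descriptor "fd" and omitting error
 * handling:
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = 0,	(neither _DRM_AGP_BUFFER nor _DRM_SG_BUFFER)
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * On return desc.count and desc.size reflect what was actually allocated.
 */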
   1299
   1300/*
   1301 * Get information about the buffer mappings.
   1302 *
    1303 * This was originally meant for debugging purposes, or for use by a sophisticated
   1304 * client library to determine how best to use the available buffers (e.g.,
   1305 * large buffers can be used for image transfer).
   1306 *
   1307 * \param inode device inode.
   1308 * \param file_priv DRM file private.
   1309 * \param cmd command.
   1310 * \param arg pointer to a drm_buf_info structure.
   1311 * \return zero on success or a negative number on failure.
   1312 *
   1313 * Increments drm_device::buf_use while holding the drm_device::buf_lock
    1314 * lock, preventing allocation of more buffers after this call. Information
   1315 * about each requested buffer is then copied into user space.
   1316 */
   1317int __drm_legacy_infobufs(struct drm_device *dev,
   1318			void *data, int *p,
   1319			int (*f)(void *, int, struct drm_buf_entry *))
   1320{
   1321	struct drm_device_dma *dma = dev->dma;
   1322	int i;
   1323	int count;
   1324
   1325	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
   1326		return -EOPNOTSUPP;
   1327
   1328	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1329		return -EOPNOTSUPP;
   1330
   1331	if (!dma)
   1332		return -EINVAL;
   1333
   1334	spin_lock(&dev->buf_lock);
   1335	if (atomic_read(&dev->buf_alloc)) {
   1336		spin_unlock(&dev->buf_lock);
   1337		return -EBUSY;
   1338	}
   1339	++dev->buf_use;		/* Can't allocate more after this call */
   1340	spin_unlock(&dev->buf_lock);
   1341
   1342	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
   1343		if (dma->bufs[i].buf_count)
   1344			++count;
   1345	}
   1346
   1347	DRM_DEBUG("count = %d\n", count);
   1348
   1349	if (*p >= count) {
   1350		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
   1351			struct drm_buf_entry *from = &dma->bufs[i];
   1352
   1353			if (from->buf_count) {
   1354				if (f(data, count, from) < 0)
   1355					return -EFAULT;
   1356				DRM_DEBUG("%d %d %d %d %d\n",
   1357					  i,
   1358					  dma->bufs[i].buf_count,
   1359					  dma->bufs[i].buf_size,
   1360					  dma->bufs[i].low_mark,
   1361					  dma->bufs[i].high_mark);
   1362				++count;
   1363			}
   1364		}
   1365	}
   1366	*p = count;
   1367
   1368	return 0;
   1369}
   1370
   1371static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
   1372{
   1373	struct drm_buf_info *request = data;
   1374	struct drm_buf_desc __user *to = &request->list[count];
   1375	struct drm_buf_desc v = {.count = from->buf_count,
   1376				 .size = from->buf_size,
   1377				 .low_mark = from->low_mark,
   1378				 .high_mark = from->high_mark};
   1379
   1380	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
   1381		return -EFAULT;
   1382	return 0;
   1383}
   1384
   1385int drm_legacy_infobufs(struct drm_device *dev, void *data,
   1386			struct drm_file *file_priv)
   1387{
   1388	struct drm_buf_info *request = data;
   1389
   1390	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
   1391}
   1392
   1393/*
   1394 * Specifies a low and high water mark for buffer allocation
   1395 *
   1396 * \param inode device inode.
   1397 * \param file_priv DRM file private.
   1398 * \param cmd command.
   1399 * \param arg a pointer to a drm_buf_desc structure.
   1400 * \return zero on success or a negative number on failure.
   1401 *
   1402 * Verifies that the size order is bounded between the admissible orders and
   1403 * updates the respective drm_device_dma::bufs entry low and high water mark.
   1404 *
   1405 * \note This ioctl is deprecated and mostly never used.
   1406 */
   1407int drm_legacy_markbufs(struct drm_device *dev, void *data,
   1408			struct drm_file *file_priv)
   1409{
   1410	struct drm_device_dma *dma = dev->dma;
   1411	struct drm_buf_desc *request = data;
   1412	int order;
   1413	struct drm_buf_entry *entry;
   1414
   1415	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
   1416		return -EOPNOTSUPP;
   1417
   1418	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1419		return -EOPNOTSUPP;
   1420
   1421	if (!dma)
   1422		return -EINVAL;
   1423
   1424	DRM_DEBUG("%d, %d, %d\n",
   1425		  request->size, request->low_mark, request->high_mark);
   1426	order = order_base_2(request->size);
   1427	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
   1428		return -EINVAL;
   1429	entry = &dma->bufs[order];
   1430
   1431	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
   1432		return -EINVAL;
   1433	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
   1434		return -EINVAL;
   1435
   1436	entry->low_mark = request->low_mark;
   1437	entry->high_mark = request->high_mark;
   1438
   1439	return 0;
   1440}
   1441
   1442/*
    1443 * Unreserve the buffers in the list, previously reserved using drmDMA.
   1444 *
   1445 * \param inode device inode.
   1446 * \param file_priv DRM file private.
   1447 * \param cmd command.
   1448 * \param arg pointer to a drm_buf_free structure.
   1449 * \return zero on success or a negative number on failure.
   1450 *
   1451 * Calls free_buffer() for each used buffer.
   1452 * This function is primarily used for debugging.
   1453 */
   1454int drm_legacy_freebufs(struct drm_device *dev, void *data,
   1455			struct drm_file *file_priv)
   1456{
   1457	struct drm_device_dma *dma = dev->dma;
   1458	struct drm_buf_free *request = data;
   1459	int i;
   1460	int idx;
   1461	struct drm_buf *buf;
   1462
   1463	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
   1464		return -EOPNOTSUPP;
   1465
   1466	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1467		return -EOPNOTSUPP;
   1468
   1469	if (!dma)
   1470		return -EINVAL;
   1471
   1472	DRM_DEBUG("%d\n", request->count);
   1473	for (i = 0; i < request->count; i++) {
   1474		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
   1475			return -EFAULT;
   1476		if (idx < 0 || idx >= dma->buf_count) {
   1477			DRM_ERROR("Index %d (of %d max)\n",
   1478				  idx, dma->buf_count - 1);
   1479			return -EINVAL;
   1480		}
   1481		idx = array_index_nospec(idx, dma->buf_count);
   1482		buf = dma->buflist[idx];
   1483		if (buf->file_priv != file_priv) {
   1484			DRM_ERROR("Process %d freeing buffer not owned\n",
   1485				  task_pid_nr(current));
   1486			return -EINVAL;
   1487		}
   1488		drm_legacy_free_buffer(dev, buf);
   1489	}
   1490
   1491	return 0;
   1492}
   1493
   1494/*
   1495 * Maps all of the DMA buffers into client-virtual space (ioctl).
   1496 *
   1497 * \param inode device inode.
   1498 * \param file_priv DRM file private.
   1499 * \param cmd command.
   1500 * \param arg pointer to a drm_buf_map structure.
   1501 * \return zero on success or a negative number on failure.
   1502 *
   1503 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
   1504 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
   1505 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
   1506 * drm_mmap_dma().
   1507 */
   1508int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
   1509			 void __user **v,
   1510			 int (*f)(void *, int, unsigned long,
   1511				 struct drm_buf *),
   1512				 struct drm_file *file_priv)
   1513{
   1514	struct drm_device_dma *dma = dev->dma;
   1515	int retcode = 0;
   1516	unsigned long virtual;
   1517	int i;
   1518
   1519	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
   1520		return -EOPNOTSUPP;
   1521
   1522	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1523		return -EOPNOTSUPP;
   1524
   1525	if (!dma)
   1526		return -EINVAL;
   1527
   1528	spin_lock(&dev->buf_lock);
   1529	if (atomic_read(&dev->buf_alloc)) {
   1530		spin_unlock(&dev->buf_lock);
   1531		return -EBUSY;
   1532	}
   1533	dev->buf_use++;		/* Can't allocate more after this call */
   1534	spin_unlock(&dev->buf_lock);
   1535
   1536	if (*p >= dma->buf_count) {
   1537		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
   1538		    || (drm_core_check_feature(dev, DRIVER_SG)
   1539			&& (dma->flags & _DRM_DMA_USE_SG))) {
   1540			struct drm_local_map *map = dev->agp_buffer_map;
   1541			unsigned long token = dev->agp_buffer_token;
   1542
   1543			if (!map) {
   1544				retcode = -EINVAL;
   1545				goto done;
   1546			}
   1547			virtual = vm_mmap(file_priv->filp, 0, map->size,
   1548					  PROT_READ | PROT_WRITE,
   1549					  MAP_SHARED,
   1550					  token);
   1551		} else {
   1552			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
   1553					  PROT_READ | PROT_WRITE,
   1554					  MAP_SHARED, 0);
   1555		}
   1556		if (virtual > -1024UL) {
   1557			/* Real error */
   1558			retcode = (signed long)virtual;
   1559			goto done;
   1560		}
   1561		*v = (void __user *)virtual;
   1562
   1563		for (i = 0; i < dma->buf_count; i++) {
   1564			if (f(data, i, virtual, dma->buflist[i]) < 0) {
   1565				retcode = -EFAULT;
   1566				goto done;
   1567			}
   1568		}
   1569	}
   1570      done:
   1571	*p = dma->buf_count;
   1572	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
   1573
   1574	return retcode;
   1575}
   1576
   1577static int map_one_buf(void *data, int idx, unsigned long virtual,
   1578			struct drm_buf *buf)
   1579{
   1580	struct drm_buf_map *request = data;
   1581	unsigned long address = virtual + buf->offset;	/* *** */
   1582
   1583	if (copy_to_user(&request->list[idx].idx, &buf->idx,
   1584			 sizeof(request->list[0].idx)))
   1585		return -EFAULT;
   1586	if (copy_to_user(&request->list[idx].total, &buf->total,
   1587			 sizeof(request->list[0].total)))
   1588		return -EFAULT;
   1589	if (clear_user(&request->list[idx].used, sizeof(int)))
   1590		return -EFAULT;
   1591	if (copy_to_user(&request->list[idx].address, &address,
   1592			 sizeof(address)))
   1593		return -EFAULT;
   1594	return 0;
   1595}
   1596
   1597int drm_legacy_mapbufs(struct drm_device *dev, void *data,
   1598		       struct drm_file *file_priv)
   1599{
   1600	struct drm_buf_map *request = data;
   1601
   1602	return __drm_legacy_mapbufs(dev, data, &request->count,
   1603				    &request->virtual, map_one_buf,
   1604				    file_priv);
   1605}
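/*
 * A sketch of the user-space side of DRM_IOCTL_MAP_BUFS (roughly what
 * libdrm's drmMapBufs() does). Because the code above only performs the
 * mapping when the requested count is at least the real buffer count, clients
 * typically call twice; names and error handling are illustrative:
 *
 *	struct drm_buf_map bm = { .count = 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);	(bm.count = real buffer count)
 *	bm.list = calloc(bm.count, sizeof(*bm.list));
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);	(maps buffers, fills bm.list)
 *
 * bm.virtual is then the base of the mapping and bm.list[i].address points at
 * buffer i within it.
 */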
   1606
   1607int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
   1608		  struct drm_file *file_priv)
   1609{
   1610	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
   1611		return -EOPNOTSUPP;
   1612
   1613	if (dev->driver->dma_ioctl)
   1614		return dev->driver->dma_ioctl(dev, data, file_priv);
   1615	else
   1616		return -EINVAL;
   1617}
   1618
   1619struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
   1620{
   1621	struct drm_map_list *entry;
   1622
   1623	list_for_each_entry(entry, &dev->maplist, head) {
   1624		if (entry->map && entry->map->type == _DRM_SHM &&
   1625		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
   1626			return entry->map;
   1627		}
   1628	}
   1629	return NULL;
   1630}
   1631EXPORT_SYMBOL(drm_legacy_getsarea);