cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vc4_bo.c (27668B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  Copyright © 2015 Broadcom
      4 */
      5
      6/**
      7 * DOC: VC4 GEM BO management support
      8 *
      9 * The VC4 GPU architecture (both scanout and rendering) has direct
     10 * access to system memory with no MMU in between.  To support it, we
     11 * use the GEM CMA helper functions to allocate contiguous ranges of
     12 * physical memory for our BOs.
     13 *
     14 * Since the CMA allocator is very slow, we keep a cache of recently
     15 * freed BOs around so that the kernel's allocation of objects for 3D
     16 * rendering can return quickly.
     17 */
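/* Illustrative sketch (editorial addition, not part of the upstream file):
 * a kernel-internal user of this allocator typically obtains a buffer with
 * vc4_bo_create() and drops it with drm_gem_object_put(); the final
 * unreference lands in vc4_free_object() below, which parks the BO on the
 * size-bucketed cache list instead of freeing it whenever possible:
 *
 *	struct vc4_bo *bo;
 *
 *	bo = vc4_bo_create(dev, size, false, VC4_BO_TYPE_KERNEL);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	(use bo->base.vaddr / bo->base.paddr)
 *	drm_gem_object_put(&bo->base.base);
 */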
     18
     19#include <linux/dma-buf.h>
     20
     21#include "vc4_drv.h"
     22#include "uapi/drm/vc4_drm.h"
     23
     24static const struct drm_gem_object_funcs vc4_gem_object_funcs;
     25
     26static const char * const bo_type_names[] = {
     27	"kernel",
     28	"V3D",
     29	"V3D shader",
     30	"dumb",
     31	"binner",
     32	"RCL",
     33	"BCL",
     34	"kernel BO cache",
     35};
     36
     37static bool is_user_label(int label)
     38{
     39	return label >= VC4_BO_TYPE_COUNT;
     40}
     41
     42static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
     43{
     44	int i;
     45
     46	for (i = 0; i < vc4->num_labels; i++) {
     47		if (!vc4->bo_labels[i].num_allocated)
     48			continue;
     49
     50		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
     51			   vc4->bo_labels[i].name,
     52			   vc4->bo_labels[i].size_allocated / 1024,
     53			   vc4->bo_labels[i].num_allocated);
     54	}
     55
     56	mutex_lock(&vc4->purgeable.lock);
     57	if (vc4->purgeable.num)
     58		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
     59			   vc4->purgeable.size / 1024, vc4->purgeable.num);
     60
     61	if (vc4->purgeable.purged_num)
     62		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
     63			   vc4->purgeable.purged_size / 1024,
     64			   vc4->purgeable.purged_num);
     65	mutex_unlock(&vc4->purgeable.lock);
     66}
     67
     68static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
     69{
     70	struct drm_info_node *node = (struct drm_info_node *)m->private;
     71	struct drm_device *dev = node->minor->dev;
     72	struct vc4_dev *vc4 = to_vc4_dev(dev);
     73	struct drm_printer p = drm_seq_file_printer(m);
     74
     75	vc4_bo_stats_print(&p, vc4);
     76
     77	return 0;
     78}
     79
     80/* Takes ownership of *name and returns the appropriate slot for it in
     81 * the bo_labels[] array, extending it as necessary.
     82 *
     83 * This is inefficient and could use a hash table instead of walking
     84 * an array and strcmp()ing.  However, the assumption is that user
     85 * labeling will be infrequent (scanout buffers and other long-lived
     86 * objects, or debug driver builds), so we can live with it for now.
     87 */
     88static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
     89{
     90	int i;
     91	int free_slot = -1;
     92
     93	for (i = 0; i < vc4->num_labels; i++) {
     94		if (!vc4->bo_labels[i].name) {
     95			free_slot = i;
     96		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
     97			kfree(name);
     98			return i;
     99		}
    100	}
    101
    102	if (free_slot != -1) {
    103		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
    104		vc4->bo_labels[free_slot].name = name;
    105		return free_slot;
    106	} else {
    107		u32 new_label_count = vc4->num_labels + 1;
    108		struct vc4_label *new_labels =
    109			krealloc(vc4->bo_labels,
    110				 new_label_count * sizeof(*new_labels),
    111				 GFP_KERNEL);
    112
    113		if (!new_labels) {
    114			kfree(name);
    115			return -1;
    116		}
    117
    118		free_slot = vc4->num_labels;
    119		vc4->bo_labels = new_labels;
    120		vc4->num_labels = new_label_count;
    121
    122		vc4->bo_labels[free_slot].name = name;
    123		vc4->bo_labels[free_slot].num_allocated = 0;
    124		vc4->bo_labels[free_slot].size_allocated = 0;
    125
    126		return free_slot;
    127	}
    128}
    129
    130static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
    131{
    132	struct vc4_bo *bo = to_vc4_bo(gem_obj);
    133	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);
    134
    135	lockdep_assert_held(&vc4->bo_lock);
    136
    137	if (label != -1) {
    138		vc4->bo_labels[label].num_allocated++;
    139		vc4->bo_labels[label].size_allocated += gem_obj->size;
    140	}
    141
    142	vc4->bo_labels[bo->label].num_allocated--;
    143	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;
    144
    145	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
    146	    is_user_label(bo->label)) {
    147		/* Free user BO label slots on last unreference.
    148		 * Slots are just where we track the stats for a given
    149		 * name, and once a name is unused we can reuse that
    150		 * slot.
    151		 */
    152		kfree(vc4->bo_labels[bo->label].name);
    153		vc4->bo_labels[bo->label].name = NULL;
    154	}
    155
    156	bo->label = label;
    157}
    158
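/* Cache buckets are indexed by page count: bucket N holds BOs of N + 1 pages. */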
    159static uint32_t bo_page_index(size_t size)
    160{
    161	return (size / PAGE_SIZE) - 1;
    162}
    163
    164static void vc4_bo_destroy(struct vc4_bo *bo)
    165{
    166	struct drm_gem_object *obj = &bo->base.base;
    167	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
    168
    169	lockdep_assert_held(&vc4->bo_lock);
    170
    171	vc4_bo_set_label(obj, -1);
    172
    173	if (bo->validated_shader) {
    174		kfree(bo->validated_shader->uniform_addr_offsets);
    175		kfree(bo->validated_shader->texture_samples);
    176		kfree(bo->validated_shader);
    177		bo->validated_shader = NULL;
    178	}
    179
    180	drm_gem_cma_free(&bo->base);
    181}
    182
    183static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
    184{
    185	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
    186
    187	lockdep_assert_held(&vc4->bo_lock);
    188	list_del(&bo->unref_head);
    189	list_del(&bo->size_head);
    190}
    191
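/* Return the cache list head for BOs of the given size, growing the bucket
 * array (at least doubling it) on demand.  Returns NULL when the array
 * cannot be grown, in which case vc4_free_object() simply destroys the BO
 * instead of caching it.
 */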
    192static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
    193						     size_t size)
    194{
    195	struct vc4_dev *vc4 = to_vc4_dev(dev);
    196	uint32_t page_index = bo_page_index(size);
    197
    198	if (vc4->bo_cache.size_list_size <= page_index) {
    199		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
    200					page_index + 1);
    201		struct list_head *new_list;
    202		uint32_t i;
    203
    204		new_list = kmalloc_array(new_size, sizeof(struct list_head),
    205					 GFP_KERNEL);
    206		if (!new_list)
    207			return NULL;
    208
    209		/* Rebase the old cached BO lists to their new list
    210		 * head locations.
    211		 */
    212		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
    213			struct list_head *old_list =
    214				&vc4->bo_cache.size_list[i];
    215
    216			if (list_empty(old_list))
    217				INIT_LIST_HEAD(&new_list[i]);
    218			else
    219				list_replace(old_list, &new_list[i]);
    220		}
    221		/* And initialize the brand new BO list heads. */
    222		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
    223			INIT_LIST_HEAD(&new_list[i]);
    224
    225		kfree(vc4->bo_cache.size_list);
    226		vc4->bo_cache.size_list = new_list;
    227		vc4->bo_cache.size_list_size = new_size;
    228	}
    229
    230	return &vc4->bo_cache.size_list[page_index];
    231}
    232
    233static void vc4_bo_cache_purge(struct drm_device *dev)
    234{
    235	struct vc4_dev *vc4 = to_vc4_dev(dev);
    236
    237	mutex_lock(&vc4->bo_lock);
    238	while (!list_empty(&vc4->bo_cache.time_list)) {
    239		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
    240						    struct vc4_bo, unref_head);
    241		vc4_bo_remove_from_cache(bo);
    242		vc4_bo_destroy(bo);
    243	}
    244	mutex_unlock(&vc4->bo_lock);
    245}
    246
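/* Make an unused DONTNEED BO eligible for reclaim: once on the purgeable
 * list it can be freed by vc4_bo_userspace_cache_purge() when CMA
 * allocations start failing.
 */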
    247void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
    248{
    249	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
    250
    251	if (WARN_ON_ONCE(vc4->is_vc5))
    252		return;
    253
    254	mutex_lock(&vc4->purgeable.lock);
    255	list_add_tail(&bo->size_head, &vc4->purgeable.list);
    256	vc4->purgeable.num++;
    257	vc4->purgeable.size += bo->base.base.size;
    258	mutex_unlock(&vc4->purgeable.lock);
    259}
    260
    261static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
    262{
    263	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
    264
    265	if (WARN_ON_ONCE(vc4->is_vc5))
    266		return;
    267
    268	/* list_del_init() is used here because the caller might release
    269	 * the purgeable lock in order to acquire the madv one and update the
    270	 * madv status.
    271	 * During this short period of time a user might decide to mark
    272	 * the BO as unpurgeable, and if bo->madv is set to
    273	 * VC4_MADV_DONTNEED it will try to remove the BO from the
    274	 * purgeable list which will fail if the ->next/prev fields
    275	 * are set to LIST_POISON1/LIST_POISON2 (which is what
    276	 * list_del() does).
    277	 * Re-initializing the list element guarantees that list_del()
    278	 * will work correctly even if it's a NOP.
    279	 */
    280	list_del_init(&bo->size_head);
    281	vc4->purgeable.num--;
    282	vc4->purgeable.size -= bo->base.base.size;
    283}
    284
    285void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
    286{
    287	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
    288
    289	mutex_lock(&vc4->purgeable.lock);
    290	vc4_bo_remove_from_purgeable_pool_locked(bo);
    291	mutex_unlock(&vc4->purgeable.lock);
    292}
    293
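/* Drop the backing memory of a DONTNEED BO: unmap any userspace mappings
 * (so later accesses hit vc4_fault() and get SIGBUS), free the CMA pages,
 * and mark the BO as purged while keeping the GEM object itself alive.
 */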
    294static void vc4_bo_purge(struct drm_gem_object *obj)
    295{
    296	struct vc4_bo *bo = to_vc4_bo(obj);
    297	struct drm_device *dev = obj->dev;
    298
    299	WARN_ON(!mutex_is_locked(&bo->madv_lock));
    300	WARN_ON(bo->madv != VC4_MADV_DONTNEED);
    301
    302	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
    303
    304	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
    305	bo->base.vaddr = NULL;
    306	bo->madv = __VC4_MADV_PURGED;
    307}
    308
    309static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
    310{
    311	struct vc4_dev *vc4 = to_vc4_dev(dev);
    312
    313	mutex_lock(&vc4->purgeable.lock);
    314	while (!list_empty(&vc4->purgeable.list)) {
    315		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
    316						     struct vc4_bo, size_head);
    317		struct drm_gem_object *obj = &bo->base.base;
    318		size_t purged_size = 0;
    319
    320		vc4_bo_remove_from_purgeable_pool_locked(bo);
    321
    322		/* Release the purgeable lock while we're purging the BO so
    323		 * that other people can continue inserting things in the
    324		 * purgeable pool without having to wait for all BOs to be
    325		 * purged.
    326		 */
    327		mutex_unlock(&vc4->purgeable.lock);
    328		mutex_lock(&bo->madv_lock);
    329
    330		/* Since we released the purgeable pool lock before acquiring
    331		 * the BO madv one, the user may have marked the BO as WILLNEED
    332		 * and re-used it in the meantime.
    333		 * Before purging the BO we need to make sure
    334		 * - it is still marked as DONTNEED
    335		 * - it has not been re-inserted in the purgeable list
    336		 * - it is not used by HW blocks
    337		 * If one of these conditions is not met, just skip the entry.
    338		 */
    339		if (bo->madv == VC4_MADV_DONTNEED &&
    340		    list_empty(&bo->size_head) &&
    341		    !refcount_read(&bo->usecnt)) {
    342			purged_size = bo->base.base.size;
    343			vc4_bo_purge(obj);
    344		}
    345		mutex_unlock(&bo->madv_lock);
    346		mutex_lock(&vc4->purgeable.lock);
    347
    348		if (purged_size) {
    349			vc4->purgeable.purged_size += purged_size;
    350			vc4->purgeable.purged_num++;
    351		}
    352	}
    353	mutex_unlock(&vc4->purgeable.lock);
    354}
    355
    356static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
    357					    uint32_t size,
    358					    enum vc4_kernel_bo_type type)
    359{
    360	struct vc4_dev *vc4 = to_vc4_dev(dev);
    361	uint32_t page_index = bo_page_index(size);
    362	struct vc4_bo *bo = NULL;
    363
    364	mutex_lock(&vc4->bo_lock);
    365	if (page_index >= vc4->bo_cache.size_list_size)
    366		goto out;
    367
    368	if (list_empty(&vc4->bo_cache.size_list[page_index]))
    369		goto out;
    370
    371	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
    372			      struct vc4_bo, size_head);
    373	vc4_bo_remove_from_cache(bo);
    374	kref_init(&bo->base.base.refcount);
    375
    376out:
    377	if (bo)
    378		vc4_bo_set_label(&bo->base.base, type);
    379	mutex_unlock(&vc4->bo_lock);
    380	return bo;
    381}
    382
    383/**
    384 * vc4_create_object - Implementation of driver->gem_create_object.
    385 * @dev: DRM device
    386 * @size: Size in bytes of the memory the object will reference
    387 *
    388 * This lets the CMA helpers allocate object structs for us, and keep
    389 * our BO stats correct.
    390 */
    391struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
    392{
    393	struct vc4_dev *vc4 = to_vc4_dev(dev);
    394	struct vc4_bo *bo;
    395
    396	if (WARN_ON_ONCE(vc4->is_vc5))
    397		return ERR_PTR(-ENODEV);
    398
    399	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    400	if (!bo)
    401		return ERR_PTR(-ENOMEM);
    402
    403	bo->madv = VC4_MADV_WILLNEED;
    404	refcount_set(&bo->usecnt, 0);
    405	mutex_init(&bo->madv_lock);
    406	mutex_lock(&vc4->bo_lock);
    407	bo->label = VC4_BO_TYPE_KERNEL;
    408	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
    409	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
    410	mutex_unlock(&vc4->bo_lock);
    411
    412	bo->base.base.funcs = &vc4_gem_object_funcs;
    413
    414	return &bo->base.base;
    415}
    416
    417struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
    418			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
    419{
    420	size_t size = roundup(unaligned_size, PAGE_SIZE);
    421	struct vc4_dev *vc4 = to_vc4_dev(dev);
    422	struct drm_gem_cma_object *cma_obj;
    423	struct vc4_bo *bo;
    424
    425	if (WARN_ON_ONCE(vc4->is_vc5))
    426		return ERR_PTR(-ENODEV);
    427
    428	if (size == 0)
    429		return ERR_PTR(-EINVAL);
    430
    431	/* First, try to get a vc4_bo from the kernel BO cache. */
    432	bo = vc4_bo_get_from_cache(dev, size, type);
    433	if (bo) {
    434		if (!allow_unzeroed)
    435			memset(bo->base.vaddr, 0, bo->base.base.size);
    436		return bo;
    437	}
    438
    439	cma_obj = drm_gem_cma_create(dev, size);
    440	if (IS_ERR(cma_obj)) {
    441		/*
    442		 * If we've run out of CMA memory, kill the cache of
     443		 * CMA allocations we've got lying around and try again.
    444		 */
    445		vc4_bo_cache_purge(dev);
    446		cma_obj = drm_gem_cma_create(dev, size);
    447	}
    448
    449	if (IS_ERR(cma_obj)) {
    450		/*
    451		 * Still not enough CMA memory, purge the userspace BO
    452		 * cache and retry.
    453		 * This is sub-optimal since we purge the whole userspace
     454		 * BO cache, which forces users that want to re-use the BO to
    455		 * restore its initial content.
    456		 * Ideally, we should purge entries one by one and retry
    457		 * after each to see if CMA allocation succeeds. Or even
    458		 * better, try to find an entry with at least the same
    459		 * size.
    460		 */
    461		vc4_bo_userspace_cache_purge(dev);
    462		cma_obj = drm_gem_cma_create(dev, size);
    463	}
    464
    465	if (IS_ERR(cma_obj)) {
    466		struct drm_printer p = drm_info_printer(vc4->base.dev);
    467		DRM_ERROR("Failed to allocate from CMA:\n");
    468		vc4_bo_stats_print(&p, vc4);
    469		return ERR_PTR(-ENOMEM);
    470	}
    471	bo = to_vc4_bo(&cma_obj->base);
    472
    473	/* By default, BOs do not support the MADV ioctl. This will be enabled
    474	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
    475	 * BOs).
    476	 */
    477	bo->madv = __VC4_MADV_NOTSUPP;
    478
    479	mutex_lock(&vc4->bo_lock);
    480	vc4_bo_set_label(&cma_obj->base, type);
    481	mutex_unlock(&vc4->bo_lock);
    482
    483	return bo;
    484}
    485
    486int vc4_bo_dumb_create(struct drm_file *file_priv,
    487		       struct drm_device *dev,
    488		       struct drm_mode_create_dumb *args)
    489{
    490	struct vc4_dev *vc4 = to_vc4_dev(dev);
    491	struct vc4_bo *bo = NULL;
    492	int ret;
    493
    494	if (WARN_ON_ONCE(vc4->is_vc5))
    495		return -ENODEV;
    496
    497	ret = vc4_dumb_fixup_args(args);
    498	if (ret)
    499		return ret;
    500
    501	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
    502	if (IS_ERR(bo))
    503		return PTR_ERR(bo);
    504
    505	bo->madv = VC4_MADV_WILLNEED;
    506
    507	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
    508	drm_gem_object_put(&bo->base.base);
    509
    510	return ret;
    511}
    512
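/* Evict BOs that have sat in the kernel BO cache for more than a second,
 * oldest first.  If the oldest survivor is still fresh, re-arm the
 * one-second timer and stop.
 */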
    513static void vc4_bo_cache_free_old(struct drm_device *dev)
    514{
    515	struct vc4_dev *vc4 = to_vc4_dev(dev);
    516	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
    517
    518	lockdep_assert_held(&vc4->bo_lock);
    519
    520	while (!list_empty(&vc4->bo_cache.time_list)) {
    521		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
    522						    struct vc4_bo, unref_head);
    523		if (time_before(expire_time, bo->free_time)) {
    524			mod_timer(&vc4->bo_cache.time_timer,
    525				  round_jiffies_up(jiffies +
    526						   msecs_to_jiffies(1000)));
    527			return;
    528		}
    529
    530		vc4_bo_remove_from_cache(bo);
    531		vc4_bo_destroy(bo);
    532	}
    533}
    534
    535/* Called on the last userspace/kernel unreference of the BO.  Returns
    536 * it to the BO cache if possible, otherwise frees it.
    537 */
    538static void vc4_free_object(struct drm_gem_object *gem_bo)
    539{
    540	struct drm_device *dev = gem_bo->dev;
    541	struct vc4_dev *vc4 = to_vc4_dev(dev);
    542	struct vc4_bo *bo = to_vc4_bo(gem_bo);
    543	struct list_head *cache_list;
    544
    545	/* Remove the BO from the purgeable list. */
    546	mutex_lock(&bo->madv_lock);
    547	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
    548		vc4_bo_remove_from_purgeable_pool(bo);
    549	mutex_unlock(&bo->madv_lock);
    550
    551	mutex_lock(&vc4->bo_lock);
    552	/* If the object references someone else's memory, we can't cache it.
    553	 */
    554	if (gem_bo->import_attach) {
    555		vc4_bo_destroy(bo);
    556		goto out;
    557	}
    558
    559	/* Don't cache if it was publicly named. */
    560	if (gem_bo->name) {
    561		vc4_bo_destroy(bo);
    562		goto out;
    563	}
    564
    565	/* If this object was partially constructed but CMA allocation
    566	 * had failed, just free it. Can also happen when the BO has been
    567	 * purged.
    568	 */
    569	if (!bo->base.vaddr) {
    570		vc4_bo_destroy(bo);
    571		goto out;
    572	}
    573
    574	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
    575	if (!cache_list) {
    576		vc4_bo_destroy(bo);
    577		goto out;
    578	}
    579
    580	if (bo->validated_shader) {
    581		kfree(bo->validated_shader->uniform_addr_offsets);
    582		kfree(bo->validated_shader->texture_samples);
    583		kfree(bo->validated_shader);
    584		bo->validated_shader = NULL;
    585	}
    586
    587	/* Reset madv and usecnt before adding the BO to the cache. */
    588	bo->madv = __VC4_MADV_NOTSUPP;
    589	refcount_set(&bo->usecnt, 0);
    590
    591	bo->t_format = false;
    592	bo->free_time = jiffies;
    593	list_add(&bo->size_head, cache_list);
    594	list_add(&bo->unref_head, &vc4->bo_cache.time_list);
    595
    596	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);
    597
    598	vc4_bo_cache_free_old(dev);
    599
    600out:
    601	mutex_unlock(&vc4->bo_lock);
    602}
    603
    604static void vc4_bo_cache_time_work(struct work_struct *work)
    605{
    606	struct vc4_dev *vc4 =
    607		container_of(work, struct vc4_dev, bo_cache.time_work);
    608	struct drm_device *dev = &vc4->base;
    609
    610	mutex_lock(&vc4->bo_lock);
    611	vc4_bo_cache_free_old(dev);
    612	mutex_unlock(&vc4->bo_lock);
    613}
    614
    615int vc4_bo_inc_usecnt(struct vc4_bo *bo)
    616{
    617	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
    618	int ret;
    619
    620	if (WARN_ON_ONCE(vc4->is_vc5))
    621		return -ENODEV;
    622
    623	/* Fast path: if the BO is already retained by someone, no need to
    624	 * check the madv status.
    625	 */
    626	if (refcount_inc_not_zero(&bo->usecnt))
    627		return 0;
    628
    629	mutex_lock(&bo->madv_lock);
    630	switch (bo->madv) {
    631	case VC4_MADV_WILLNEED:
    632		if (!refcount_inc_not_zero(&bo->usecnt))
    633			refcount_set(&bo->usecnt, 1);
    634		ret = 0;
    635		break;
    636	case VC4_MADV_DONTNEED:
    637		/* We shouldn't use a BO marked as purgeable if at least
    638		 * someone else retained its content by incrementing usecnt.
    639		 * Luckily the BO hasn't been purged yet, but something wrong
    640		 * is happening here. Just throw an error instead of
    641		 * authorizing this use case.
    642		 */
    643	case __VC4_MADV_PURGED:
    644		/* We can't use a purged BO. */
    645	default:
    646		/* Invalid madv value. */
    647		ret = -EINVAL;
    648		break;
    649	}
    650	mutex_unlock(&bo->madv_lock);
    651
    652	return ret;
    653}
    654
    655void vc4_bo_dec_usecnt(struct vc4_bo *bo)
    656{
    657	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
    658
    659	if (WARN_ON_ONCE(vc4->is_vc5))
    660		return;
    661
    662	/* Fast path: if the BO is still retained by someone, no need to test
    663	 * the madv value.
    664	 */
    665	if (refcount_dec_not_one(&bo->usecnt))
    666		return;
    667
    668	mutex_lock(&bo->madv_lock);
    669	if (refcount_dec_and_test(&bo->usecnt) &&
    670	    bo->madv == VC4_MADV_DONTNEED)
    671		vc4_bo_add_to_purgeable_pool(bo);
    672	mutex_unlock(&bo->madv_lock);
    673}
    674
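/* The timer fires in atomic context, where bo_lock (a mutex) cannot be
 * taken, so the actual cache eviction is deferred to a workqueue.
 */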
    675static void vc4_bo_cache_time_timer(struct timer_list *t)
    676{
    677	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
    678
    679	schedule_work(&vc4->bo_cache.time_work);
    680}
    681
    682static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
    683{
    684	struct vc4_bo *bo = to_vc4_bo(obj);
    685	struct dma_buf *dmabuf;
    686	int ret;
    687
    688	if (bo->validated_shader) {
    689		DRM_DEBUG("Attempting to export shader BO\n");
    690		return ERR_PTR(-EINVAL);
    691	}
    692
    693	/* Note: as soon as the BO is exported it becomes unpurgeable, because
     694	 * no one ever decrements the usecnt even if the reference held by the
    695	 * exported BO is released. This shouldn't be a problem since we don't
    696	 * expect exported BOs to be marked as purgeable.
    697	 */
    698	ret = vc4_bo_inc_usecnt(bo);
    699	if (ret) {
    700		DRM_ERROR("Failed to increment BO usecnt\n");
    701		return ERR_PTR(ret);
    702	}
    703
    704	dmabuf = drm_gem_prime_export(obj, flags);
    705	if (IS_ERR(dmabuf))
    706		vc4_bo_dec_usecnt(bo);
    707
    708	return dmabuf;
    709}
    710
    711static vm_fault_t vc4_fault(struct vm_fault *vmf)
    712{
    713	struct vm_area_struct *vma = vmf->vma;
    714	struct drm_gem_object *obj = vma->vm_private_data;
    715	struct vc4_bo *bo = to_vc4_bo(obj);
    716
    717	/* The only reason we would end up here is when user-space accesses
     718	 * the BO's memory after it's been purged.
    719	 */
    720	mutex_lock(&bo->madv_lock);
    721	WARN_ON(bo->madv != __VC4_MADV_PURGED);
    722	mutex_unlock(&bo->madv_lock);
    723
    724	return VM_FAULT_SIGBUS;
    725}
    726
    727static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
    728{
    729	struct vc4_bo *bo = to_vc4_bo(obj);
    730
    731	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
     732		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
    733		return -EINVAL;
    734	}
    735
    736	if (bo->madv != VC4_MADV_WILLNEED) {
     737		DRM_DEBUG("mmapping of %s BO not allowed\n",
    738			  bo->madv == VC4_MADV_DONTNEED ?
    739			  "purgeable" : "purged");
    740		return -EINVAL;
    741	}
    742
    743	return drm_gem_cma_mmap(&bo->base, vma);
    744}
    745
    746static const struct vm_operations_struct vc4_vm_ops = {
    747	.fault = vc4_fault,
    748	.open = drm_gem_vm_open,
    749	.close = drm_gem_vm_close,
    750};
    751
    752static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
    753	.free = vc4_free_object,
    754	.export = vc4_prime_export,
    755	.get_sg_table = drm_gem_cma_object_get_sg_table,
    756	.vmap = drm_gem_cma_object_vmap,
    757	.mmap = vc4_gem_object_mmap,
    758	.vm_ops = &vc4_vm_ops,
    759};
    760
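/* Make sure this client holds a reference on the shared binner BO before
 * it creates rendering BOs; fails with -ENODEV when the V3D component has
 * not been bound.
 */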
    761static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
    762{
    763	if (!vc4->v3d)
    764		return -ENODEV;
    765
    766	if (vc4file->bin_bo_used)
    767		return 0;
    768
    769	return vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
    770}
    771
    772int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
    773			struct drm_file *file_priv)
    774{
    775	struct drm_vc4_create_bo *args = data;
    776	struct vc4_file *vc4file = file_priv->driver_priv;
    777	struct vc4_dev *vc4 = to_vc4_dev(dev);
    778	struct vc4_bo *bo = NULL;
    779	int ret;
    780
    781	if (WARN_ON_ONCE(vc4->is_vc5))
    782		return -ENODEV;
    783
    784	ret = vc4_grab_bin_bo(vc4, vc4file);
    785	if (ret)
    786		return ret;
    787
    788	/*
    789	 * We can't allocate from the BO cache, because the BOs don't
    790	 * get zeroed, and that might leak data between users.
    791	 */
    792	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
    793	if (IS_ERR(bo))
    794		return PTR_ERR(bo);
    795
    796	bo->madv = VC4_MADV_WILLNEED;
    797
    798	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
    799	drm_gem_object_put(&bo->base.base);
    800
    801	return ret;
    802}
    803
    804int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
    805		      struct drm_file *file_priv)
    806{
    807	struct vc4_dev *vc4 = to_vc4_dev(dev);
    808	struct drm_vc4_mmap_bo *args = data;
    809	struct drm_gem_object *gem_obj;
    810
    811	if (WARN_ON_ONCE(vc4->is_vc5))
    812		return -ENODEV;
    813
    814	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
    815	if (!gem_obj) {
    816		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
    817		return -EINVAL;
    818	}
    819
    820	/* The mmap offset was set up at BO allocation time. */
    821	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
    822
    823	drm_gem_object_put(gem_obj);
    824	return 0;
    825}
    826
    827int
    828vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
    829			   struct drm_file *file_priv)
    830{
    831	struct drm_vc4_create_shader_bo *args = data;
    832	struct vc4_file *vc4file = file_priv->driver_priv;
    833	struct vc4_dev *vc4 = to_vc4_dev(dev);
    834	struct vc4_bo *bo = NULL;
    835	int ret;
    836
    837	if (WARN_ON_ONCE(vc4->is_vc5))
    838		return -ENODEV;
    839
    840	if (args->size == 0)
    841		return -EINVAL;
    842
    843	if (args->size % sizeof(u64) != 0)
    844		return -EINVAL;
    845
    846	if (args->flags != 0) {
    847		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
    848		return -EINVAL;
    849	}
    850
    851	if (args->pad != 0) {
    852		DRM_INFO("Pad set: 0x%08x\n", args->pad);
    853		return -EINVAL;
    854	}
    855
    856	ret = vc4_grab_bin_bo(vc4, vc4file);
    857	if (ret)
    858		return ret;
    859
    860	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
    861	if (IS_ERR(bo))
    862		return PTR_ERR(bo);
    863
    864	bo->madv = VC4_MADV_WILLNEED;
    865
    866	if (copy_from_user(bo->base.vaddr,
    867			     (void __user *)(uintptr_t)args->data,
    868			     args->size)) {
    869		ret = -EFAULT;
    870		goto fail;
    871	}
     872	/* Clear the rest of the memory, which may hold stale data when the
     873	 * BO came from the BO cache.
    874	 */
    875	memset(bo->base.vaddr + args->size, 0,
    876	       bo->base.base.size - args->size);
    877
    878	bo->validated_shader = vc4_validate_shader(&bo->base);
    879	if (!bo->validated_shader) {
    880		ret = -EINVAL;
    881		goto fail;
    882	}
    883
    884	/* We have to create the handle after validation, to avoid
     885	 * races where users could do things like mmap the shader BO.
    886	 */
    887	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
    888
    889fail:
    890	drm_gem_object_put(&bo->base.base);
    891
    892	return ret;
    893}
    894
    895/**
    896 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
    897 * @dev: DRM device
    898 * @data: ioctl argument
    899 * @file_priv: DRM file for this fd
    900 *
    901 * The tiling state of the BO decides the default modifier of an fb if
    902 * no specific modifier was set by userspace, and the return value of
    903 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
    904 * received from dmabuf as the same tiling format as the producer
    905 * used).
    906 */
    907int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
    908			 struct drm_file *file_priv)
    909{
    910	struct vc4_dev *vc4 = to_vc4_dev(dev);
    911	struct drm_vc4_set_tiling *args = data;
    912	struct drm_gem_object *gem_obj;
    913	struct vc4_bo *bo;
    914	bool t_format;
    915
    916	if (WARN_ON_ONCE(vc4->is_vc5))
    917		return -ENODEV;
    918
    919	if (args->flags != 0)
    920		return -EINVAL;
    921
    922	switch (args->modifier) {
    923	case DRM_FORMAT_MOD_NONE:
    924		t_format = false;
    925		break;
    926	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
    927		t_format = true;
    928		break;
    929	default:
    930		return -EINVAL;
    931	}
    932
    933	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
    934	if (!gem_obj) {
    935		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
    936		return -ENOENT;
    937	}
    938	bo = to_vc4_bo(gem_obj);
    939	bo->t_format = t_format;
    940
    941	drm_gem_object_put(gem_obj);
    942
    943	return 0;
    944}
    945
    946/**
    947 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
    948 * @dev: DRM device
    949 * @data: ioctl argument
    950 * @file_priv: DRM file for this fd
    951 *
    952 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
    953 */
    954int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
    955			 struct drm_file *file_priv)
    956{
    957	struct vc4_dev *vc4 = to_vc4_dev(dev);
    958	struct drm_vc4_get_tiling *args = data;
    959	struct drm_gem_object *gem_obj;
    960	struct vc4_bo *bo;
    961
    962	if (WARN_ON_ONCE(vc4->is_vc5))
    963		return -ENODEV;
    964
    965	if (args->flags != 0 || args->modifier != 0)
    966		return -EINVAL;
    967
    968	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
    969	if (!gem_obj) {
    970		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
    971		return -ENOENT;
    972	}
    973	bo = to_vc4_bo(gem_obj);
    974
    975	if (bo->t_format)
    976		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
    977	else
    978		args->modifier = DRM_FORMAT_MOD_NONE;
    979
    980	drm_gem_object_put(gem_obj);
    981
    982	return 0;
    983}
    984
    985static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
    986int vc4_bo_cache_init(struct drm_device *dev)
    987{
    988	struct vc4_dev *vc4 = to_vc4_dev(dev);
    989	int i;
    990
    991	if (WARN_ON_ONCE(vc4->is_vc5))
    992		return -ENODEV;
    993
    994	/* Create the initial set of BO labels that the kernel will
    995	 * use.  This lets us avoid a bunch of string reallocation in
    996	 * the kernel's draw and BO allocation paths.
    997	 */
    998	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
    999				 GFP_KERNEL);
   1000	if (!vc4->bo_labels)
   1001		return -ENOMEM;
   1002	vc4->num_labels = VC4_BO_TYPE_COUNT;
   1003
   1004	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
   1005	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
   1006		vc4->bo_labels[i].name = bo_type_names[i];
   1007
   1008	mutex_init(&vc4->bo_lock);
   1009
   1010	vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
   1011
   1012	INIT_LIST_HEAD(&vc4->bo_cache.time_list);
   1013
   1014	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
   1015	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
   1016
   1017	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
   1018}
   1019
   1020static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
   1021{
   1022	struct vc4_dev *vc4 = to_vc4_dev(dev);
   1023	int i;
   1024
   1025	del_timer(&vc4->bo_cache.time_timer);
   1026	cancel_work_sync(&vc4->bo_cache.time_work);
   1027
   1028	vc4_bo_cache_purge(dev);
   1029
   1030	for (i = 0; i < vc4->num_labels; i++) {
   1031		if (vc4->bo_labels[i].num_allocated) {
   1032			DRM_ERROR("Destroying BO cache with %d %s "
   1033				  "BOs still allocated\n",
   1034				  vc4->bo_labels[i].num_allocated,
   1035				  vc4->bo_labels[i].name);
   1036		}
   1037
   1038		if (is_user_label(i))
   1039			kfree(vc4->bo_labels[i].name);
   1040	}
   1041	kfree(vc4->bo_labels);
   1042}
   1043
   1044int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
   1045		       struct drm_file *file_priv)
   1046{
   1047	struct vc4_dev *vc4 = to_vc4_dev(dev);
   1048	struct drm_vc4_label_bo *args = data;
   1049	char *name;
   1050	struct drm_gem_object *gem_obj;
   1051	int ret = 0, label;
   1052
   1053	if (WARN_ON_ONCE(vc4->is_vc5))
   1054		return -ENODEV;
   1055
   1056	if (!args->len)
   1057		return -EINVAL;
   1058
   1059	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
   1060	if (IS_ERR(name))
   1061		return PTR_ERR(name);
   1062
   1063	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
   1064	if (!gem_obj) {
   1065		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
   1066		kfree(name);
   1067		return -ENOENT;
   1068	}
   1069
   1070	mutex_lock(&vc4->bo_lock);
   1071	label = vc4_get_user_label(vc4, name);
   1072	if (label != -1)
   1073		vc4_bo_set_label(gem_obj, label);
   1074	else
   1075		ret = -ENOMEM;
   1076	mutex_unlock(&vc4->bo_lock);
   1077
   1078	drm_gem_object_put(gem_obj);
   1079
   1080	return ret;
   1081}