cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

lima_gem.c (9337B)


      1// SPDX-License-Identifier: GPL-2.0 OR MIT
      2/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
      3
      4#include <linux/mm.h>
      5#include <linux/iosys-map.h>
      6#include <linux/sync_file.h>
      7#include <linux/pagemap.h>
      8#include <linux/shmem_fs.h>
      9#include <linux/dma-mapping.h>
     10
     11#include <drm/drm_file.h>
     12#include <drm/drm_syncobj.h>
     13#include <drm/drm_utils.h>
     14
     15#include <drm/lima_drm.h>
     16
     17#include "lima_drv.h"
     18#include "lima_gem.h"
     19#include "lima_vm.h"
     20
/*
 * lima_heap_alloc - grow the committed backing store of a heap BO
 * @bo: heap buffer object to grow
 * @vm: if non-NULL, also map the newly committed pages into this VM,
 *      starting right after the previously committed region
 *
 * Heap BOs are committed lazily: each call doubles the committed size
 * (starting from lima_heap_init_nr_pages pages on the first call),
 * clamped to the BO's full size.  The first call also allocates the
 * page array and marks the shmem mapping unevictable.
 *
 * Return: 0 on success, -ENOSPC if the heap is already fully committed,
 * or a negative errno on allocation/DMA-mapping failure.
 */
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	/* Heap is already committed up to the BO's full size. */
	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	mutex_lock(&bo->base.pages_lock);

	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		/* First growth: allocate the full-size page array up front;
		 * entries past new_size stay NULL until later growths fill
		 * them in (__GFP_ZERO).
		 */
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		mapping_set_unevictable(mapping);
	}

	/* Populate only the newly committed page range.  On error, pages
	 * read so far stay in bo->base.pages; presumably released with
	 * the BO via the shmem helpers — confirm against the free path.
	 */
	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			mutex_unlock(&bo->base.pages_lock);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	mutex_unlock(&bo->base.pages_lock);

	/* Build a fresh sg table covering all committed pages (0..i-1). */
	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	/* Tear down the previous DMA mapping (if any) before installing
	 * the one covering the grown range; first growth allocates the
	 * sg_table that bo->base.sgt will point at.
	 */
	if (bo->base.sgt) {
		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(&sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
		return ret;
	}

	*bo->base.sgt = sgt;

	/* Extend the GPU-side mapping over the newly committed pages. */
	if (vm) {
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}
    102
    103int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
    104			   u32 size, u32 flags, u32 *handle)
    105{
    106	int err;
    107	gfp_t mask;
    108	struct drm_gem_shmem_object *shmem;
    109	struct drm_gem_object *obj;
    110	struct lima_bo *bo;
    111	bool is_heap = flags & LIMA_BO_FLAG_HEAP;
    112
    113	shmem = drm_gem_shmem_create(dev, size);
    114	if (IS_ERR(shmem))
    115		return PTR_ERR(shmem);
    116
    117	obj = &shmem->base;
    118
    119	/* Mali Utgard GPU can only support 32bit address space */
    120	mask = mapping_gfp_mask(obj->filp->f_mapping);
    121	mask &= ~__GFP_HIGHMEM;
    122	mask |= __GFP_DMA32;
    123	mapping_set_gfp_mask(obj->filp->f_mapping, mask);
    124
    125	if (is_heap) {
    126		bo = to_lima_bo(obj);
    127		err = lima_heap_alloc(bo, NULL);
    128		if (err)
    129			goto out;
    130	} else {
    131		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
    132
    133		if (IS_ERR(sgt)) {
    134			err = PTR_ERR(sgt);
    135			goto out;
    136		}
    137	}
    138
    139	err = drm_gem_handle_create(file, obj, handle);
    140
    141out:
    142	/* drop reference from allocate - handle holds it now */
    143	drm_gem_object_put(obj);
    144
    145	return err;
    146}
    147
    148static void lima_gem_free_object(struct drm_gem_object *obj)
    149{
    150	struct lima_bo *bo = to_lima_bo(obj);
    151
    152	if (!list_empty(&bo->va))
    153		dev_err(obj->dev->dev, "lima gem free bo still has va\n");
    154
    155	drm_gem_shmem_free(&bo->base);
    156}
    157
    158static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
    159{
    160	struct lima_bo *bo = to_lima_bo(obj);
    161	struct lima_drm_priv *priv = to_lima_drm_priv(file);
    162	struct lima_vm *vm = priv->vm;
    163
    164	return lima_vm_bo_add(vm, bo, true);
    165}
    166
    167static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
    168{
    169	struct lima_bo *bo = to_lima_bo(obj);
    170	struct lima_drm_priv *priv = to_lima_drm_priv(file);
    171	struct lima_vm *vm = priv->vm;
    172
    173	lima_vm_bo_del(vm, bo);
    174}
    175
    176static int lima_gem_pin(struct drm_gem_object *obj)
    177{
    178	struct lima_bo *bo = to_lima_bo(obj);
    179
    180	if (bo->heap_size)
    181		return -EINVAL;
    182
    183	return drm_gem_shmem_pin(&bo->base);
    184}
    185
    186static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
    187{
    188	struct lima_bo *bo = to_lima_bo(obj);
    189
    190	if (bo->heap_size)
    191		return -EINVAL;
    192
    193	return drm_gem_shmem_vmap(&bo->base, map);
    194}
    195
    196static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
    197{
    198	struct lima_bo *bo = to_lima_bo(obj);
    199
    200	if (bo->heap_size)
    201		return -EINVAL;
    202
    203	return drm_gem_shmem_mmap(&bo->base, vma);
    204}
    205
/*
 * GEM object vfuncs shared by all lima BOs.  Most entries are the stock
 * shmem helpers; pin/vmap/mmap are thin wrappers that reject growable
 * heap BOs with -EINVAL (see lima_gem_pin/vmap/mmap above).
 */
static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = lima_gem_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
    219
    220struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
    221{
    222	struct lima_bo *bo;
    223
    224	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    225	if (!bo)
    226		return ERR_PTR(-ENOMEM);
    227
    228	mutex_init(&bo->lock);
    229	INIT_LIST_HEAD(&bo->va);
    230	bo->base.map_wc = true;
    231	bo->base.base.funcs = &lima_gem_funcs;
    232
    233	return &bo->base.base;
    234}
    235
    236int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
    237{
    238	struct drm_gem_object *obj;
    239	struct lima_bo *bo;
    240	struct lima_drm_priv *priv = to_lima_drm_priv(file);
    241	struct lima_vm *vm = priv->vm;
    242
    243	obj = drm_gem_object_lookup(file, handle);
    244	if (!obj)
    245		return -ENOENT;
    246
    247	bo = to_lima_bo(obj);
    248
    249	*va = lima_vm_get_va(vm, bo);
    250
    251	*offset = drm_vma_node_offset_addr(&obj->vma_node);
    252
    253	drm_gem_object_put(obj);
    254	return 0;
    255}
    256
    257static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
    258			    bool write, bool explicit)
    259{
    260	int err;
    261
    262	err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
    263	if (err)
    264		return err;
    265
    266	/* explicit sync use user passed dep fence */
    267	if (explicit)
    268		return 0;
    269
    270	return drm_sched_job_add_implicit_dependencies(&task->base,
    271						       &bo->base.base,
    272						       write);
    273}
    274
    275static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
    276{
    277	int i, err;
    278
    279	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
    280		struct dma_fence *fence = NULL;
    281
    282		if (!submit->in_sync[i])
    283			continue;
    284
    285		err = drm_syncobj_find_fence(file, submit->in_sync[i],
    286					     0, 0, &fence);
    287		if (err)
    288			return err;
    289
    290		err = drm_sched_job_add_dependency(&submit->task->base, fence);
    291		if (err) {
    292			dma_fence_put(fence);
    293			return err;
    294		}
    295	}
    296
    297	return 0;
    298}
    299
/*
 * lima_gem_submit - validate, lock and queue a job for GPU execution
 * @file: submitting client
 * @submit: submit description (BO handles/flags, task, syncobjs, vm)
 *
 * Phases: look up each BO and take a VM-mapping reference (dropped when
 * the task completes), ww-lock all reservations, init the scheduler
 * task, attach explicit/implicit dependencies, queue the task, publish
 * its fence on every BO, then unlock and drop the lookup references.
 *
 * Error unwind walks the labels in reverse acquisition order; the
 * err_out0 loop stops at the first NULL bos[] slot, so it relies on
 * submit->lbos being zero-initialized by the caller (presumably the
 * ioctl path — confirm there).
 *
 * Return: 0 on success, negative errno on failure.
 */
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	/* Resolve the output syncobj first so failures are cheap. */
	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase refcnt of gpu va map to prevent unmapped when executing,
		 * will be decreased when task done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	/* ww-lock every BO's reservation object (deadlock-avoiding). */
	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	/* User-supplied syncobj dependencies. */
	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	/* Reserve fence slots and (implicit sync only) BO dependencies. */
	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	/* Point of no return: the task is queued and owns its resources. */
	fence = lima_sched_context_queue_task(submit->task);

	/* Publish the job's fence on each BO (slots reserved above). */
	for (i = 0; i < submit->nr_bos; i++) {
		dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
				   submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	/* Drop lookup refs; the task holds its own (via task_init). */
	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}
    403
    404int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
    405{
    406	bool write = op & LIMA_GEM_WAIT_WRITE;
    407	long ret, timeout;
    408
    409	if (!op)
    410		return 0;
    411
    412	timeout = drm_timeout_abs_to_jiffies(timeout_ns);
    413
    414	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
    415	if (ret == -ETIME)
    416		ret = timeout ? -ETIMEDOUT : -EBUSY;
    417
    418	return ret;
    419}