cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

armada_gem.c (13463B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

MODULE_IMPORT_NS(DMA_BUF);

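/*
 * Fault handler for userspace mappings of physically contiguous objects:
 * translate the faulting address to an offset within the object and
 * insert the corresponding PFN of its physical backing into the page
 * tables.
 */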
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

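/*
 * Release an object's backing store: free page-backed memory, return a
 * linear carveout node (and any ioremap of it), or tear down a dma-buf
 * import, before releasing the GEM object itself.
 */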
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

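/*
 * Give an object physical backing. Small allocations come straight from
 * the page allocator; everything else is carved out of the driver's
 * linear memory region and zeroed through a temporary write-combining
 * mapping.
 */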
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

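/*
 * Return a kernel virtual address for the object, lazily ioremapping
 * linear-backed objects on first use.
 */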
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

static const struct drm_gem_object_funcs armada_gem_object_funcs = {
	.free = armada_gem_free_object,
	.export = armada_gem_prime_export,
	.vm_ops = &armada_gem_vm_ops,
};

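/*
 * Allocate a GEM object with no shmem backing; callers attach backing
 * later, either linear memory (dumb buffers) or an imported dma-buf.
 */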
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

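/*
 * Allocate a shmem-backed GEM object whose pages are highmem-capable
 * and reclaimable.
 */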
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
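/*
 * Create a dumb buffer: derive pitch and size from the requested
 * geometry, back the object with linear memory, and return a handle.
 */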
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
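/*
 * Create a shmem-backed object of the requested size and return a
 * handle to it.
 */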
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
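/*
 * Only objects with a shmem backing file can be mapped here; the
 * request is handed to vm_mmap() on that file, anything else is
 * rejected with -EINVAL.
 */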
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

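/*
 * Copy user data into a kernel-mapped object (one with a valid
 * dobj->addr, e.g. a cursor or other linear-backed buffer) after
 * validating the user pointer and the offset/size against the object
 * bounds.
 */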
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret = 0;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	if (fault_in_readable(ptr, args->size))
		return -EFAULT;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object; drop the lookup reference on error */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Prime support */
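/*
 * Build the exporter-side sg_table: shmem objects get one entry per
 * page, page-backed objects a single mapped entry, and linear objects a
 * single entry carrying only the DMA address/length (they have no
 * struct page and are not passed to dma_map_sgtable()).
 */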
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

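/*
 * Undo armada_gem_prime_map_dma_buf(): unmap the table unless the
 * object is linear-backed (never mapped), drop the shmem page
 * references, then free the table.
 */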
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.mmap		= armada_gem_dmabuf_mmap,
};

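/* Export a GEM object as a dma-buf using the driver's dma_buf_ops. */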
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

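/*
 * Import a dma-buf. Re-importing one of our own exports on the same
 * device just takes another reference on the underlying GEM object;
 * foreign buffers are wrapped in a private object with the attachment
 * recorded, and mapping is deferred to armada_gem_map_import().
 */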
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

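/*
 * Map an imported dma-buf for DMA. The display hardware requires a
 * single contiguous region, so scattered or undersized mappings are
 * rejected.
 */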
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}
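
/*
 * Userspace usage sketch (not part of the driver): how the create and
 * mmap ioctls above fit together. Struct field names match the args
 * dereferenced in this file; the DRM_IOCTL_ARMADA_* request macros are
 * assumed to come from the UAPI header <drm/armada_drm.h>.
 *
 *	struct drm_armada_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create);
 *
 *	struct drm_armada_gem_mmap map = {
 *		.handle = create.handle, .offset = 0, .size = 4096,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &map);
 *	memset((void *)(uintptr_t)map.addr, 0, 4096);
 *
 * Note that GEM_PWRITE only succeeds on objects the kernel itself has
 * mapped (the !dobj->addr check above), not on plain shmem-backed
 * objects created via GEM_CREATE.
 */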