cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

exynos_drm_gem.c (11461B)


// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

MODULE_IMPORT_NS(DMA_BUF);

static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region is allocated; otherwise the allocation is only as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* FBDev emulation requires kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* Drop the reference taken at allocation; the handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here;
	 * the exporter releases it once the dmabuf's refcount drops to
	 * zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
	.free = exynos_drm_gem_free_object,
	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.mmap = exynos_drm_gem_mmap,
	.vm_ops = &exynos_drm_gem_vm_ops,
};

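/*
 * Allocate the exynos_gem wrapper, initialize the embedded GEM object
 * and reserve a fake mmap offset for it.
 */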
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	obj->funcs = &exynos_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * When no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop the EXYNOS_BO_NONCONTIG flag.
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* Set the memory type and cache attributes requested by userspace. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

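/*
 * Look up the GEM object for @gem_handle; on success the object's
 * reference count is raised and the caller must drop it with
 * drm_gem_object_put().
 */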
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

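/* Back the userspace mapping with the DMA buffer via dma_mmap_attrs(). */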
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for the framebuffer; this callback
	 * is invoked when userspace issues the DRM_IOCTL_MODE_CREATE_DUMB
	 * ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

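/* Export the backing DMA buffer as an sg_table for PRIME sharing. */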
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * The buffer has been mapped contiguously into the DMA address
	 * space, but with an IOMMU present the underlying pages can be
	 * either CONTIG or NONCONTIG. We assume the simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}
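
Usage notes: the ioctl entry points above map onto a short userspace sequence:
create a buffer with DRM_IOCTL_EXYNOS_GEM_CREATE, fetch its fake mmap offset
with DRM_IOCTL_EXYNOS_GEM_MAP, then mmap() it through the DRM fd. Below is a
minimal sketch of that flow; the device node path, buffer size and flag choice
are assumptions, error handling is trimmed, and the DRM uapi headers are
assumed to be on the include path.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/exynos_drm.h>

int main(void)
{
	/* Assumed device node; the Exynos DRM card index may differ. */
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	/* Ask for a 1 MiB write-combined, possibly non-contiguous buffer;
	 * this exercises exynos_drm_gem_create_ioctl() above. */
	struct drm_exynos_gem_create create = {
		.size = 1 << 20,
		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
	};
	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create))
		goto out;

	/* Look up the fake mmap offset (exynos_drm_gem_map_ioctl()). */
	struct drm_exynos_gem_map map = { .handle = create.handle };
	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MAP, &map))
		goto out;

	/* exynos_drm_gem_mmap() selects pgprot_writecombine() here
	 * because the buffer was created with EXYNOS_BO_WC. */
	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, map.offset);
	if (ptr != MAP_FAILED) {
		memset(ptr, 0, create.size);
		munmap(ptr, create.size);
	}
out:
	close(fd);
	return 0;
}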