cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtk_drm_gem.c (6654B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/dma-buf.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"

static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

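/*
 * GEM object callbacks the DRM core uses for MediaTek buffers: free,
 * PRIME scatter-gather export, kernel vmap/vunmap and userspace mmap.
 */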
static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
	.free = mtk_drm_gem_free_object,
	.get_sg_table = mtk_gem_prime_get_sg_table,
	.vmap = mtk_drm_gem_prime_vmap,
	.vunmap = mtk_drm_gem_prime_vunmap,
	.mmap = mtk_drm_gem_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

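/*
 * Allocate a mtk_drm_gem_obj and initialise the embedded GEM object with a
 * page-aligned size; no backing storage is allocated here.
 */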
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}

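/*
 * Allocate a GEM object backed by a write-combined buffer from
 * dma_alloc_attrs().  When alloc_kmap is false the allocation uses
 * DMA_ATTR_NO_KERNEL_MAPPING and no kernel virtual address is kept.
 */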
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   size_t size, bool alloc_kmap)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
					  &mtk_gem->dma_addr, GFP_KERNEL,
					  mtk_gem->dma_attrs);
	if (!mtk_gem->cookie) {
		DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	if (alloc_kmap)
		mtk_gem->kvaddr = mtk_gem->cookie;

	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
			 mtk_gem->cookie, &mtk_gem->dma_addr,
			 size);

	return mtk_gem;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}

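/*
 * Release a GEM object: imported PRIME buffers go through
 * drm_prime_gem_destroy(), locally allocated ones through dma_free_attrs().
 */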
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (mtk_gem->sg)
		drm_prime_gem_destroy(obj, mtk_gem->sg);
	else
		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
			       mtk_gem->dma_addr, mtk_gem->dma_attrs);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(mtk_gem);
}

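/*
 * Handle DRM_IOCTL_MODE_CREATE_DUMB: derive pitch and size from the
 * requested geometry, allocate the buffer and hand a GEM handle back to
 * userspace.
 */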
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
	if (IS_ERR(mtk_gem))
		return PTR_ERR(mtk_gem);

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle returned to userspace holds that ID.
	 */
	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&mtk_gem->base);

	return 0;

err_handle_create:
	mtk_drm_gem_free_object(&mtk_gem->base);
	return ret;
}

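/*
 * Back a userspace mmap() of the GEM object with the DMA buffer through
 * dma_mmap_attrs(), using a write-combined mapping of the whole buffer.
 */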
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
				    mtk_gem->dma_addr, obj->size,
				    mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

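/*
 * Import a PRIME buffer: the sg_table must be DMA-contiguous, because the
 * driver keeps only a single dma_addr for the whole object.
 */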
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct mtk_drm_gem_obj *mtk_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("sg_table is not contiguous");
		return ERR_PTR(-EINVAL);
	}

	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
	mtk_gem->sg = sg;

	return &mtk_gem->base;
}

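/*
 * Map the buffer into kernel address space for PRIME access.  Buffers
 * allocated without a kernel mapping are vmap()ed write-combined through a
 * page array built from the sg_table.
 */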
int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct sg_table *sgt = NULL;
	unsigned int npages;

	if (mtk_gem->kvaddr)
		goto out;

	sgt = mtk_gem_prime_get_sg_table(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	npages = obj->size >> PAGE_SHIFT;
	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
	if (!mtk_gem->pages) {
		kfree(sgt);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);

	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
			       pgprot_writecombine(PAGE_KERNEL));

out:
	kfree(sgt);
	iosys_map_set_vaddr(map, mtk_gem->kvaddr);

	return 0;
}

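/*
 * Undo mtk_drm_gem_prime_vmap(): only buffers that were mapped with vmap()
 * (i.e. that have a pages array) are torn down here.
 */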
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
			      struct iosys_map *map)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	void *vaddr = map->vaddr;

	if (!mtk_gem->pages)
		return;

	vunmap(vaddr);
	mtk_gem->kvaddr = 0;
	kfree(mtk_gem->pages);
}
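
For reference, a minimal userspace sketch of how the paths above get exercised: DRM_IOCTL_MODE_CREATE_DUMB lands in mtk_drm_gem_dumb_create(), DRM_IOCTL_MODE_MAP_DUMB hands back the fake mmap offset, and the mmap() of the DRM fd should then be served by mtk_drm_gem_object_mmap(). This is not part of mtk_drm_gem.c; the device node and the 640x480, 32 bpp geometry are illustrative assumptions and error handling is compressed.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	struct drm_mode_create_dumb create = { .width = 640, .height = 480, .bpp = 32 };
	struct drm_mode_map_dumb map = { 0 };
	struct drm_mode_destroy_dumb destroy = { 0 };
	void *ptr;
	int ret = 1;

	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed primary node */
	if (fd < 0)
		return 1;

	/* Served by mtk_drm_gem_dumb_create(); pitch and size are filled in. */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		goto out_close;

	/* Ask DRM for the fake mmap offset of the GEM handle. */
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		goto out_destroy;

	/* This mmap() should end up in mtk_drm_gem_object_mmap(). */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr != MAP_FAILED) {
		memset(ptr, 0, create.size);	/* write to the write-combined DMA buffer */
		munmap(ptr, create.size);
		ret = 0;
	}

out_destroy:
	destroy.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
out_close:
	close(fd);
	return ret;
}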