cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

drm_gem_cma_helper.c (17415B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 *
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
 * drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
 */
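
/*
 * Example: a minimal driver built on these helpers (an illustrative
 * sketch; the foo_* identifiers are placeholders). DEFINE_DRM_GEM_CMA_FOPS()
 * and DRM_GEM_CMA_DRIVER_OPS come from <drm/drm_gem_cma_helper.h> and wire
 * the mmap, dumb-buffer and PRIME paths to the functions in this file:
 *
 *     DEFINE_DRM_GEM_CMA_FOPS(foo_fops);
 *
 *     static const struct drm_driver foo_driver = {
 *             .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *             .fops            = &foo_fops,
 *             DRM_GEM_CMA_DRIVER_OPS,
 *             .name            = "foo",
 *             .desc            = "foo display controller",
 *             .date            = "20220101",
 *             .major           = 1,
 *             .minor           = 0,
 *     };
 */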

static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
	.free = drm_gem_cma_object_free,
	.print_info = drm_gem_cma_object_print_info,
	.get_sg_table = drm_gem_cma_object_get_sg_table,
	.vmap = drm_gem_cma_object_vmap,
	.mmap = drm_gem_cma_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object) {
		gem_obj = drm->driver->gem_create_object(drm, size);
		if (IS_ERR(gem_obj))
			return ERR_CAST(gem_obj);
		cma_obj = to_drm_gem_cma_obj(gem_obj);
	} else {
		cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
		if (!cma_obj)
			return ERR_PTR(-ENOMEM);
		gem_obj = &cma_obj->base;
	}

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_cma_default_funcs;

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		cma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
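
/*
 * Example: the gem_create_object hook checked above lets a driver embed
 * struct drm_gem_cma_object in a larger, driver-private buffer object.
 * A sketch (the foo_* identifiers are placeholders):
 *
 *     struct foo_bo {
 *             struct drm_gem_cma_object base;
 *             bool scanout;
 *     };
 *
 *     static struct drm_gem_object *
 *     foo_gem_create_object(struct drm_device *drm, size_t size)
 *     {
 *             struct foo_bo *bo;
 *
 *             bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *             if (!bo)
 *                     return ERR_PTR(-ENOMEM);
 *
 *             return &bo->base.base;
 *     }
 *
 * Leaving bo->base.base.funcs NULL makes __drm_gem_cma_create() fall back
 * to drm_gem_cma_default_funcs above.
 */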

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size, false);
	if (IS_ERR(cma_obj))
		return cma_obj;

	if (cma_obj->map_noncoherent) {
		cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
						       &cma_obj->paddr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!cma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			 size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
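
/*
 * Example: allocating a 4-byte-per-pixel scanout buffer from a driver
 * (a sketch; the hardware hook is hypothetical):
 *
 *     struct drm_gem_cma_object *cma_obj;
 *
 *     cma_obj = drm_gem_cma_create(drm, width * height * 4);
 *     if (IS_ERR(cma_obj))
 *             return PTR_ERR(cma_obj);
 *
 *     // cma_obj->vaddr is the kernel mapping, cma_obj->paddr the DMA
 *     // address to program into the (hypothetical) display controller.
 *     foo_program_scanout(foo, cma_obj->paddr);
 *
 * Drop the reference with drm_gem_object_put(&cma_obj->base) when the
 * buffer is no longer needed.
 */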

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle holds the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}

/**
 * drm_gem_cma_free - free resources associated with a CMA GEM object
 * @cma_obj: CMA GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *gem_obj = &cma_obj->base;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr);

	if (gem_obj->import_attach) {
		if (cma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	} else if (cma_obj->vaddr) {
		if (cma_obj->map_noncoherent)
			dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
					     cma_obj->vaddr, cma_obj->paddr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
				    cma_obj->vaddr, cma_obj->paddr);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
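
/*
 * Example: wrapping drm_gem_cma_dumb_create_internal() for hardware that
 * needs a stricter pitch alignment, here assumed to be 128 bytes (a
 * sketch; the foo_* name and the alignment value are placeholders):
 *
 *     static int foo_dumb_create(struct drm_file *file_priv,
 *                                struct drm_device *drm,
 *                                struct drm_mode_create_dumb *args)
 *     {
 *             unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *
 *             args->pitch = ALIGN(min_pitch, 128);
 *
 *             return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *     }
 *
 * This works because the helper only raises pitch and size when they are
 * below the computed minimum.
 */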

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes. Drivers for hardware that doesn't have any
 * additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * The mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
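
/*
 * Example: hooking the helper above into &file_operations on a noMMU
 * platform (a sketch; the foo_fops name is a placeholder and some
 * boilerplate fields are omitted). The DEFINE_DRM_GEM_CMA_FOPS() macro
 * from <drm/drm_gem_cma_helper.h> sets this up automatically; the
 * open-coded equivalent looks like:
 *
 *     static const struct file_operations foo_fops = {
 *             .owner             = THIS_MODULE,
 *             .open              = drm_open,
 *             .release           = drm_release,
 *             .unlocked_ioctl    = drm_ioctl,
 *             .mmap              = drm_gem_mmap,
 *     #ifndef CONFIG_MMU
 *             .get_unmapped_area = drm_gem_cma_get_unmapped_area,
 *     #endif
 *     };
 */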

/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @cma_obj: CMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints paddr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
			    struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);

/**
 * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @cma_obj: CMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *obj = &cma_obj->base;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @cma_obj: CMA GEM object
 * @map: Returns the kernel virtual address of the CMA GEM object's backing
 *       store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the CMA buffers are already mapped into the kernel virtual address
 * space this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
		     struct iosys_map *map)
{
	iosys_map_set_vaddr(map, cma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);

/**
 * drm_gem_cma_mmap - memory-map an exported CMA GEM object
 * @cma_obj: CMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &cma_obj->base;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_DONTEXPAND;

	if (cma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(cma_obj->base.dev->dev,
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(cma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
				  cma_obj->paddr, vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

/**
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
 *	scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a CMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(attach->dmabuf, &map);
	if (ret) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(ret);
	}

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap(attach->dmabuf, &map);
		return obj;
	}

	cma_obj = to_drm_gem_cma_obj(obj);
	cma_obj->vaddr = map.vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
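
/*
 * Example: a driver that needs the kernel mapping of imported buffers can
 * use the DRM_GEM_CMA_DRIVER_OPS_VMAP shortcut mentioned above in place
 * of DRM_GEM_CMA_DRIVER_OPS (a sketch; foo_driver is a placeholder):
 *
 *     static const struct drm_driver foo_driver = {
 *             ...
 *             DRM_GEM_CMA_DRIVER_OPS_VMAP,
 *     };
 */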

MODULE_DESCRIPTION("DRM CMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");