cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

intel_region_ttm.c (7144B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright © 2021 Intel Corporation
      4 */
      5#include <drm/ttm/ttm_bo_driver.h>
      6#include <drm/ttm/ttm_device.h>
      7#include <drm/ttm/ttm_range_manager.h>
      8
      9#include "i915_drv.h"
     10#include "i915_scatterlist.h"
     11#include "i915_ttm_buddy_manager.h"
     12
     13#include "intel_region_ttm.h"
     14
     15#include "gem/i915_gem_region.h"
     16#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
     17/**
     18 * DOC: TTM support structure
     19 *
     20 * The code in this file deals with setting up memory managers for TTM
     21 * LMEM and MOCK regions and converting the output from
 * the managers to struct sg_table, basically providing the mapping from
     23 * i915 GEM regions to TTM memory types and resource managers.
     24 */
     25
     26/**
     27 * intel_region_ttm_device_init - Initialize a TTM device
     28 * @dev_priv: Pointer to an i915 device private structure.
     29 *
     30 * Return: 0 on success, negative error code on failure.
     31 */
     32int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
     33{
     34	struct drm_device *drm = &dev_priv->drm;
     35
     36	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
     37			       drm->dev, drm->anon_inode->i_mapping,
     38			       drm->vma_offset_manager, false, false);
     39}
     40
/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Counterpart to intel_region_ttm_device_init(); tears down the TTM
 * device embedded in @dev_priv.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
     49
     50/*
     51 * Map the i915 memory regions to TTM memory types. We use the
     52 * driver-private types for now, reserving TTM_PL_VRAM for stolen
     53 * memory and TTM_PL_TT for GGTT use if decided to implement this.
     54 */
     55int intel_region_to_ttm_type(const struct intel_memory_region *mem)
     56{
     57	int type;
     58
     59	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
     60		   mem->type != INTEL_MEMORY_MOCK &&
     61		   mem->type != INTEL_MEMORY_SYSTEM);
     62
     63	if (mem->type == INTEL_MEMORY_SYSTEM)
     64		return TTM_PL_SYSTEM;
     65
     66	type = mem->instance + TTM_PL_PRIV;
     67	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
     68
     69	return type;
     70}
     71
     72/**
     73 * intel_region_ttm_init - Initialize a memory region for TTM.
     74 * @mem: The region to initialize.
     75 *
     76 * This function initializes a suitable TTM resource manager for the
     77 * region, and if it's a LMEM region type, attaches it to the TTM
     78 * device. MOCK regions are NOT attached to the TTM device, since we don't
     79 * have one for the mock selftests.
     80 *
     81 * Return: 0 on success, negative error code on failure.
     82 */
     83int intel_region_ttm_init(struct intel_memory_region *mem)
     84{
     85	struct ttm_device *bdev = &mem->i915->bdev;
     86	int mem_type = intel_region_to_ttm_type(mem);
     87	int ret;
     88
     89	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
     90				      resource_size(&mem->region),
     91				      mem->io_size,
     92				      mem->min_page_size, PAGE_SIZE);
     93	if (ret)
     94		return ret;
     95
     96	mem->region_private = ttm_manager_type(bdev, mem_type);
     97
     98	return 0;
     99}
    100
/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 *
 * Return: 0 on success. -EBUSY if objects were still present in the
 * region after the flush retries; in that case the manager is
 * deliberately NOT freed, to avoid use-after-free on the leaked objects.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;	/* Assume busy until the object list is seen empty. */
	int count;

	/*
	 * Put the region's move fences. This releases requests that
	 * may hold on to contexts and vms that may hold on to buffer
	 * objects placed in this region.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from region. Retry a bounded number of times. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		/* Check emptiness under the lock; only success clears ret. */
		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		/* Back off, then kick TTM's delayed-destroy work before retrying. */
		msleep(20);
		flush_delayed_work(&mem->i915->bdev.wq);
	}

	/* If we leaked objects, don't free the region, causing use after free */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}
    149
    150/**
    151 * intel_region_ttm_resource_to_rsgt -
    152 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
    153 * @mem: The memory region.
    154 * @res: The resource manager resource obtained from the TTM resource manager.
    155 *
    156 * The gem backends typically use sg-tables for operations on the underlying
    157 * io_memory. So provide a way for the backends to translate the
    158 * nodes they are handed from TTM to sg-tables.
    159 *
    160 * Return: A malloced sg_table on success, an error pointer on failure.
    161 */
    162struct i915_refct_sgt *
    163intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
    164				  struct ttm_resource *res)
    165{
    166	if (mem->is_range_manager) {
    167		struct ttm_range_mgr_node *range_node =
    168			to_ttm_range_mgr_node(res);
    169
    170		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
    171					      mem->region.start);
    172	} else {
    173		return i915_rsgt_from_buddy_resource(res, mem->region.start);
    174	}
    175}
    176
    177#ifdef CONFIG_DRM_I915_SELFTEST
    178/**
    179 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
    180 * @mem: The memory region,
    181 * @size: The requested size in bytes
    182 * @flags: Allocation flags
    183 *
    184 * This functionality is provided only for callers that need to allocate
    185 * memory from standalone TTM range managers, without the TTM eviction
    186 * functionality. Don't use if you are not completely sure that's the
    187 * case. The returned opaque node can be converted to an sg_table using
    188 * intel_region_ttm_resource_to_st(), and can be freed using
    189 * intel_region_ttm_resource_free().
    190 *
    191 * Return: A valid pointer on success, an error pointer on failure.
    192 */
    193struct ttm_resource *
    194intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
    195				resource_size_t offset,
    196				resource_size_t size,
    197				unsigned int flags)
    198{
    199	struct ttm_resource_manager *man = mem->region_private;
    200	struct ttm_place place = {};
    201	struct ttm_buffer_object mock_bo = {};
    202	struct ttm_resource *res;
    203	int ret;
    204
    205	if (flags & I915_BO_ALLOC_CONTIGUOUS)
    206		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
    207	if (offset != I915_BO_INVALID_OFFSET) {
    208		place.fpfn = offset >> PAGE_SHIFT;
    209		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
    210	} else if (mem->io_size && mem->io_size < mem->total) {
    211		if (flags & I915_BO_ALLOC_GPU_ONLY) {
    212			place.flags |= TTM_PL_FLAG_TOPDOWN;
    213		} else {
    214			place.fpfn = 0;
    215			place.lpfn = mem->io_size >> PAGE_SHIFT;
    216		}
    217	}
    218
    219	mock_bo.base.size = size;
    220	mock_bo.bdev = &mem->i915->bdev;
    221
    222	ret = man->func->alloc(man, &mock_bo, &place, &res);
    223	if (ret == -ENOSPC)
    224		ret = -ENXIO;
    225	if (!ret)
    226		res->bo = NULL; /* Rather blow up, then some uaf */
    227	return ret ? ERR_PTR(ret) : res;
    228}
    229
    230#endif
    231
    232/**
    233 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
    234 * @mem: The region the resource was allocated from.
    235 * @res: The opaque resource representing an allocation.
    236 */
    237void intel_region_ttm_resource_free(struct intel_memory_region *mem,
    238				    struct ttm_resource *res)
    239{
    240	struct ttm_resource_manager *man = mem->region_private;
    241	struct ttm_buffer_object mock_bo = {};
    242
    243	mock_bo.base.size = res->num_pages << PAGE_SHIFT;
    244	mock_bo.bdev = &mem->i915->bdev;
    245	res->bo = &mock_bo;
    246
    247	man->func->free(man, res);
    248}