cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_ttm_buddy_manager.c (10524B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/slab.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include <drm/drm_buddy.h>

#include "i915_ttm_buddy_manager.h"

#include "i915_gem.h"

struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager;
	struct drm_buddy mm;
	struct list_head reserved;
	struct mutex lock;
	/* CPU visible (mappable) portion, all tracked in pages */
	unsigned long visible_size;
	unsigned long visible_avail;
	unsigned long visible_reserved;
	/* Default minimum allocation granularity, in bytes */
	u64 default_page_size;
};

static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}

static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct drm_buddy *mm = &bman->mm;
	unsigned long n_pages, lpfn;
	u64 min_page_size;
	u64 size;
	int err;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (place->fpfn || lpfn != man->size)
		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	GEM_BUG_ON(!bman_res->base.num_pages);
	size = bman_res->base.num_pages << PAGE_SHIFT;

	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);
	GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));

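	/*
	 * For contiguous placements that are not an exact range, round the
	 * size up to the next power of two and force a single buddy block
	 * of that size (min_page_size == size), widening lpfn if the larger
	 * block would not otherwise fit. The overshoot is trimmed back once
	 * the allocation succeeds.
	 */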
	if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		unsigned long pages;

		size = roundup_pow_of_two(size);
		min_page_size = size;

		pages = size >> ilog2(mm->chunk_size);
		if (pages > lpfn)
			lpfn = pages;
	}

	if (size > lpfn << PAGE_SHIFT) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	mutex_lock(&bman->lock);
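	/*
	 * If the allocation must land entirely within the CPU visible
	 * portion, fail early when not enough visible pages are left.
	 */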
	if (lpfn <= bman->visible_size && n_pages > bman->visible_avail) {
		mutex_unlock(&bman->lock);
		err = -ENOSPC;
		goto err_free_res;
	}

	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
				     (u64)lpfn << PAGE_SHIFT,
				     (u64)n_pages << PAGE_SHIFT,
				     min_page_size,
				     &bman_res->blocks,
				     bman_res->flags);
	mutex_unlock(&bman->lock);
	if (unlikely(err))
		goto err_free_blocks;

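	/*
	 * Contiguous allocations were rounded up to a power of two above,
	 * so trim the block back down to the size that was actually
	 * requested.
	 */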
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;

		mutex_lock(&bman->lock);
		drm_buddy_block_trim(mm,
				     original_size,
				     &bman_res->blocks);
		mutex_unlock(&bman->lock);
	}

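	/*
	 * Work out how many of the allocated pages land in the CPU visible
	 * portion: either the whole allocation was confined to it, or we
	 * walk the blocks and count the pages that overlap it.
	 */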
	if (lpfn <= bman->visible_size) {
		bman_res->used_visible_size = bman_res->base.num_pages;
	} else {
		struct drm_buddy_block *block;

		list_for_each_entry(block, &bman_res->blocks, link) {
			unsigned long start =
				drm_buddy_block_offset(block) >> PAGE_SHIFT;

			if (start < bman->visible_size) {
				unsigned long end = start +
					(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

				bman_res->used_visible_size +=
					min(end, bman->visible_size) - start;
			}
		}
	}

	if (bman_res->used_visible_size) {
		mutex_lock(&bman->lock);
		bman->visible_avail -= bman_res->used_visible_size;
		mutex_unlock(&bman->lock);
	}

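	/*
	 * base.start is only a placeholder, since the real backing store is
	 * the block list: exact-range allocations report their fpfn,
	 * allocations confined to the visible portion report 0, and
	 * everything else reports visible_size so it appears outside the
	 * visible range.
	 */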
	if (place->lpfn - place->fpfn == n_pages)
		bman_res->base.start = place->fpfn;
	else if (lpfn <= bman->visible_size)
		bman_res->base.start = 0;
	else
		bman_res->base.start = bman->visible_size;

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	ttm_resource_fini(man, &bman_res->base);
	kfree(bman_res);
	return err;
}

static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(&bman->mm, &bman_res->blocks);
	bman->visible_avail += bman_res->used_visible_size;
	mutex_unlock(&bman->lock);

	ttm_resource_fini(man, res);
	kfree(bman_res);
}

static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
				     struct drm_printer *printer)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy_block *block;

	mutex_lock(&bman->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   bman->default_page_size >> 10);
	drm_printf(printer, "visible_avail: %lluMiB\n",
		   (u64)bman->visible_avail << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_size: %lluMiB\n",
		   (u64)bman->visible_size << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_reserved: %lluMiB\n",
		   (u64)bman->visible_reserved << PAGE_SHIFT >> 20);

	drm_buddy_print(&bman->mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &bman->reserved, link)
		drm_buddy_block_print(&bman->mm, block, printer);
	mutex_unlock(&bman->lock);
}

static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
	.debug = i915_ttm_buddy_man_debug,
};

/**
 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @visible_size: The CPU visible size in bytes to manage
 * @default_page_size: The default minimum page size in bytes for allocations,
 * this must be at least as large as @chunk_size, and can be overridden by
 * setting the BO page_alignment, to be larger or smaller as needed.
 * @chunk_size: The minimum page size in bytes for our allocations, i.e. the
 * order-zero size
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property where allocated blocks have natural
 * power-of-two alignment. So long as the real starting address is some large
 * power-of-two, or naturally starts from zero, then this should be fine. Also
 * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
 * if say there is some unusable range from the start of the region. We can
 * revisit this in the future and make the interface accept an actual starting
 * offset and let it take care of the rest.
 *
 * Note that if the @size is not aligned to the @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 visible_size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = drm_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;
	bman->visible_size = visible_size >> PAGE_SHIFT;
	bman->visible_avail = bman->visible_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}
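
/*
 * Example (illustrative sketch only): a driver owning an 8 GiB region whose
 * first 256 MiB is CPU visible, with a 64 KiB minimum allocation granularity,
 * might set the manager up roughly like this. The mem_type value and the
 * sizes below are made-up placeholders, not values taken from this driver.
 *
 *	unsigned int mem_type = TTM_PL_PRIV;	// placeholder memory type
 *	int err;
 *
 *	err = i915_ttm_buddy_man_init(bdev, mem_type, false,
 *				      8ull << 30,	// size
 *				      256ull << 20,	// visible_size
 *				      SZ_64K,		// default_page_size
 *				      PAGE_SIZE);	// chunk_size
 *	if (err)
 *		return err;
 *
 * Teardown later goes through i915_ttm_buddy_man_fini(bdev, mem_type), which
 * also frees anything reserved with i915_ttm_buddy_man_reserve().
 */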

/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
 * also be freed for us here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman->reserved);
	drm_buddy_fini(mm);
	bman->visible_avail += bman->visible_reserved;
	WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}

/**
 * i915_ttm_buddy_man_reserve - Reserve address range
 * @man: The buddy allocator ttm manager
 * @start: The offset in bytes, where the region start is assumed to be zero
 * @size: The size in bytes
 *
 * Note that the starting address for the region is always assumed to be zero.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	unsigned long fpfn = start >> PAGE_SHIFT;
	unsigned long flags = 0;
	int ret;

	flags |= DRM_BUDDY_RANGE_ALLOCATION;

	mutex_lock(&bman->lock);
	ret = drm_buddy_alloc_blocks(mm, start,
				     start + size,
				     size, mm->chunk_size,
				     &bman->reserved,
				     flags);

	if (fpfn < bman->visible_size) {
		unsigned long lpfn = fpfn + (size >> PAGE_SHIFT);
		unsigned long visible = min(lpfn, bman->visible_size) - fpfn;

		bman->visible_reserved += visible;
		bman->visible_avail -= visible;
	}
	mutex_unlock(&bman->lock);

	return ret;
}
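
/*
 * Example (illustrative sketch only): carving out an unusable range at the
 * start of the region right after the manager has been initialised. The
 * reserved_size value is a made-up placeholder; a real driver would derive
 * it from the hardware or firmware.
 *
 *	u64 reserved_size = 8ull << 20;	// first 8 MiB unusable
 *	int err;
 *
 *	err = i915_ttm_buddy_man_reserve(man, 0, reserved_size);
 *	if (err)
 *		return err;
 *
 * The reserved blocks stay on bman->reserved until i915_ttm_buddy_man_fini(),
 * which hands them back to the buddy allocator.
 */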

/**
 * i915_ttm_buddy_man_visible_size - Return the size of the CPU visible portion
 * in pages.
 * @man: The buddy allocator ttm manager
 */
u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	return bman->visible_size;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man,
					   u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	bman->visible_size = size;
}
#endif