cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_memory_region.c (8329B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

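/*
 * Fill one page of the iomap with @value and read back three probe
 * bytes (the first byte, a random byte, and the last byte) to verify
 * that the backing store is present and returns what was written.
 */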
static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = prandom_u32_max(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io_start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

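/*
 * Map one page of the region write-combined and exercise it with four
 * test values (0x00, 0xa5, 0xc3, 0xf0) and their complements.
 */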
static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io_start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
}

static resource_size_t random_page(resource_size_t last)
{
	/* Limited to low 44b (16TiB), but should suffice for a spot check */
	return prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

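/*
 * Quick sanity check of the io aperture: sweep every page when
 * @test_all is set, otherwise probe only the first, the last and one
 * randomly chosen page.
 */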
static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (mem->io_size < PAGE_SIZE)
		return 0;

	last = mem->io_size - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of pages,
	 * the first and last of the specified region to confirm the backing
	 * store + iomap does cover the entire memory region; and we check
	 * a random offset within as a quick spot check for bad memory.
	 */

	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}

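/* Find the region matching both memory class and instance, if any. */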
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

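/* Return the first region of the requested type, regardless of instance. */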
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}

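/* Print the region name and usage, via TTM when a manager is attached. */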
void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa, available:%pa bytes\n",
			   &mr->total, &mr->avail);
}

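/*
 * Run iomemtest() on regions that expose an io aperture. The test is
 * enabled on CONFIG_DRM_I915_DEBUG_GEM builds or when the i915.memtest
 * parameter is set; the full sweep of every page is requested only in
 * the latter case.
 */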
static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io_start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

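/*
 * Allocate and initialise a region: record its ranges and type, run the
 * implementation's init hook, then memtest the io aperture. On failure
 * the region is released and freed, and an ERR_PTR is returned.
 */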
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->io_size = io_size;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}

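/* Format the human-readable region name into the fixed-size name buffer. */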
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

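/*
 * Tear down a region. If the release hook returns non-zero, freeing is
 * deferred to the implementation and the kfree() here is skipped.
 */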
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

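/*
 * Instantiate every region advertised by the hardware, dispatching on
 * memory class to the matching setup routine. Stolen regions are also
 * cached in i915->mm.stolen_region; any failure unwinds all regions
 * probed so far.
 */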
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

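/*
 * Drop every registered region on driver release, clearing each slot
 * with fetch_and_zero() before destroying the region.
 */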
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif