cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

selftest_mocs.c (9534B)
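i915 live selftests (drivers/gpu/drm/i915/gt/selftest_mocs.c in the kernel tree) that verify the MOCS registers are programmed as expected for new contexts and retained across engine and full-GPU resets.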


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"

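/*
 * MOCS (Memory Object Control State) registers control the caching
 * behaviour of GPU memory accesses. These live selftests read the
 * registers back via the command streamer and compare them against
 * the tables the driver expects to have programmed, both for freshly
 * created contexts and across engine and full-GPU resets.
 */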
struct live_mocs {
	struct drm_i915_mocs_table table;
	struct drm_i915_mocs_table *mocs;
	struct drm_i915_mocs_table *l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	/* We build large requests to read the registers from the ring */
	ce->ring_size = SZ_16K;

	return ce;
}

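/*
 * Submit a request and synchronously wait (up to HZ / 5, i.e. 200ms)
 * for it to complete; request_add_spin() below instead waits for the
 * spinner payload to start executing on the GPU.
 */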
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

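/*
 * Look up the expected MOCS/L3CC tables for this device and allocate a
 * scratch page in the GGTT (with a write-back CPU mapping) into which
 * the register values will be copied for inspection.
 */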
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &arg->table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = &arg->table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = &arg->table;

	arg->scratch =
		__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

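/*
 * Emit one MI_STORE_REGISTER_MEM (SRM) command per register, copying
 * count consecutive registers starting at addr into the scratch buffer
 * and advancing *offset past the stored values.
 */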
static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

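/*
 * Read back the MOCS control registers: a single global table on
 * platforms with global MOCS registers, otherwise the per-engine copy.
 * The L3CC registers pack two 16-bit entries per 32-bit register,
 * hence the (n_entries + 1) / 2 count in read_l3cc_table().
 */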
static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr;

	if (!table)
		return 0;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	if (!table)
		return 0;

	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

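/*
 * Compare each value read back from the hardware against the expected
 * entry in the driver's table, advancing the scratch pointer as we go.
 */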
static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}

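/*
 * Poison the scratch page, submit a request on the target context that
 * copies the MOCS (and, for the render engine, L3CC) registers into it,
 * then verify the values read back against the expected tables.
 */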
static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	if (!err)
		err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

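/*
 * Reset the engine while a spinning request is running on it. With GuC
 * submission the engine is not reset directly; the spinner is instead
 * expected to be killed by the scheduler's hang detection (fast reset,
 * see live_mocs_reset()), which intel_selftest_wait_for_rq() waits for.
 */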
static int active_engine_reset(struct intel_context *ce,
			       const char *reason,
			       bool using_guc)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0 && !using_guc)
		err = intel_engine_reset(ce->engine, reason);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce, bool using_guc)
{
	struct intel_gt *gt = ce->engine->gt;
	int err;

	if (intel_has_reset_engine(gt)) {
		if (!using_guc) {
			err = intel_engine_reset(ce->engine, "mocs");
			if (err)
				return err;

			err = check_mocs_engine(mocs, ce);
			if (err)
				return err;
		}

		err = active_engine_reset(ce, "mocs", using_guc);
		if (err)
			return err;

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	if (intel_has_gpu_reset(gt)) {
		intel_gt_reset(gt, ce->engine->mask, "mocs");

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err = 0;

	/* Check the mocs setup is retained over per-engine and global resets */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		bool using_guc = intel_engine_uses_guc(engine);
		struct intel_selftest_saved_policy saved;
		struct intel_context *ce;
		int err2;

		err = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (err)
			break;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto restore;
		}

		intel_engine_pm_get(engine);

		err = __live_mocs_reset(&mocs, ce, using_guc);

		intel_engine_pm_put(engine);
		intel_context_put(ce);

restore:
		err2 = intel_selftest_restore_policy(engine, &saved);
		if (err == 0)
			err = err2;
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);
	return err;
}

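/*
 * Entry point for the live selftest framework; returns 0 (skip) on
 * platforms without a MOCS table.
 */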
int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}