cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

selftest_ring_submission.c (6120B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "intel_engine_pm.h"
#include "selftests/igt_flush_test.h"

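/*
 * Build a small batch buffer that stores STACK_MAGIC into dword 1000
 * (byte offset 4000) of its own 4KiB backing object, leaving a visible
 * marker each time the batch is executed. A dummy intel_context is
 * attached as vma->private to satisfy the ring submission code, and is
 * released again by the callers.
 */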
static struct i915_vma *create_wally(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(engine->i915, 4096);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	err = i915_vma_sync(vma);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		i915_gem_object_put(obj);
		return ERR_CAST(cs);
	}

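	/*
	 * Emit the MI_STORE_DWORD_IMM variant this generation can use,
	 * followed by the target address (byte 4000 within this buffer)
	 * and the STACK_MAGIC marker value.
	 */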
	if (GRAPHICS_VER(engine->i915) >= 6) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4;
		*cs++ = 0;
	} else if (GRAPHICS_VER(engine->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
	}
	*cs++ = vma->node.start + 4000;
	*cs++ = STACK_MAGIC;

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		vma = ERR_CAST(vma->private);
		i915_gem_object_put(obj);
	}

	return vma;
}

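/*
 * Submit an empty request on the given context and wait up to 200ms
 * (HZ / 5 jiffies) for it to complete.
 */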
static int context_sync(struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

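/*
 * Create a brand new context, run an empty request through it and drop
 * it again, forcing the engine to switch to a context it has not run
 * before.
 */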
static int new_context_sync(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = context_sync(ce);
	intel_context_put(ce);

	return err;
}

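/*
 * The wa batch must never run for the kernel context, but must run on
 * every switch to a new user context, whether the previous context was
 * the kernel's or another user's.
 */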
static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
{
	int pass;
	int err;

	for (pass = 0; pass < 2; pass++) {
		WRITE_ONCE(*result, 0);
		err = context_sync(engine->kernel_context);
		if (err || READ_ONCE(*result)) {
			if (!err) {
				pr_err("pass[%d] wa_bb emitted for the kernel context\n",
				       pass);
				err = -EINVAL;
			}
			return err;
		}

		WRITE_ONCE(*result, 0);
		err = new_context_sync(engine);
		if (READ_ONCE(*result) != STACK_MAGIC) {
			if (!err) {
				pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
				       pass);
				err = -EINVAL;
			}
			return err;
		}

		WRITE_ONCE(*result, 0);
		err = new_context_sync(engine);
		if (READ_ONCE(*result) != STACK_MAGIC) {
			if (!err) {
				pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
				       pass);
				err = -EINVAL;
			}
			return err;
		}
	}

	return 0;
}

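/*
 * Two back-to-back requests on the same user context: there is no
 * context switch in between, so the wa batch must not run and the
 * marker must stay zero.
 */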
static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
{
	struct intel_context *ce;
	int err, i;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (i = 0; i < 2; i++) {
		WRITE_ONCE(*result, 0);
		err = context_sync(ce);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	if (READ_ONCE(*result)) {
		pr_err("wa_bb emitted between the same user context\n");
		return -EINVAL;
	}

	return 0;
}

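/*
 * As above, but with a kernel-context request after each user request:
 * returning to the same user context must still not trigger the wa
 * batch.
 */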
static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
{
	struct intel_context *ce;
	int err, i;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (i = 0; i < 2; i++) {
		WRITE_ONCE(*result, 0);
		err = context_sync(ce);
		if (err)
			break;

		err = context_sync(engine->kernel_context);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	if (READ_ONCE(*result)) {
		pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
		return -EINVAL;
	}

	return 0;
}

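/*
 * Install the marker-writing batch from create_wally() as the engine's
 * inter-context wa batch, then run the checks above while reading the
 * marker back through a WC mapping of the same object.
 */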
static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
{
	struct i915_vma *bb;
	u32 *result;
	int err;

	bb = create_wally(engine);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	result = i915_gem_object_pin_map_unlocked(bb->obj, I915_MAP_WC);
	if (IS_ERR(result)) {
		intel_context_put(bb->private);
		i915_vma_unpin_and_release(&bb, 0);
		return PTR_ERR(result);
	}
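	/* Step to dword 1000, the offset the batch writes STACK_MAGIC to. */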
	result += 1000;

	engine->wa_ctx.vma = bb;

	err = mixed_contexts_sync(engine, result);
	if (err)
		goto out;

	err = double_context_sync_00(engine, result);
	if (err)
		goto out;

	err = kernel_context_sync_00(engine, result);
	if (err)
		goto out;

out:
	intel_context_put(engine->wa_ctx.vma->private);
	i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
	return err;
}

static int live_ctx_switch_wa(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Exercise the inter-context wa batch.
	 *
	 * Between each user context we run a wa batch, and since it may
	 * have implications for user visible state, we have to check that
	 * we do actually execute it.
	 *
	 * The trick we use is to replace the normal wa batch with a custom
	 * one that writes to a marker within it, and we can then look for
	 * that marker to confirm if the batch was run when we expect it,
	 * and, equally important, that it was *not* run when we don't!
	 */

	for_each_engine(engine, gt, id) {
		struct i915_vma *saved_wa;
		int err;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (IS_GRAPHICS_VER(gt->i915, 4, 5))
			continue; /* MI_STORE_DWORD is privileged! */

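		/*
		 * Temporarily swap in our marker batch for the engine's
		 * normal wa batch; the original is restored below.
		 */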
		saved_wa = fetch_and_zero(&engine->wa_ctx.vma);

		intel_engine_pm_get(engine);
		err = __live_ctx_switch_wa(engine);
		intel_engine_pm_put(engine);
		if (igt_flush_test(gt->i915))
			err = -EIO;

		engine->wa_ctx.vma = saved_wa;
		if (err)
			return err;
	}

	return 0;
}

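/*
 * Entry point for the live selftests; these only apply to the legacy
 * ring submission backend, so skip when execlists or the GuC are in
 * charge of submission.
 */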
int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_ctx_switch_wa),
	};

	if (to_gt(i915)->submission_method > INTEL_SUBMISSION_RING)
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}