cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

selftest_timeline.c (31776B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright © 2017-2018 Intel Corporation
      4 */
      5
      6#include <linux/prime_numbers.h>
      7#include <linux/string_helpers.h>
      8
      9#include "intel_context.h"
     10#include "intel_engine_heartbeat.h"
     11#include "intel_engine_pm.h"
     12#include "intel_engine_regs.h"
     13#include "intel_gpu_commands.h"
     14#include "intel_gt.h"
     15#include "intel_gt_requests.h"
     16#include "intel_ring.h"
     17#include "selftest_engine_heartbeat.h"
     18
     19#include "../selftests/i915_random.h"
     20#include "../i915_selftest.h"
     21
     22#include "selftests/igt_flush_test.h"
     23#include "selftests/lib_sw_fence.h"
     24#include "selftests/mock_gem_device.h"
     25#include "selftests/mock_timeline.h"
     26
     27static struct page *hwsp_page(struct intel_timeline *tl)
     28{
     29	struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
     30
     31	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
     32	return sg_page(obj->mm.pages->sgl);
     33}
     34
     35static unsigned long hwsp_cacheline(struct intel_timeline *tl)
     36{
     37	unsigned long address = (unsigned long)page_address(hwsp_page(tl));
     38
     39	return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;
     40}
     41
     42static int selftest_tl_pin(struct intel_timeline *tl)
     43{
     44	struct i915_gem_ww_ctx ww;
     45	int err;
     46
     47	i915_gem_ww_ctx_init(&ww, false);
     48retry:
     49	err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww);
     50	if (!err)
     51		err = intel_timeline_pin(tl, &ww);
     52
     53	if (err == -EDEADLK) {
     54		err = i915_gem_ww_ctx_backoff(&ww);
     55		if (!err)
     56			goto retry;
     57	}
     58	i915_gem_ww_ctx_fini(&ww);
     59	return err;
     60}
     61
     62/* Only half of seqnos are usable, see __intel_timeline_get_seqno() */
     63#define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2)
     64
     65struct mock_hwsp_freelist {
     66	struct intel_gt *gt;
     67	struct radix_tree_root cachelines;
     68	struct intel_timeline **history;
     69	unsigned long count, max;
     70	struct rnd_state prng;
     71};
     72
     73enum {
     74	SHUFFLE = BIT(0),
     75};
     76
     77static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
     78			       unsigned int idx,
     79			       struct intel_timeline *tl)
     80{
     81	tl = xchg(&state->history[idx], tl);
     82	if (tl) {
     83		radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
     84		intel_timeline_unpin(tl);
     85		intel_timeline_put(tl);
     86	}
     87}
     88
     89static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
     90				unsigned int count,
     91				unsigned int flags)
     92{
     93	struct intel_timeline *tl;
     94	unsigned int idx;
     95
     96	while (count--) {
     97		unsigned long cacheline;
     98		int err;
     99
    100		tl = intel_timeline_create(state->gt);
    101		if (IS_ERR(tl))
    102			return PTR_ERR(tl);
    103
    104		err = selftest_tl_pin(tl);
    105		if (err) {
    106			intel_timeline_put(tl);
    107			return err;
    108		}
    109
    110		cacheline = hwsp_cacheline(tl);
    111		err = radix_tree_insert(&state->cachelines, cacheline, tl);
    112		if (err) {
    113			if (err == -EEXIST) {
    114				pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
    115				       cacheline);
    116			}
    117			intel_timeline_unpin(tl);
    118			intel_timeline_put(tl);
    119			return err;
    120		}
    121
    122		idx = state->count++ % state->max;
    123		__mock_hwsp_record(state, idx, tl);
    124	}
    125
    126	if (flags & SHUFFLE)
    127		i915_prandom_shuffle(state->history,
    128				     sizeof(*state->history),
    129				     min(state->count, state->max),
    130				     &state->prng);
    131
    132	count = i915_prandom_u32_max_state(min(state->count, state->max),
    133					   &state->prng);
    134	while (count--) {
    135		idx = --state->count % state->max;
    136		__mock_hwsp_record(state, idx, NULL);
    137	}
    138
    139	return 0;
    140}
    141
    142static int mock_hwsp_freelist(void *arg)
    143{
    144	struct mock_hwsp_freelist state;
    145	struct drm_i915_private *i915;
    146	const struct {
    147		const char *name;
    148		unsigned int flags;
    149	} phases[] = {
    150		{ "linear", 0 },
    151		{ "shuffled", SHUFFLE },
    152		{ },
    153	}, *p;
    154	unsigned int na;
    155	int err = 0;
    156
    157	i915 = mock_gem_device();
    158	if (!i915)
    159		return -ENOMEM;
    160
    161	INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
    162	state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
    163
    164	state.gt = to_gt(i915);
    165
    166	/*
    167	 * Create a bunch of timelines and check that their HWSPs do not overlap.
    168	 * Free some, and try again.
    169	 */
    170
    171	state.max = PAGE_SIZE / sizeof(*state.history);
    172	state.count = 0;
    173	state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
    174	if (!state.history) {
    175		err = -ENOMEM;
    176		goto err_put;
    177	}
    178
    179	for (p = phases; p->name; p++) {
    180		pr_debug("%s(%s)\n", __func__, p->name);
    181		for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
    182			err = __mock_hwsp_timeline(&state, na, p->flags);
    183			if (err)
    184				goto out;
    185		}
    186	}
    187
    188out:
    189	for (na = 0; na < state.max; na++)
    190		__mock_hwsp_record(&state, na, NULL);
    191	kfree(state.history);
    192err_put:
    193	mock_destroy_device(i915);
    194	return err;
    195}
    196
    197struct __igt_sync {
    198	const char *name;
    199	u32 seqno;
    200	bool expected;
    201	bool set;
    202};
    203
    204static int __igt_sync(struct intel_timeline *tl,
    205		      u64 ctx,
    206		      const struct __igt_sync *p,
    207		      const char *name)
    208{
    209	int ret;
    210
    211	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
    212		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
    213		       name, p->name, ctx, p->seqno, str_yes_no(p->expected));
    214		return -EINVAL;
    215	}
    216
    217	if (p->set) {
    218		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
    219		if (ret)
    220			return ret;
    221	}
    222
    223	return 0;
    224}
    225
    226static int igt_sync(void *arg)
    227{
    228	const struct __igt_sync pass[] = {
    229		{ "unset", 0, false, false },
    230		{ "new", 0, false, true },
    231		{ "0a", 0, true, true },
    232		{ "1a", 1, false, true },
    233		{ "1b", 1, true, true },
    234		{ "0b", 0, true, false },
    235		{ "2a", 2, false, true },
    236		{ "4", 4, false, true },
    237		{ "INT_MAX", INT_MAX, false, true },
    238		{ "INT_MAX-1", INT_MAX-1, true, false },
    239		{ "INT_MAX+1", (u32)INT_MAX+1, false, true },
    240		{ "INT_MAX", INT_MAX, true, false },
    241		{ "UINT_MAX", UINT_MAX, false, true },
    242		{ "wrap", 0, false, true },
    243		{ "unwrap", UINT_MAX, true, false },
    244		{},
    245	}, *p;
    246	struct intel_timeline tl;
    247	int order, offset;
    248	int ret = -ENODEV;
    249
    250	mock_timeline_init(&tl, 0);
    251	for (p = pass; p->name; p++) {
    252		for (order = 1; order < 64; order++) {
    253			for (offset = -1; offset <= (order > 1); offset++) {
    254				u64 ctx = BIT_ULL(order) + offset;
    255
    256				ret = __igt_sync(&tl, ctx, p, "1");
    257				if (ret)
    258					goto out;
    259			}
    260		}
    261	}
    262	mock_timeline_fini(&tl);
    263
    264	mock_timeline_init(&tl, 0);
    265	for (order = 1; order < 64; order++) {
    266		for (offset = -1; offset <= (order > 1); offset++) {
    267			u64 ctx = BIT_ULL(order) + offset;
    268
    269			for (p = pass; p->name; p++) {
    270				ret = __igt_sync(&tl, ctx, p, "2");
    271				if (ret)
    272					goto out;
    273			}
    274		}
    275	}
    276
    277out:
    278	mock_timeline_fini(&tl);
    279	return ret;
    280}
    281
    282static unsigned int random_engine(struct rnd_state *rnd)
    283{
    284	return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
    285}
    286
    287static int bench_sync(void *arg)
    288{
    289	struct rnd_state prng;
    290	struct intel_timeline tl;
    291	unsigned long end_time, count;
    292	u64 prng32_1M;
    293	ktime_t kt;
    294	int order, last_order;
    295
    296	mock_timeline_init(&tl, 0);
    297
     298	/* Lookups from cache are very fast and so the random number generation
     299	 * and the loop itself become a significant factor in the per-iteration
     300	 * timings. We try to compensate for this by measuring the overhead
     301	 * of the prng and subtracting it from the reported results.
    302	 */
    303	prandom_seed_state(&prng, i915_selftest.random_seed);
    304	count = 0;
    305	kt = ktime_get();
    306	end_time = jiffies + HZ/10;
    307	do {
    308		u32 x;
    309
    310		/* Make sure the compiler doesn't optimise away the prng call */
    311		WRITE_ONCE(x, prandom_u32_state(&prng));
    312
    313		count++;
    314	} while (!time_after(jiffies, end_time));
    315	kt = ktime_sub(ktime_get(), kt);
    316	pr_debug("%s: %lu random evaluations, %lluns/prng\n",
    317		 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
    318	prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
    319
    320	/* Benchmark (only) setting random context ids */
    321	prandom_seed_state(&prng, i915_selftest.random_seed);
    322	count = 0;
    323	kt = ktime_get();
    324	end_time = jiffies + HZ/10;
    325	do {
    326		u64 id = i915_prandom_u64_state(&prng);
    327
    328		__intel_timeline_sync_set(&tl, id, 0);
    329		count++;
    330	} while (!time_after(jiffies, end_time));
    331	kt = ktime_sub(ktime_get(), kt);
    332	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
    333	pr_info("%s: %lu random insertions, %lluns/insert\n",
    334		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
    335
    336	/* Benchmark looking up the exact same context ids as we just set */
    337	prandom_seed_state(&prng, i915_selftest.random_seed);
    338	end_time = count;
    339	kt = ktime_get();
    340	while (end_time--) {
    341		u64 id = i915_prandom_u64_state(&prng);
    342
    343		if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
    344			mock_timeline_fini(&tl);
    345			pr_err("Lookup of %llu failed\n", id);
    346			return -EINVAL;
    347		}
    348	}
    349	kt = ktime_sub(ktime_get(), kt);
    350	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
    351	pr_info("%s: %lu random lookups, %lluns/lookup\n",
    352		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
    353
    354	mock_timeline_fini(&tl);
    355	cond_resched();
    356
    357	mock_timeline_init(&tl, 0);
    358
    359	/* Benchmark setting the first N (in order) contexts */
    360	count = 0;
    361	kt = ktime_get();
    362	end_time = jiffies + HZ/10;
    363	do {
    364		__intel_timeline_sync_set(&tl, count++, 0);
    365	} while (!time_after(jiffies, end_time));
    366	kt = ktime_sub(ktime_get(), kt);
    367	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
    368		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
    369
    370	/* Benchmark looking up the exact same context ids as we just set */
    371	end_time = count;
    372	kt = ktime_get();
    373	while (end_time--) {
    374		if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
    375			pr_err("Lookup of %lu failed\n", end_time);
    376			mock_timeline_fini(&tl);
    377			return -EINVAL;
    378		}
    379	}
    380	kt = ktime_sub(ktime_get(), kt);
    381	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
    382		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
    383
    384	mock_timeline_fini(&tl);
    385	cond_resched();
    386
    387	mock_timeline_init(&tl, 0);
    388
    389	/* Benchmark searching for a random context id and maybe changing it */
    390	prandom_seed_state(&prng, i915_selftest.random_seed);
    391	count = 0;
    392	kt = ktime_get();
    393	end_time = jiffies + HZ/10;
    394	do {
    395		u32 id = random_engine(&prng);
    396		u32 seqno = prandom_u32_state(&prng);
    397
    398		if (!__intel_timeline_sync_is_later(&tl, id, seqno))
    399			__intel_timeline_sync_set(&tl, id, seqno);
    400
    401		count++;
    402	} while (!time_after(jiffies, end_time));
    403	kt = ktime_sub(ktime_get(), kt);
    404	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
    405	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
    406		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
    407	mock_timeline_fini(&tl);
    408	cond_resched();
    409
    410	/* Benchmark searching for a known context id and changing the seqno */
    411	for (last_order = 1, order = 1; order < 32;
    412	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
    413		unsigned int mask = BIT(order) - 1;
    414
    415		mock_timeline_init(&tl, 0);
    416
    417		count = 0;
    418		kt = ktime_get();
    419		end_time = jiffies + HZ/10;
    420		do {
    421			/* Without assuming too many details of the underlying
    422			 * implementation, try to identify its phase-changes
    423			 * (if any)!
    424			 */
    425			u64 id = (u64)(count & mask) << order;
    426
    427			__intel_timeline_sync_is_later(&tl, id, 0);
    428			__intel_timeline_sync_set(&tl, id, 0);
    429
    430			count++;
    431		} while (!time_after(jiffies, end_time));
    432		kt = ktime_sub(ktime_get(), kt);
    433		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
    434			__func__, count, order,
    435			(long long)div64_ul(ktime_to_ns(kt), count));
    436		mock_timeline_fini(&tl);
    437		cond_resched();
    438	}
    439
    440	return 0;
    441}
    442
    443int intel_timeline_mock_selftests(void)
    444{
    445	static const struct i915_subtest tests[] = {
    446		SUBTEST(mock_hwsp_freelist),
    447		SUBTEST(igt_sync),
    448		SUBTEST(bench_sync),
    449	};
    450
    451	return i915_subtests(tests, NULL);
    452}
    453
    454static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
    455{
    456	u32 *cs;
    457
    458	cs = intel_ring_begin(rq, 4);
    459	if (IS_ERR(cs))
    460		return PTR_ERR(cs);
    461
    462	if (GRAPHICS_VER(rq->engine->i915) >= 8) {
    463		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
    464		*cs++ = addr;
    465		*cs++ = 0;
    466		*cs++ = value;
    467	} else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
    468		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
    469		*cs++ = 0;
    470		*cs++ = addr;
    471		*cs++ = value;
    472	} else {
    473		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
    474		*cs++ = addr;
    475		*cs++ = value;
    476		*cs++ = MI_NOOP;
    477	}
    478
    479	intel_ring_advance(rq, cs);
    480
    481	return 0;
    482}
    483
    484static struct i915_request *
    485checked_tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
    486{
    487	struct i915_request *rq;
    488	int err;
    489
    490	err = selftest_tl_pin(tl);
    491	if (err) {
    492		rq = ERR_PTR(err);
    493		goto out;
    494	}
    495
    496	if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
    497		pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
    498		       *tl->hwsp_seqno, tl->seqno);
    499		intel_timeline_unpin(tl);
    500		return ERR_PTR(-EINVAL);
    501	}
    502
    503	rq = intel_engine_create_kernel_request(engine);
    504	if (IS_ERR(rq))
    505		goto out_unpin;
    506
    507	i915_request_get(rq);
    508
    509	err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
    510	i915_request_add(rq);
    511	if (err) {
    512		i915_request_put(rq);
    513		rq = ERR_PTR(err);
    514	}
    515
    516out_unpin:
    517	intel_timeline_unpin(tl);
    518out:
    519	if (IS_ERR(rq))
    520		pr_err("Failed to write to timeline!\n");
    521	return rq;
    522}
    523
    524static int live_hwsp_engine(void *arg)
    525{
    526#define NUM_TIMELINES 4096
    527	struct intel_gt *gt = arg;
    528	struct intel_timeline **timelines;
    529	struct intel_engine_cs *engine;
    530	enum intel_engine_id id;
    531	unsigned long count, n;
    532	int err = 0;
    533
    534	/*
    535	 * Create a bunch of timelines and check we can write
    536	 * independently to each of their breadcrumb slots.
    537	 */
    538
    539	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
    540				   sizeof(*timelines),
    541				   GFP_KERNEL);
    542	if (!timelines)
    543		return -ENOMEM;
    544
    545	count = 0;
    546	for_each_engine(engine, gt, id) {
    547		if (!intel_engine_can_store_dword(engine))
    548			continue;
    549
    550		intel_engine_pm_get(engine);
    551
    552		for (n = 0; n < NUM_TIMELINES; n++) {
    553			struct intel_timeline *tl;
    554			struct i915_request *rq;
    555
    556			tl = intel_timeline_create(gt);
    557			if (IS_ERR(tl)) {
    558				err = PTR_ERR(tl);
    559				break;
    560			}
    561
    562			rq = checked_tl_write(tl, engine, count);
    563			if (IS_ERR(rq)) {
    564				intel_timeline_put(tl);
    565				err = PTR_ERR(rq);
    566				break;
    567			}
    568
    569			timelines[count++] = tl;
    570			i915_request_put(rq);
    571		}
    572
    573		intel_engine_pm_put(engine);
    574		if (err)
    575			break;
    576	}
    577
    578	if (igt_flush_test(gt->i915))
    579		err = -EIO;
    580
    581	for (n = 0; n < count; n++) {
    582		struct intel_timeline *tl = timelines[n];
    583
    584		if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
    585			GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
    586				      n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
    587			GEM_TRACE_DUMP();
    588			err = -EINVAL;
    589		}
    590		intel_timeline_put(tl);
    591	}
    592
    593	kvfree(timelines);
    594	return err;
    595#undef NUM_TIMELINES
    596}
    597
    598static int live_hwsp_alternate(void *arg)
    599{
    600#define NUM_TIMELINES 4096
    601	struct intel_gt *gt = arg;
    602	struct intel_timeline **timelines;
    603	struct intel_engine_cs *engine;
    604	enum intel_engine_id id;
    605	unsigned long count, n;
    606	int err = 0;
    607
    608	/*
    609	 * Create a bunch of timelines and check we can write
    610	 * independently to each of their breadcrumb slots with adjacent
    611	 * engines.
    612	 */
    613
    614	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
    615				   sizeof(*timelines),
    616				   GFP_KERNEL);
    617	if (!timelines)
    618		return -ENOMEM;
    619
    620	count = 0;
    621	for (n = 0; n < NUM_TIMELINES; n++) {
    622		for_each_engine(engine, gt, id) {
    623			struct intel_timeline *tl;
    624			struct i915_request *rq;
    625
    626			if (!intel_engine_can_store_dword(engine))
    627				continue;
    628
    629			tl = intel_timeline_create(gt);
    630			if (IS_ERR(tl)) {
    631				err = PTR_ERR(tl);
    632				goto out;
    633			}
    634
    635			intel_engine_pm_get(engine);
    636			rq = checked_tl_write(tl, engine, count);
    637			intel_engine_pm_put(engine);
    638			if (IS_ERR(rq)) {
    639				intel_timeline_put(tl);
    640				err = PTR_ERR(rq);
    641				goto out;
    642			}
    643
    644			timelines[count++] = tl;
    645			i915_request_put(rq);
    646		}
    647	}
    648
    649out:
    650	if (igt_flush_test(gt->i915))
    651		err = -EIO;
    652
    653	for (n = 0; n < count; n++) {
    654		struct intel_timeline *tl = timelines[n];
    655
    656		if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
    657			GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
    658				      n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
    659			GEM_TRACE_DUMP();
    660			err = -EINVAL;
    661		}
    662		intel_timeline_put(tl);
    663	}
    664
    665	kvfree(timelines);
    666	return err;
    667#undef NUM_TIMELINES
    668}
    669
    670static int live_hwsp_wrap(void *arg)
    671{
    672	struct intel_gt *gt = arg;
    673	struct intel_engine_cs *engine;
    674	struct intel_timeline *tl;
    675	enum intel_engine_id id;
    676	int err = 0;
    677
    678	/*
    679	 * Across a seqno wrap, we need to keep the old cacheline alive for
    680	 * foreign GPU references.
    681	 */
    682
    683	tl = intel_timeline_create(gt);
    684	if (IS_ERR(tl))
    685		return PTR_ERR(tl);
    686
    687	if (!tl->has_initial_breadcrumb)
    688		goto out_free;
    689
    690	err = selftest_tl_pin(tl);
    691	if (err)
    692		goto out_free;
    693
    694	for_each_engine(engine, gt, id) {
    695		const u32 *hwsp_seqno[2];
    696		struct i915_request *rq;
    697		u32 seqno[2];
    698
    699		if (!intel_engine_can_store_dword(engine))
    700			continue;
    701
    702		rq = intel_engine_create_kernel_request(engine);
    703		if (IS_ERR(rq)) {
    704			err = PTR_ERR(rq);
    705			goto out;
    706		}
    707
    708		tl->seqno = -4u;
    709
    710		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
    711		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
    712		mutex_unlock(&tl->mutex);
    713		if (err) {
    714			i915_request_add(rq);
    715			goto out;
    716		}
    717		pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
    718			 seqno[0], tl->hwsp_offset);
    719
    720		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
    721		if (err) {
    722			i915_request_add(rq);
    723			goto out;
    724		}
    725		hwsp_seqno[0] = tl->hwsp_seqno;
    726
    727		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
    728		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
    729		mutex_unlock(&tl->mutex);
    730		if (err) {
    731			i915_request_add(rq);
    732			goto out;
    733		}
    734		pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
    735			 seqno[1], tl->hwsp_offset);
    736
    737		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
    738		if (err) {
    739			i915_request_add(rq);
    740			goto out;
    741		}
    742		hwsp_seqno[1] = tl->hwsp_seqno;
    743
    744		/* With wrap should come a new hwsp */
    745		GEM_BUG_ON(seqno[1] >= seqno[0]);
    746		GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
    747
    748		i915_request_add(rq);
    749
    750		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
    751			pr_err("Wait for timeline writes timed out!\n");
    752			err = -EIO;
    753			goto out;
    754		}
    755
    756		if (READ_ONCE(*hwsp_seqno[0]) != seqno[0] ||
    757		    READ_ONCE(*hwsp_seqno[1]) != seqno[1]) {
    758			pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
    759			       *hwsp_seqno[0], *hwsp_seqno[1],
    760			       seqno[0], seqno[1]);
    761			err = -EINVAL;
    762			goto out;
    763		}
    764
    765		intel_gt_retire_requests(gt); /* recycle HWSP */
    766	}
    767
    768out:
    769	if (igt_flush_test(gt->i915))
    770		err = -EIO;
    771
    772	intel_timeline_unpin(tl);
    773out_free:
    774	intel_timeline_put(tl);
    775	return err;
    776}
    777
    778static int emit_read_hwsp(struct i915_request *rq,
    779			  u32 seqno, u32 hwsp,
    780			  u32 *addr)
    781{
    782	const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
    783	u32 *cs;
    784
    785	cs = intel_ring_begin(rq, 12);
    786	if (IS_ERR(cs))
    787		return PTR_ERR(cs);
    788
    789	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
    790	*cs++ = *addr;
    791	*cs++ = 0;
    792	*cs++ = seqno;
    793	*addr += 4;
    794
    795	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_USE_GGTT;
    796	*cs++ = gpr;
    797	*cs++ = hwsp;
    798	*cs++ = 0;
    799
    800	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
    801	*cs++ = gpr;
    802	*cs++ = *addr;
    803	*cs++ = 0;
    804	*addr += 4;
    805
    806	intel_ring_advance(rq, cs);
    807
    808	return 0;
    809}
    810
    811struct hwsp_watcher {
    812	struct i915_vma *vma;
    813	struct i915_request *rq;
    814	u32 addr;
    815	u32 *map;
    816};
    817
    818static bool cmp_lt(u32 a, u32 b)
    819{
    820	return a < b;
    821}
    822
    823static bool cmp_gte(u32 a, u32 b)
    824{
    825	return a >= b;
    826}
    827
    828static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
    829{
    830	struct drm_i915_gem_object *obj;
    831	struct i915_vma *vma;
    832
    833	obj = i915_gem_object_create_internal(gt->i915, SZ_2M);
    834	if (IS_ERR(obj))
    835		return PTR_ERR(obj);
    836
    837	w->map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
    838	if (IS_ERR(w->map)) {
    839		i915_gem_object_put(obj);
    840		return PTR_ERR(w->map);
    841	}
    842
    843	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
    844	if (IS_ERR(vma)) {
    845		i915_gem_object_put(obj);
    846		return PTR_ERR(vma);
    847	}
    848
    849	w->vma = vma;
    850	w->addr = i915_ggtt_offset(vma);
    851	return 0;
    852}
    853
    854static void switch_tl_lock(struct i915_request *from, struct i915_request *to)
    855{
    856	/* some light mutex juggling required; think co-routines */
    857
    858	if (from) {
    859		lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie);
    860		mutex_unlock(&from->context->timeline->mutex);
    861	}
    862
    863	if (to) {
    864		mutex_lock(&to->context->timeline->mutex);
    865		to->cookie = lockdep_pin_lock(&to->context->timeline->mutex);
    866	}
    867}
    868
    869static int create_watcher(struct hwsp_watcher *w,
    870			  struct intel_engine_cs *engine,
    871			  int ringsz)
    872{
    873	struct intel_context *ce;
    874
    875	ce = intel_context_create(engine);
    876	if (IS_ERR(ce))
    877		return PTR_ERR(ce);
    878
    879	ce->ring_size = ringsz;
    880	w->rq = intel_context_create_request(ce);
    881	intel_context_put(ce);
    882	if (IS_ERR(w->rq))
    883		return PTR_ERR(w->rq);
    884
    885	w->addr = i915_ggtt_offset(w->vma);
    886
    887	switch_tl_lock(w->rq, NULL);
    888
    889	return 0;
    890}
    891
    892static int check_watcher(struct hwsp_watcher *w, const char *name,
    893			 bool (*op)(u32 hwsp, u32 seqno))
    894{
    895	struct i915_request *rq = fetch_and_zero(&w->rq);
    896	u32 offset, end;
    897	int err;
    898
    899	GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size);
    900
    901	i915_request_get(rq);
    902	switch_tl_lock(NULL, rq);
    903	i915_request_add(rq);
    904
    905	if (i915_request_wait(rq, 0, HZ) < 0) {
    906		err = -ETIME;
    907		goto out;
    908	}
    909
    910	err = 0;
    911	offset = 0;
    912	end = (w->addr - i915_ggtt_offset(w->vma)) / sizeof(*w->map);
    913	while (offset < end) {
    914		if (!op(w->map[offset + 1], w->map[offset])) {
    915			pr_err("Watcher '%s' found HWSP value %x for seqno %x\n",
    916			       name, w->map[offset + 1], w->map[offset]);
    917			err = -EINVAL;
    918		}
    919
    920		offset += 2;
    921	}
    922
    923out:
    924	i915_request_put(rq);
    925	return err;
    926}
    927
    928static void cleanup_watcher(struct hwsp_watcher *w)
    929{
    930	if (w->rq) {
    931		switch_tl_lock(NULL, w->rq);
    932
    933		i915_request_add(w->rq);
    934	}
    935
    936	i915_vma_unpin_and_release(&w->vma, I915_VMA_RELEASE_MAP);
    937}
    938
    939static bool retire_requests(struct intel_timeline *tl)
    940{
    941	struct i915_request *rq, *rn;
    942
    943	mutex_lock(&tl->mutex);
    944	list_for_each_entry_safe(rq, rn, &tl->requests, link)
    945		if (!i915_request_retire(rq))
    946			break;
    947	mutex_unlock(&tl->mutex);
    948
    949	return !i915_active_fence_isset(&tl->last_request);
    950}
    951
    952static struct i915_request *wrap_timeline(struct i915_request *rq)
    953{
    954	struct intel_context *ce = rq->context;
    955	struct intel_timeline *tl = ce->timeline;
    956	u32 seqno = rq->fence.seqno;
    957
    958	while (tl->seqno >= seqno) { /* Cause a wrap */
    959		i915_request_put(rq);
    960		rq = intel_context_create_request(ce);
    961		if (IS_ERR(rq))
    962			return rq;
    963
    964		i915_request_get(rq);
    965		i915_request_add(rq);
    966	}
    967
    968	i915_request_put(rq);
    969	rq = i915_request_create(ce);
    970	if (IS_ERR(rq))
    971		return rq;
    972
    973	i915_request_get(rq);
    974	i915_request_add(rq);
    975
    976	return rq;
    977}
    978
    979static int live_hwsp_read(void *arg)
    980{
    981	struct intel_gt *gt = arg;
    982	struct hwsp_watcher watcher[2] = {};
    983	struct intel_engine_cs *engine;
    984	struct intel_timeline *tl;
    985	enum intel_engine_id id;
    986	int err = 0;
    987	int i;
    988
    989	/*
    990	 * If we take a reference to the HWSP for reading on the GPU, that
    991	 * read may be arbitrarily delayed (either by foreign fence or
    992	 * priority saturation) and a wrap can happen within 30 minutes.
    993	 * When the GPU read is finally submitted it should be correct,
    994	 * even across multiple wraps.
    995	 */
    996
    997	if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */
    998		return 0;
    999
   1000	tl = intel_timeline_create(gt);
   1001	if (IS_ERR(tl))
   1002		return PTR_ERR(tl);
   1003
   1004	if (!tl->has_initial_breadcrumb)
   1005		goto out_free;
   1006
   1007	for (i = 0; i < ARRAY_SIZE(watcher); i++) {
   1008		err = setup_watcher(&watcher[i], gt);
   1009		if (err)
   1010			goto out;
   1011	}
   1012
   1013	for_each_engine(engine, gt, id) {
   1014		struct intel_context *ce;
   1015		unsigned long count = 0;
   1016		IGT_TIMEOUT(end_time);
   1017
   1018		/* Create a request we can use for remote reading of the HWSP */
   1019		err = create_watcher(&watcher[1], engine, SZ_512K);
   1020		if (err)
   1021			goto out;
   1022
   1023		do {
   1024			struct i915_sw_fence *submit;
   1025			struct i915_request *rq;
   1026			u32 hwsp, dummy;
   1027
   1028			submit = heap_fence_create(GFP_KERNEL);
   1029			if (!submit) {
   1030				err = -ENOMEM;
   1031				goto out;
   1032			}
   1033
   1034			err = create_watcher(&watcher[0], engine, SZ_4K);
   1035			if (err)
   1036				goto out;
   1037
   1038			ce = intel_context_create(engine);
   1039			if (IS_ERR(ce)) {
   1040				err = PTR_ERR(ce);
   1041				goto out;
   1042			}
   1043
   1044			ce->timeline = intel_timeline_get(tl);
   1045
   1046			/* Ensure timeline is mapped, done during first pin */
   1047			err = intel_context_pin(ce);
   1048			if (err) {
   1049				intel_context_put(ce);
   1050				goto out;
   1051			}
   1052
   1053			/*
   1054			 * Start at a new wrap, and set seqno right before another wrap,
   1055			 * saving 30 minutes of nops
   1056			 */
   1057			tl->seqno = -12u + 2 * (count & 3);
   1058			__intel_timeline_get_seqno(tl, &dummy);
   1059
   1060			rq = i915_request_create(ce);
   1061			if (IS_ERR(rq)) {
   1062				err = PTR_ERR(rq);
   1063				intel_context_unpin(ce);
   1064				intel_context_put(ce);
   1065				goto out;
   1066			}
   1067
   1068			err = i915_sw_fence_await_dma_fence(&rq->submit,
   1069							    &watcher[0].rq->fence, 0,
   1070							    GFP_KERNEL);
   1071			if (err < 0) {
   1072				i915_request_add(rq);
   1073				intel_context_unpin(ce);
   1074				intel_context_put(ce);
   1075				goto out;
   1076			}
   1077
   1078			switch_tl_lock(rq, watcher[0].rq);
   1079			err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
   1080			if (err == 0)
   1081				err = emit_read_hwsp(watcher[0].rq, /* before */
   1082						     rq->fence.seqno, hwsp,
   1083						     &watcher[0].addr);
   1084			switch_tl_lock(watcher[0].rq, rq);
   1085			if (err) {
   1086				i915_request_add(rq);
   1087				intel_context_unpin(ce);
   1088				intel_context_put(ce);
   1089				goto out;
   1090			}
   1091
   1092			switch_tl_lock(rq, watcher[1].rq);
   1093			err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
   1094			if (err == 0)
   1095				err = emit_read_hwsp(watcher[1].rq, /* after */
   1096						     rq->fence.seqno, hwsp,
   1097						     &watcher[1].addr);
   1098			switch_tl_lock(watcher[1].rq, rq);
   1099			if (err) {
   1100				i915_request_add(rq);
   1101				intel_context_unpin(ce);
   1102				intel_context_put(ce);
   1103				goto out;
   1104			}
   1105
   1106			i915_request_get(rq);
   1107			i915_request_add(rq);
   1108
   1109			rq = wrap_timeline(rq);
   1110			intel_context_unpin(ce);
   1111			intel_context_put(ce);
   1112			if (IS_ERR(rq)) {
   1113				err = PTR_ERR(rq);
   1114				goto out;
   1115			}
   1116
   1117			err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit,
   1118							    &rq->fence, 0,
   1119							    GFP_KERNEL);
   1120			if (err < 0) {
   1121				i915_request_put(rq);
   1122				goto out;
   1123			}
   1124
   1125			err = check_watcher(&watcher[0], "before", cmp_lt);
   1126			i915_sw_fence_commit(submit);
   1127			heap_fence_put(submit);
   1128			if (err) {
   1129				i915_request_put(rq);
   1130				goto out;
   1131			}
   1132			count++;
   1133
   1134			/* Flush the timeline before manually wrapping again */
   1135			if (i915_request_wait(rq,
   1136					      I915_WAIT_INTERRUPTIBLE,
   1137					      HZ) < 0) {
   1138				err = -ETIME;
   1139				i915_request_put(rq);
   1140				goto out;
   1141			}
   1142			retire_requests(tl);
   1143			i915_request_put(rq);
   1144
   1145			/* Single requests are limited to half a ring at most */
   1146			if (8 * watcher[1].rq->ring->emit >
   1147			    3 * watcher[1].rq->ring->size)
   1148				break;
   1149
   1150		} while (!__igt_timeout(end_time, NULL) &&
   1151			 count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2);
   1152
   1153		pr_info("%s: simulated %lu wraps\n", engine->name, count);
   1154		err = check_watcher(&watcher[1], "after", cmp_gte);
   1155		if (err)
   1156			goto out;
   1157	}
   1158
   1159out:
   1160	for (i = 0; i < ARRAY_SIZE(watcher); i++)
   1161		cleanup_watcher(&watcher[i]);
   1162
   1163	if (igt_flush_test(gt->i915))
   1164		err = -EIO;
   1165
   1166out_free:
   1167	intel_timeline_put(tl);
   1168	return err;
   1169}
   1170
   1171static int live_hwsp_rollover_kernel(void *arg)
   1172{
   1173	struct intel_gt *gt = arg;
   1174	struct intel_engine_cs *engine;
   1175	enum intel_engine_id id;
   1176	int err = 0;
   1177
   1178	/*
   1179	 * Run the host for long enough, and even the kernel context will
   1180	 * see a seqno rollover.
   1181	 */
   1182
   1183	for_each_engine(engine, gt, id) {
   1184		struct intel_context *ce = engine->kernel_context;
   1185		struct intel_timeline *tl = ce->timeline;
   1186		struct i915_request *rq[3] = {};
   1187		int i;
   1188
   1189		st_engine_heartbeat_disable(engine);
   1190		if (intel_gt_wait_for_idle(gt, HZ / 2)) {
   1191			err = -EIO;
   1192			goto out;
   1193		}
   1194
   1195		GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
   1196		tl->seqno = -2u;
   1197		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
   1198
   1199		for (i = 0; i < ARRAY_SIZE(rq); i++) {
   1200			struct i915_request *this;
   1201
   1202			this = i915_request_create(ce);
   1203			if (IS_ERR(this)) {
   1204				err = PTR_ERR(this);
   1205				goto out;
   1206			}
   1207
    1208	pr_debug("%s: create fence.seqno:%d\n",
   1209				 engine->name,
   1210				 lower_32_bits(this->fence.seqno));
   1211
   1212			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
   1213
   1214			rq[i] = i915_request_get(this);
   1215			i915_request_add(this);
   1216		}
   1217
   1218		/* We expected a wrap! */
   1219		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
   1220
   1221		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
   1222			pr_err("Wait for timeline wrap timed out!\n");
   1223			err = -EIO;
   1224			goto out;
   1225		}
   1226
   1227		for (i = 0; i < ARRAY_SIZE(rq); i++) {
   1228			if (!i915_request_completed(rq[i])) {
   1229				pr_err("Pre-wrap request not completed!\n");
   1230				err = -EINVAL;
   1231				goto out;
   1232			}
   1233		}
   1234
   1235out:
   1236		for (i = 0; i < ARRAY_SIZE(rq); i++)
   1237			i915_request_put(rq[i]);
   1238		st_engine_heartbeat_enable(engine);
   1239		if (err)
   1240			break;
   1241	}
   1242
   1243	if (igt_flush_test(gt->i915))
   1244		err = -EIO;
   1245
   1246	return err;
   1247}
   1248
   1249static int live_hwsp_rollover_user(void *arg)
   1250{
   1251	struct intel_gt *gt = arg;
   1252	struct intel_engine_cs *engine;
   1253	enum intel_engine_id id;
   1254	int err = 0;
   1255
   1256	/*
   1257	 * Simulate a long running user context, and force the seqno wrap
   1258	 * on the user's timeline.
   1259	 */
   1260
   1261	for_each_engine(engine, gt, id) {
   1262		struct i915_request *rq[3] = {};
   1263		struct intel_timeline *tl;
   1264		struct intel_context *ce;
   1265		int i;
   1266
   1267		ce = intel_context_create(engine);
   1268		if (IS_ERR(ce))
   1269			return PTR_ERR(ce);
   1270
   1271		err = intel_context_alloc_state(ce);
   1272		if (err)
   1273			goto out;
   1274
   1275		tl = ce->timeline;
   1276		if (!tl->has_initial_breadcrumb)
   1277			goto out;
   1278
   1279		err = intel_context_pin(ce);
   1280		if (err)
   1281			goto out;
   1282
   1283		tl->seqno = -4u;
   1284		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
   1285
   1286		for (i = 0; i < ARRAY_SIZE(rq); i++) {
   1287			struct i915_request *this;
   1288
   1289			this = intel_context_create_request(ce);
   1290			if (IS_ERR(this)) {
   1291				err = PTR_ERR(this);
   1292				goto out_unpin;
   1293			}
   1294
    1295	pr_debug("%s: create fence.seqno:%d\n",
   1296				 engine->name,
   1297				 lower_32_bits(this->fence.seqno));
   1298
   1299			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
   1300
   1301			rq[i] = i915_request_get(this);
   1302			i915_request_add(this);
   1303		}
   1304
   1305		/* We expected a wrap! */
   1306		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
   1307
   1308		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
   1309			pr_err("Wait for timeline wrap timed out!\n");
   1310			err = -EIO;
   1311			goto out_unpin;
   1312		}
   1313
   1314		for (i = 0; i < ARRAY_SIZE(rq); i++) {
   1315			if (!i915_request_completed(rq[i])) {
   1316				pr_err("Pre-wrap request not completed!\n");
   1317				err = -EINVAL;
   1318				goto out_unpin;
   1319			}
   1320		}
   1321out_unpin:
   1322		intel_context_unpin(ce);
   1323out:
   1324		for (i = 0; i < ARRAY_SIZE(rq); i++)
   1325			i915_request_put(rq[i]);
   1326		intel_context_put(ce);
   1327		if (err)
   1328			break;
   1329	}
   1330
   1331	if (igt_flush_test(gt->i915))
   1332		err = -EIO;
   1333
   1334	return err;
   1335}
   1336
   1337static int live_hwsp_recycle(void *arg)
   1338{
   1339	struct intel_gt *gt = arg;
   1340	struct intel_engine_cs *engine;
   1341	enum intel_engine_id id;
   1342	unsigned long count;
   1343	int err = 0;
   1344
   1345	/*
   1346	 * Check seqno writes into one timeline at a time. We expect to
    1347	 * recycle the breadcrumb slot between iterations and want to
    1348	 * confuse neither ourselves nor the GPU.
   1349	 */
   1350
   1351	count = 0;
   1352	for_each_engine(engine, gt, id) {
   1353		IGT_TIMEOUT(end_time);
   1354
   1355		if (!intel_engine_can_store_dword(engine))
   1356			continue;
   1357
   1358		intel_engine_pm_get(engine);
   1359
   1360		do {
   1361			struct intel_timeline *tl;
   1362			struct i915_request *rq;
   1363
   1364			tl = intel_timeline_create(gt);
   1365			if (IS_ERR(tl)) {
   1366				err = PTR_ERR(tl);
   1367				break;
   1368			}
   1369
   1370			rq = checked_tl_write(tl, engine, count);
   1371			if (IS_ERR(rq)) {
   1372				intel_timeline_put(tl);
   1373				err = PTR_ERR(rq);
   1374				break;
   1375			}
   1376
   1377			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
   1378				pr_err("Wait for timeline writes timed out!\n");
   1379				i915_request_put(rq);
   1380				intel_timeline_put(tl);
   1381				err = -EIO;
   1382				break;
   1383			}
   1384
   1385			if (READ_ONCE(*tl->hwsp_seqno) != count) {
   1386				GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x found 0x%x\n",
   1387					      count, tl->fence_context,
   1388					      tl->hwsp_offset, *tl->hwsp_seqno);
   1389				GEM_TRACE_DUMP();
   1390				err = -EINVAL;
   1391			}
   1392
   1393			i915_request_put(rq);
   1394			intel_timeline_put(tl);
   1395			count++;
   1396
   1397			if (err)
   1398				break;
   1399		} while (!__igt_timeout(end_time, NULL));
   1400
   1401		intel_engine_pm_put(engine);
   1402		if (err)
   1403			break;
   1404	}
   1405
   1406	return err;
   1407}
   1408
   1409int intel_timeline_live_selftests(struct drm_i915_private *i915)
   1410{
   1411	static const struct i915_subtest tests[] = {
   1412		SUBTEST(live_hwsp_recycle),
   1413		SUBTEST(live_hwsp_engine),
   1414		SUBTEST(live_hwsp_alternate),
   1415		SUBTEST(live_hwsp_wrap),
   1416		SUBTEST(live_hwsp_read),
   1417		SUBTEST(live_hwsp_rollover_kernel),
   1418		SUBTEST(live_hwsp_rollover_user),
   1419	};
   1420
   1421	if (intel_gt_is_wedged(to_gt(i915)))
   1422		return 0;
   1423
   1424	return intel_gt_live_subtests(tests, to_gt(i915));
   1425}