cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

i915_sw_fence.c (14816B)


      1/*
      2 * Copyright © 2017 Intel Corporation
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice (including the next
     12 * paragraph) shall be included in all copies or substantial portions of the
     13 * Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21 * IN THE SOFTWARE.
     22 *
     23 */
     24
     25#include <linux/completion.h>
     26#include <linux/delay.h>
     27#include <linux/prime_numbers.h>
     28
     29#include "../i915_selftest.h"
     30
     31static int
     32fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
     33{
     34	switch (state) {
     35	case FENCE_COMPLETE:
     36		break;
     37
     38	case FENCE_FREE:
     39		/* Leave the fence for the caller to free it after testing */
     40		break;
     41	}
     42
     43	return NOTIFY_DONE;
     44}
     45
     46static struct i915_sw_fence *alloc_fence(void)
     47{
     48	struct i915_sw_fence *fence;
     49
     50	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
     51	if (!fence)
     52		return NULL;
     53
     54	i915_sw_fence_init(fence, fence_notify);
     55	return fence;
     56}
     57
/* Tear down a fence created by alloc_fence() and release its memory. */
static void free_fence(struct i915_sw_fence *fence)
{
	/* fini() must run before the backing storage is returned. */
	i915_sw_fence_fini(fence);
	kfree(fence);
}
     63
     64static int __test_self(struct i915_sw_fence *fence)
     65{
     66	if (i915_sw_fence_done(fence))
     67		return -EINVAL;
     68
     69	i915_sw_fence_commit(fence);
     70	if (!i915_sw_fence_done(fence))
     71		return -EINVAL;
     72
     73	i915_sw_fence_wait(fence);
     74	if (!i915_sw_fence_done(fence))
     75		return -EINVAL;
     76
     77	return 0;
     78}
     79
     80static int test_self(void *arg)
     81{
     82	struct i915_sw_fence *fence;
     83	int ret;
     84
     85	/* Test i915_sw_fence signaling and completion testing */
     86	fence = alloc_fence();
     87	if (!fence)
     88		return -ENOMEM;
     89
     90	ret = __test_self(fence);
     91
     92	free_fence(fence);
     93	return ret;
     94}
     95
/*
 * Test detection of cycles within the i915_sw_fence graphs: every await
 * that would close a cycle must be rejected with -EINVAL, while valid
 * (acyclic) awaits must be accepted. Finally all three fences are
 * committed and must complete.
 */
static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Cycle checking is compile-time optional; skip if not built in. */
	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	/* A fence awaiting itself is the smallest possible cycle. */
	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	/* A->B is legal; the reverse edge B->A would close a cycle. */
	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	/* B->C extends the chain A->B->C and must be accepted. */
	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	/* C->A would close the two-deep cycle A->B->C->A. */
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	/* A->C is a shortcut edge in the DAG, not a cycle. */
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

	/* Commit from the root down; C has no dependencies left to wait on. */
	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
    174
    175static int test_AB(void *arg)
    176{
    177	struct i915_sw_fence *A, *B;
    178	int ret;
    179
    180	/* Test i915_sw_fence (A) waiting on an event source (B) */
    181	A = alloc_fence();
    182	if (!A)
    183		return -ENOMEM;
    184	B = alloc_fence();
    185	if (!B) {
    186		ret = -ENOMEM;
    187		goto err_A;
    188	}
    189
    190	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
    191	if (ret < 0)
    192		goto err_B;
    193	if (ret == 0) {
    194		pr_err("Incorrectly reported fence A was complete before await\n");
    195		ret = -EINVAL;
    196		goto err_B;
    197	}
    198
    199	ret = -EINVAL;
    200	i915_sw_fence_commit(A);
    201	if (i915_sw_fence_done(A))
    202		goto err_B;
    203
    204	i915_sw_fence_commit(B);
    205	if (!i915_sw_fence_done(B)) {
    206		pr_err("Fence B is not done\n");
    207		goto err_B;
    208	}
    209
    210	if (!i915_sw_fence_done(A)) {
    211		pr_err("Fence A is not done\n");
    212		goto err_B;
    213	}
    214
    215	ret = 0;
    216err_B:
    217	free_fence(B);
    218err_A:
    219	free_fence(A);
    220	return ret;
    221}
    222
    223static int test_ABC(void *arg)
    224{
    225	struct i915_sw_fence *A, *B, *C;
    226	int ret;
    227
    228	/* Test a chain of fences, A waits on B who waits on C */
    229	A = alloc_fence();
    230	if (!A)
    231		return -ENOMEM;
    232
    233	B = alloc_fence();
    234	if (!B) {
    235		ret = -ENOMEM;
    236		goto err_A;
    237	}
    238
    239	C = alloc_fence();
    240	if (!C) {
    241		ret = -ENOMEM;
    242		goto err_B;
    243	}
    244
    245	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
    246	if (ret < 0)
    247		goto err_C;
    248	if (ret == 0) {
    249		pr_err("Incorrectly reported fence B was complete before await\n");
    250		goto err_C;
    251	}
    252
    253	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
    254	if (ret < 0)
    255		goto err_C;
    256	if (ret == 0) {
    257		pr_err("Incorrectly reported fence C was complete before await\n");
    258		goto err_C;
    259	}
    260
    261	ret = -EINVAL;
    262	i915_sw_fence_commit(A);
    263	if (i915_sw_fence_done(A)) {
    264		pr_err("Fence A completed early\n");
    265		goto err_C;
    266	}
    267
    268	i915_sw_fence_commit(B);
    269	if (i915_sw_fence_done(B)) {
    270		pr_err("Fence B completed early\n");
    271		goto err_C;
    272	}
    273
    274	if (i915_sw_fence_done(A)) {
    275		pr_err("Fence A completed early (after signaling B)\n");
    276		goto err_C;
    277	}
    278
    279	i915_sw_fence_commit(C);
    280
    281	ret = 0;
    282	if (!i915_sw_fence_done(C)) {
    283		pr_err("Fence C not done\n");
    284		ret = -EINVAL;
    285	}
    286	if (!i915_sw_fence_done(B)) {
    287		pr_err("Fence B not done\n");
    288		ret = -EINVAL;
    289	}
    290	if (!i915_sw_fence_done(A)) {
    291		pr_err("Fence A not done\n");
    292		ret = -EINVAL;
    293	}
    294err_C:
    295	free_fence(C);
    296err_B:
    297	free_fence(B);
    298err_A:
    299	free_fence(A);
    300	return ret;
    301}
    302
    303static int test_AB_C(void *arg)
    304{
    305	struct i915_sw_fence *A, *B, *C;
    306	int ret = -EINVAL;
    307
    308	/* Test multiple fences (AB) waiting on a single event (C) */
    309	A = alloc_fence();
    310	if (!A)
    311		return -ENOMEM;
    312
    313	B = alloc_fence();
    314	if (!B) {
    315		ret = -ENOMEM;
    316		goto err_A;
    317	}
    318
    319	C = alloc_fence();
    320	if (!C) {
    321		ret = -ENOMEM;
    322		goto err_B;
    323	}
    324
    325	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
    326	if (ret < 0)
    327		goto err_C;
    328	if (ret == 0) {
    329		ret = -EINVAL;
    330		goto err_C;
    331	}
    332
    333	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
    334	if (ret < 0)
    335		goto err_C;
    336	if (ret == 0) {
    337		ret = -EINVAL;
    338		goto err_C;
    339	}
    340
    341	i915_sw_fence_commit(A);
    342	i915_sw_fence_commit(B);
    343
    344	ret = 0;
    345	if (i915_sw_fence_done(A)) {
    346		pr_err("Fence A completed early\n");
    347		ret = -EINVAL;
    348	}
    349
    350	if (i915_sw_fence_done(B)) {
    351		pr_err("Fence B completed early\n");
    352		ret = -EINVAL;
    353	}
    354
    355	i915_sw_fence_commit(C);
    356	if (!i915_sw_fence_done(C)) {
    357		pr_err("Fence C not done\n");
    358		ret = -EINVAL;
    359	}
    360
    361	if (!i915_sw_fence_done(B)) {
    362		pr_err("Fence B not done\n");
    363		ret = -EINVAL;
    364	}
    365
    366	if (!i915_sw_fence_done(A)) {
    367		pr_err("Fence A not done\n");
    368		ret = -EINVAL;
    369	}
    370
    371err_C:
    372	free_fence(C);
    373err_B:
    374	free_fence(B);
    375err_A:
    376	free_fence(A);
    377	return ret;
    378}
    379
/*
 * Test multiple event sources (A, B) for a single fence (C): C must
 * remain pending after its own commit until both A and B have been
 * signaled.
 */
static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	/* A return of 0 would mean A was already complete - a test failure. */
	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	/* C is committed first; it must stay pending on its two sources. */
	ret = 0;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C))
		ret = -EINVAL;

	/* Signaling both sources must now complete C as well. */
	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
    449
/*
 * Test a long chain of fences: fence[i] awaits fence[i-1], so nothing
 * may complete until fence[0] is committed, after which the whole chain
 * must unwind.
 */
static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			/* Shrink nfences so cleanup frees only what exists. */
			nfences = i;
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				/* fences[i] was allocated; include it. */
				nfences = i + 1;
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

	/*
	 * fences[0] is still uncommitted, so every later fence must be
	 * pending; iterate down to (but excluding) index 0.
	 */
	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}
	/* Committing the head must ripple completion through the chain. */
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}
    504
/* Shared state between test_ipc() and its worker, task_ipc(). */
struct task_ipc {
	struct work_struct work;	/* worker executing task_ipc() */
	struct completion started;	/* signaled once the worker runs */
	struct i915_sw_fence *in, *out;	/* handshake fences: in -> worker -> out */
	int value;			/* posted by the worker before signaling out */
};
    511
/*
 * Worker half of test_ipc(): announce startup, block on the "in" fence,
 * publish a value and then signal the "out" fence.
 */
static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	/* Tell the parent we are running before we block on the fence. */
	complete(&ipc->started);

	i915_sw_fence_wait(ipc->in);
	/* smp_store_mb orders the store of value before committing out. */
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}
    522
/* Test use of i915_sw_fence as an interprocess signaling mechanism */
static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	int ret = 0;

	ipc.in = alloc_fence();
	if (!ipc.in)
		return -ENOMEM;
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* use a completion to avoid chicken-and-egg testing */
	init_completion(&ipc.started);

	ipc.value = 0;
	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	schedule_work(&ipc.work);

	wait_for_completion(&ipc.started);

	/* Give the worker a window to (incorrectly) run ahead of the fence. */
	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	/* Release the worker; it posts value then signals ipc.out. */
	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	/* The work item is on our stack: it must be idle before we return. */
	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
	return ret;
}
    568
/*
 * Test timed fences: one expiring immediately, then a series with
 * prime-numbered delays which must not signal early and must be done
 * after waiting. preempt_disable() brackets each init/check pair so
 * jiffies cannot advance between setting the target and testing it.
 */
static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

	preempt_disable();
	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	preempt_enable();
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
		preempt_disable();
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}
		preempt_enable();

		i915_sw_fence_wait(&tf.fence);

		preempt_disable();
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}
		preempt_enable();
		timed_fence_fini(&tf);
	}

	return 0;

err:
	/* All error gotos occur with preemption disabled. */
	preempt_enable();
	timed_fence_fini(&tf);
	return -EINVAL;
}
    615
/* dma_fence_ops callback: both driver and timeline report as "mock". */
static const char *mock_name(struct dma_fence *fence)
{
	return "mock";
}
    620
/* Minimal dma_fence backend; only the naming callbacks are needed. */
static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

/* Single lock shared by every mock dma_fence allocated below. */
static DEFINE_SPINLOCK(mock_fence_lock);
    627
    628static struct dma_fence *alloc_dma_fence(void)
    629{
    630	struct dma_fence *dma;
    631
    632	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
    633	if (dma)
    634		dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);
    635
    636	return dma;
    637}
    638
/*
 * Wrap a dma_fence in an i915_sw_fence that awaits it, with an optional
 * timeout of @delay jiffies (0 = no timeout). The wrapper is committed
 * here so it signals as soon as the dma_fence (or the timeout) does.
 * Returns an ERR_PTR on failure.
 */
static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

	/*
	 * Commit unconditionally before checking err: the fence must be
	 * committed even on the error path so free_fence() is valid.
	 */
	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}
    658
/*
 * Test i915_sw_fence awaiting a dma_fence: "timeout" has a deadline of
 * @delay jiffies, "not" has none. Neither may signal early; the timeout
 * fence must fire by its deadline without the dma_fence being signaled,
 * while "not" only completes once the dma_fence itself is signaled.
 */
static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* We round the timeout for the fence up to the next second */
	end = round_jiffies_up(jiffies + delay);

	/* Sleep for a fraction of the delay; must still be before 'end'. */
	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		/* Oversleeping makes the early checks meaningless. */
		pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

	/* Generous margin: twice the remaining time plus one jiffy. */
	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	/* The no-timeout fence must still be waiting on the dma_fence. */
	if (i915_sw_fence_done(not)) {
		pr_err("No timeout fence signaled!\n");
		goto err;
	}

skip:
	/* Signaling the dma_fence must complete whatever remains. */
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences unsignaled\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);

	return 0;

err:
	/* Signal before freeing so pending callbacks are run off. */
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}
    740
/*
 * Entry point for the mock (no hardware required) i915_sw_fence
 * selftests, run through the i915 selftest infrastructure.
 */
int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};

	return i915_subtests(tests, NULL);
}