cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

test-ww_mutex.c (13809B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

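/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH the ww_mutex core randomly injects
 * -EDEADLK to exercise the backoff slow path.  Tests that check for real
 * deadlock behaviour use this wrapper to push the injection countdown out
 * of reach, effectively disabling injection for that acquire context.
 */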
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

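/*
 * Basic mutual exclusion: a worker thread attempts to take a mutex that the
 * main thread already holds (by blocking lock or spinning trylock, with or
 * without an acquire context) and must not get it until it is released.
 */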
struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

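/*
 * AA deadlock: take the same ww_mutex twice.  A second trylock must fail,
 * and a second ww_mutex_lock() under the same acquire context must return
 * -EALREADY instead of deadlocking.
 */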
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL))  {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx))  {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

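/*
 * ABBA deadlock: two threads take the same pair of mutexes in opposite
 * order.  The wound/wait protocol must report -EDEADLK to one side; with
 * "resolve" set that side backs off via ww_mutex_lock_slow() and both
 * threads must then succeed.
 */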
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

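/*
 * Cyclic deadlock: each of N workers holds its own mutex and then tries to
 * take its neighbour's, closing a cycle.  Every worker must resolve the
 * resulting -EDEADLK by backing off and relocking.
 */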
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

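/*
 * Stress tests: hammer a shared array of ww_mutexes from workqueue workers
 * for roughly two seconds, locking in-order, in re-shuffled order and one
 * lock at a time, and check that every -EDEADLK gets resolved.
 */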
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

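/* Return the indices 0..count-1 in a randomly shuffled order. */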
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

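/*
 * Take every lock in one fixed, randomly chosen order; on -EDEADLK release
 * all held locks, sleep on the contended one with ww_mutex_lock_slow() and
 * retry the pass with that lock already held.
 */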
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

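/*
 * Like the in-order stress, but the locks are kept on a list and the
 * contended lock is moved to the front before retrying, so later passes
 * acquire them in a different order.
 */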
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}

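/*
 * Plain lock/unlock on a single randomly chosen mutex, without an acquire
 * context, to contend with the ordered stress workers.
 */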
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

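/*
 * Spawn nthreads workers, cycling through the stress variants selected in
 * flags, all operating on the same array of nlocks mutexes, then wait for
 * the whole workqueue to drain.
 */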
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}

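/* Run the whole selftest sequence at module load; stop at the first failure. */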
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");
	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");