cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

multiorder.c (5294B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * multiorder.c: Multi-order radix tree entry testing
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <pthread.h>

#include "test.h"

static int item_insert_order(struct xarray *xa, unsigned long index,
			unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	struct item *item = item_create(index, order);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	if (!xas_error(&xas))
		return 0;

	free(item);
	return xas_error(&xas);
}
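
/*
 * Insert a fixed set of multi-order entries, then iterate from every
 * possible start index and check that each entry found reports the
 * expected index range, order and node shift.
 */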
void multiorder_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j, err;

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3,  4,  1,  0,  1,  3,  0, 7};

	printv(1, "Multiorder iteration test\n");

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(xa, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		xas_set(&xas, j);
		xas_for_each(&xas, item, ULONG_MAX) {
			int height = order[i] / XA_CHUNK_SHIFT;
			int shift = height * XA_CHUNK_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;

			assert((xas.xa_index | mask) == (index[i] | mask));
			assert(xas.xa_node->shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(xa);
}
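
/*
 * Same as multiorder_iteration(), but mark a subset of the entries with
 * XA_MARK_1 and check that marked iteration, and re-marking via
 * tag_tagged_items(), only ever visits the marked entries.
 */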
void multiorder_tagged_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j;

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4,  3,  1,  3,  0,   7};

#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	printv(1, "Multiorder tagged iteration test\n");

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(xa, index[i], order[i]));

	assert(!xa_marked(xa, XA_MARK_1));

	for (i = 0; i < TAG_ENTRIES; i++)
		xa_set_mark(xa, tag_index[i], XA_MARK_1);

	for (j = 0; j < 256; j++) {
		int k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
			unsigned long mask;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
				XA_MARK_2) == TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
				XA_MARK_0) == TAG_ENTRIES);
	i = 0;
	xas_set(&xas, 0);
	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
		assert(xas.xa_index == tag_index[i]);
		i++;
	}
	assert(i == TAG_ENTRIES);

	item_kill_tree(xa);
}
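
/* Set by the creator thread to tell the iterator threads to stop. */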
bool stop_iteration = false;
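
/*
 * Repeatedly insert and RCU-delete a multi-order entry at index 0 while
 * the iterator threads are walking the tree, then signal them to stop.
 */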
static void *creator_func(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
	struct radix_tree_root *tree = ptr;
	int i;

	for (i = 0; i < 10000; i++) {
		item_insert_order(tree, 0, order);
		item_delete_rcu(tree, 0);
	}

	stop_iteration = true;
	return NULL;
}
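
/*
 * Walk the tree under rcu_read_lock(), skipping retry entries and
 * sanity-checking every item found, until the creator thread finishes.
 */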
static void *iterator_func(void *ptr)
{
	XA_STATE(xas, ptr, 0);
	struct item *item;

	while (!stop_iteration) {
		rcu_read_lock();
		xas_for_each(&xas, item, ULONG_MAX) {
			if (xas_retry(&xas, item))
				continue;

			item_sanity(item, xas.xa_index);
		}
		rcu_read_unlock();
	}
	return NULL;
}
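
/*
 * Race lockless iteration against concurrent insertion and deletion:
 * one creator thread plus one iterator thread per remaining CPU.
 */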
static void multiorder_iteration_race(struct xarray *xa)
{
	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	pthread_t worker_thread[num_threads];
	int i;

	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
	for (i = 1; i < num_threads; i++)
		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);

	for (i = 0; i < num_threads; i++)
		pthread_join(worker_thread[i], NULL);

	item_kill_tree(xa);
}
static DEFINE_XARRAY(array);
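
/* Run every multi-order check against the file-scope XArray defined above. */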
void multiorder_checks(void)
{
	multiorder_iteration(&array);
	multiorder_tagged_iteration(&array);
	multiorder_iteration_race(&array);

	radix_tree_cpu_dead(0);
}
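
/*
 * Weak so that the combined radix-tree test runner can provide its own
 * main(); building this file standalone falls back to this harness.
 */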
int __weak main(void)
{
	rcu_register_thread();
	radix_tree_init();
	multiorder_checks();
	rcu_unregister_thread();
	return 0;
}