cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

futex-requeue.c (8250B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
 *
 * futex-requeue: Block a bunch of threads on futex1 and requeue them
 *                on futex2, N at a time.
 *
 * This program is particularly useful to measure the latency of nthread
 * requeues without waking up any tasks (in the non-pi case) -- thus
 * mimicking a regular futex_wait.
 */

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>

#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"

#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>

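/* Waiters block on futex1 and are requeued onto futex2 by the main thread. */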
static u_int32_t futex1 = 0, futex2 = 0;

static pthread_t *worker;
static bool done = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
static unsigned int threads_starting;
static int futex_flag = 0;

static struct bench_futex_parameters params = {
	/*
	 * How many tasks to requeue at a time.
	 * Default to 1 in order to make the kernel work more.
	 */
	.nrequeue = 1,
};

static const struct option options[] = {
	OPT_UINTEGER('t', "threads",  &params.nthreads, "Specify amount of threads"),
	OPT_UINTEGER('q', "nrequeue", &params.nrequeue, "Specify amount of threads to requeue at once"),
	OPT_BOOLEAN( 's', "silent",   &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared",   &params.fshared, "Use shared futexes instead of private ones"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
	OPT_BOOLEAN( 'B', "broadcast", &params.broadcast, "Requeue all threads at once"),
	OPT_BOOLEAN( 'p', "pi", &params.pi, "Use PI-aware variants of FUTEX_CMP_REQUEUE"),

	OPT_END()
};

static const char * const bench_futex_requeue_usage[] = {
	"perf bench futex requeue <options>",
	NULL
};

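/*
 * Example invocation (illustrative values): block 64 waiters and requeue
 * them 8 at a time:
 *
 *   perf bench futex requeue -t 64 -q 8
 */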
static void print_summary(void)
{
	double requeuetime_avg = avg_stats(&requeuetime_stats);
	double requeuetime_stddev = stddev_stats(&requeuetime_stats);
	unsigned int requeued_avg = avg_stats(&requeued_stats);

	printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n",
	       requeued_avg,
	       params.nthreads,
	       requeuetime_avg / USEC_PER_MSEC,
	       rel_stddev_stats(requeuetime_stddev, requeuetime_avg));
}

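/*
 * Each worker signals the parent once it is ready, waits for the release
 * broadcast, then blocks on futex1 until it has been requeued to futex2
 * and eventually woken up.
 */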
static void *workerfn(void *arg __maybe_unused)
{
	int ret;

	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	while (1) {
		if (!params.pi) {
			ret = futex_wait(&futex1, 0, NULL, futex_flag);
			if (!ret)
				break;

			if (ret && errno != EAGAIN) {
				if (!params.silent)
					warnx("futex_wait");
				break;
			}
		} else {
			ret = futex_wait_requeue_pi(&futex1, 0, &futex2,
						    NULL, futex_flag);
			if (!ret) {
				/* got the lock at futex2 */
				futex_unlock_pi(&futex2, futex_flag);
				break;
			}

			if (ret && errno != EAGAIN) {
				if (!params.silent)
					warnx("futex_wait_requeue_pi");
				break;
			}
		}
	}

	return NULL;
}

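/*
 * Create all worker threads, pinning them round-robin across the CPUs in
 * the map before they block.
 */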
static void block_threads(pthread_t *w,
			  pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
{
	cpu_set_t *cpuset;
	unsigned int i;
	int nrcpus = perf_cpu_map__nr(cpu);
	size_t size;

	threads_starting = params.nthreads;

	cpuset = CPU_ALLOC(nrcpus);
	BUG_ON(!cpuset);
	size = CPU_ALLOC_SIZE(nrcpus);

	/* create and block all threads */
	for (i = 0; i < params.nthreads; i++) {
		CPU_ZERO_S(size, cpuset);
		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);

		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
		}

		if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_create");
		}
	}
	CPU_FREE(cpuset);
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	done = true;
}

int bench_futex_requeue(int argc, const char **argv)
{
	int ret = 0;
	unsigned int i, j;
	struct sigaction act;
	pthread_attr_t thread_attr;
	struct perf_cpu_map *cpu;

	argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
	if (argc)
		goto err;

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		err(EXIT_FAILURE, "cpu_map__new");

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	if (!params.nthreads)
		params.nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		err(EXIT_FAILURE, "calloc");

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	if (params.nrequeue > params.nthreads)
		params.nrequeue = params.nthreads;

	if (params.broadcast)
		params.nrequeue = params.nthreads;

	printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %s%p), "
	       "%d at a time.\n\n",  getpid(), params.nthreads,
	       params.fshared ? "shared":"private", &futex1,
	       params.pi ? "PI ": "", &futex2, params.nrequeue);

	init_stats(&requeued_stats);
	init_stats(&requeuetime_stats);
	pthread_attr_init(&thread_attr);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

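	/*
	 * Each run blocks params.nthreads workers on futex1 and then times
	 * how long it takes to requeue all of them onto futex2,
	 * params.nrequeue at a time.
	 */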
	for (j = 0; j < bench_repeat && !done; j++) {
		unsigned int nrequeued = 0, wakeups = 0;
		struct timeval start, end, runtime;

		/* create, launch & block all threads */
		block_threads(worker, thread_attr, cpu);

		/* make sure all threads are already blocked */
		pthread_mutex_lock(&thread_lock);
		while (threads_starting)
			pthread_cond_wait(&thread_parent, &thread_lock);
		pthread_cond_broadcast(&thread_worker);
		pthread_mutex_unlock(&thread_lock);

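		/*
		 * The condvar handshake only guarantees the workers reached
		 * pthread_cond_wait(); give them time to actually block on
		 * futex1 before requeueing starts.
		 */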
		usleep(100000);

		/* Ok, all threads are patiently blocked, start requeueing */
		gettimeofday(&start, NULL);
		while (nrequeued < params.nthreads) {
			int r;

			/*
			 * For the regular non-pi case, do not wakeup any tasks
			 * blocked on futex1, allowing us to really measure
			 * futex_wait functionality. For the PI case the first
			 * waiter is always awoken.
			 */
			if (!params.pi) {
				r = futex_cmp_requeue(&futex1, 0, &futex2, 0,
						      params.nrequeue,
						      futex_flag);
			} else {
				r = futex_cmp_requeue_pi(&futex1, 0, &futex2,
							 params.nrequeue,
							 futex_flag);
				wakeups++; /* assume no error */
			}

			if (r < 0)
				err(EXIT_FAILURE, "couldn't requeue from %p to %p",
				    &futex1, &futex2);

			nrequeued += r;
		}

		gettimeofday(&end, NULL);
		timersub(&end, &start, &runtime);

		update_stats(&requeued_stats, nrequeued);
		update_stats(&requeuetime_stats, runtime.tv_usec);

		if (!params.silent) {
			if (!params.pi)
				printf("[Run %d]: Requeued %d of %d threads in "
				       "%.4f ms\n", j + 1, nrequeued,
				       params.nthreads,
				       runtime.tv_usec / (double)USEC_PER_MSEC);
			else {
				nrequeued -= wakeups;
				printf("[Run %d]: Awoke and Requeued (%d+%d) of "
				       "%d threads in %.4f ms\n",
				       j + 1, wakeups, nrequeued,
				       params.nthreads,
				       runtime.tv_usec / (double)USEC_PER_MSEC);
			}

		}

		if (!params.pi) {
			/* everybody should be blocked on futex2, wake'em up */
			nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
			if (params.nthreads != nrequeued)
				warnx("couldn't wakeup all tasks (%d/%d)",
				      nrequeued, params.nthreads);
		}

		for (i = 0; i < params.nthreads; i++) {
			ret = pthread_join(worker[i], NULL);
			if (ret)
				err(EXIT_FAILURE, "pthread_join");
		}
	}

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);
	pthread_attr_destroy(&thread_attr);

	print_summary();

	free(worker);
	perf_cpu_map__put(cpu);
	return ret;
err:
	usage_with_options(bench_futex_requeue_usage, options);
	exit(EXIT_FAILURE);
}