cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

epoll-wait.c (14745B)


// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_EVENTFD_SUPPORT
/*
 * Copyright (C) 2018 Davidlohr Bueso.
 *
 * This program benchmarks concurrent epoll_wait(2) monitoring multiple
 * file descriptors under one of two load balancing models. The first,
 * and default, is the single/combined queueing (which refers to a single
 * epoll instance for N worker threads):
 *
 *                          |---> [worker A]
 *                          |---> [worker B]
 *        [combined queue]  .---> [worker C]
 *                          |---> [worker D]
 *                          |---> [worker E]
 *
 * The second model, enabled via the --multiq option, uses multiple
 * queueing (which refers to one epoll instance per worker). For example,
 * short lived tcp connections in a high throughput httpd server will
 * distribute the accept()'ing connections across CPUs. In this case each
 * worker does a limited amount of processing.
 *
 *             [queue A]  ---> [worker]
 *             [queue B]  ---> [worker]
 *             [queue C]  ---> [worker]
 *             [queue D]  ---> [worker]
 *             [queue E]  ---> [worker]
 *
 * Naturally, the single queue will enforce more concurrency on the epoll
 * instance, and can therefore scale poorly compared to multiple queues.
 * However, these are raw microbenchmark numbers and must be taken with a
 * grain of salt when choosing how to make use of sys_epoll.
 *
 * Each thread has a number of private, nonblocking file descriptors,
 * referred to as fdmap. A writer thread will constantly be writing to
 * the fdmaps of all threads, minimizing each thread's chances of
 * epoll_wait not finding any ready read events and blocking, as this
 * is not what we want to stress. The size of the fdmap can be adjusted
 * by the user; enlarging the value will increase the chances of
 * epoll_wait(2) blocking, as the linear writer thread will take "longer",
 * at least at a high level.
 *
 * Note that because fds are private to each thread, this workload does
 * not stress scenarios where multiple tasks are awoken per ready IO; ie:
 * EPOLLEXCLUSIVE semantics.
 *
 * The end result/metric is throughput: number of ops/second, where an
 * operation consists of:
 *
 *   epoll_wait(2) + [others]
 *
 *        ... where [others] is the cost of re-adding the fd (EPOLLET),
 *            or rearming it (EPOLLONESHOT).
 *
 * The purpose of this program is to be useful for measuring kernel
 * related changes to sys_epoll, not for comparing different IO polling
 * methods. Hence everything is very ad hoc and it outputs raw
 * microbenchmark numbers. Also, this uses eventfd; similar tools tend
 * to use pipes or sockets, but the result is the same.
 */
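
/*
 * Example invocation (a sketch; the flags are from the options table
 * below, and the output shape is illustrative rather than verbatim):
 *
 *   $ perf bench epoll wait -t 4 -f 64 -E
 *
 * runs four worker threads, each monitoring 64 private eventfds with
 * edge-triggered semantics, for the default 8 second runtime.
 */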

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <unistd.h>

#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <perf/cpumap.h>

#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"

#include <err.h>

#define printinfo(fmt, arg...) \
	do { if (__verbose) { printf(fmt, ## arg); fflush(stdout); } } while (0)

static unsigned int nthreads = 0;
static unsigned int nsecs    = 8;
static bool wdone, done, __verbose, randomize, nonblocking;

/*
 * epoll related shared variables.
 */

/* Maximum number of nesting allowed inside epoll sets */
#define EPOLL_MAXNESTS 4

static int epollfd;
static int *epollfdp;
static bool noaffinity;
static unsigned int nested = 0;
static bool et; /* edge-trigger */
static bool oneshot;
static bool multiq; /* use an epoll instance per thread */

/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;

static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;

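/*
 * Per-worker state. ops counts completed epoll_wait(2) operations and
 * is the raw number behind the reported throughput.
 */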
struct worker {
	int tid;
	int epollfd; /* for --multiq */
	pthread_t thread;
	unsigned long ops;
	int *fdmap;
};

static const struct option options[] = {
	/* general benchmark options */
	OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
	OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
	OPT_UINTEGER('f', "nfds",    &nfds,  "Specify amount of file descriptors to monitor for each thread"),
	OPT_BOOLEAN( 'n', "noaffinity",  &noaffinity,   "Disables CPU affinity"),
	OPT_BOOLEAN('R', "randomize", &randomize,   "Enable random write behaviour (default is linear)"),
	OPT_BOOLEAN( 'v', "verbose", &__verbose, "Verbose mode"),

	/* epoll specific options */
	OPT_BOOLEAN( 'm', "multiq",  &multiq,   "Use multiple epoll instances (one per thread)"),
	OPT_BOOLEAN( 'B', "nonblocking", &nonblocking, "Nonblocking epoll_wait(2) behaviour"),
	OPT_UINTEGER( 'N', "nested",  &nested,   "Nesting level epoll hierarchy (default is 0, no nesting)"),
	OPT_BOOLEAN( 'S', "oneshot",  &oneshot,   "Use EPOLLONESHOT semantics"),
	OPT_BOOLEAN( 'E', "edge",  &et,   "Use Edge-triggered interface (default is LT)"),

	OPT_END()
};

static const char * const bench_epoll_wait_usage[] = {
	"perf bench epoll wait <options>",
	NULL
};


/*
 * Arrange the N elements of ARRAY in random order.
 * Only effective if N is much smaller than RAND_MAX;
 * if this may not be the case, use a better random
 * number generator. -- Ben Pfaff.
 */
static void shuffle(void *array, size_t n, size_t size)
{
	char *carray = array;
	void *aux;
	size_t i;

	if (n <= 1)
		return;

	aux = calloc(1, size);
	if (!aux)
		err(EXIT_FAILURE, "calloc");

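	/*
	 * Fisher-Yates: j is an (approximately) uniform draw from
	 * [i, n-1]; swap elements i and j, walking i forward.
	 */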
	for (i = 0; i < n - 1; ++i) {
		size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
		j *= size;

		memcpy(aux, &carray[j], size);
		memcpy(&carray[j], &carray[i*size], size);
		memcpy(&carray[i*size], aux, size);
	}

	free(aux);
}


static void *workerfn(void *arg)
{
	int fd, ret, r;
	struct worker *w = (struct worker *) arg;
	unsigned long ops = w->ops;
	struct epoll_event ev;
	uint64_t val;
	int to = nonblocking ? 0 : -1;
	int efd = multiq ? w->epollfd : epollfd;

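	/* startup barrier: signal readiness, then wait for the collective go */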
	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	do {
		/*
		 * Block indefinitely waiting for the IN event.
		 * In order to stress the epoll_wait(2) syscall,
		 * call it one event at a time, instead of with a
		 * larger batch (max)limit.
		 */
		do {
			ret = epoll_wait(efd, &ev, 1, to);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0)
			err(EXIT_FAILURE, "epoll_wait");

		fd = ev.data.fd;

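		/* drain the eventfd counter (a single 8-byte read resets it) */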
		do {
			r = read(fd, &val, sizeof(val));
		} while (!done && (r < 0 && errno == EAGAIN));

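		/*
		 * EPOLLET: the fd is still registered, so this ADD fails
		 * with EEXIST; the point is to charge an epoll_ctl(2) call
		 * to each operation, per the "[others]" cost described in
		 * the header comment.
		 */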
		if (et) {
			ev.events = EPOLLIN | EPOLLET;
			ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
		}

		if (oneshot) {
			/* rearm the file descriptor with a new event mask */
			ev.events |= EPOLLIN | EPOLLONESHOT;
			ret = epoll_ctl(efd, EPOLL_CTL_MOD, fd, &ev);
		}

		ops++;
	} while (!done);

	if (multiq)
		close(w->epollfd);

	w->ops = ops;
	return NULL;
}

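/*
 * Build a chain of 'nested' epoll instances: each one is registered in
 * the previous, and the head of the chain is registered in the main
 * (or per-worker) epoll fd, exercising epoll's nested-wakeup paths.
 */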
static void nest_epollfd(struct worker *w)
{
	unsigned int i;
	struct epoll_event ev;
	int efd = multiq ? w->epollfd : epollfd;

	if (nested > EPOLL_MAXNESTS)
		nested = EPOLL_MAXNESTS;

	epollfdp = calloc(nested, sizeof(*epollfdp));
	if (!epollfdp)
		err(EXIT_FAILURE, "calloc");

	for (i = 0; i < nested; i++) {
		epollfdp[i] = epoll_create(1);
		if (epollfdp[i] < 0)
			err(EXIT_FAILURE, "epoll_create");
	}

	ev.events = EPOLLHUP; /* anything */
	ev.data.u64 = i; /* any number */

	for (i = nested - 1; i; i--) {
		if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
			      epollfdp[i], &ev) < 0)
			err(EXIT_FAILURE, "epoll_ctl");
	}

	if (epoll_ctl(efd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
		err(EXIT_FAILURE, "epoll_ctl");
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
	       avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
}

static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
	pthread_attr_t thread_attr, *attrp = NULL;
	cpu_set_t *cpuset;
	unsigned int i, j;
	int ret = 0, events = EPOLLIN;
	int nrcpus;
	size_t size;

	if (oneshot)
		events |= EPOLLONESHOT;
	if (et)
		events |= EPOLLET;

	printinfo("starting worker/consumer %sthreads%s\n",
		  noaffinity ? "" : "CPU affinity ",
		  nonblocking ? " (nonblocking)" : "");
	if (!noaffinity)
		pthread_attr_init(&thread_attr);

	nrcpus = perf_cpu_map__nr(cpu);
	cpuset = CPU_ALLOC(nrcpus);
	BUG_ON(!cpuset);
	size = CPU_ALLOC_SIZE(nrcpus);

	for (i = 0; i < nthreads; i++) {
		struct worker *w = &worker[i];

		if (multiq) {
			w->epollfd = epoll_create(1);
			if (w->epollfd < 0)
				err(EXIT_FAILURE, "epoll_create");

			if (nested)
				nest_epollfd(w);
		}

		w->tid = i;
		w->fdmap = calloc(nfds, sizeof(int));
		if (!w->fdmap)
			return 1;

		for (j = 0; j < nfds; j++) {
			int efd = multiq ? w->epollfd : epollfd;
			struct epoll_event ev;

			w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
			if (w->fdmap[j] < 0)
				err(EXIT_FAILURE, "eventfd");

			ev.data.fd = w->fdmap[j];
			ev.events = events;

			ret = epoll_ctl(efd, EPOLL_CTL_ADD,
					w->fdmap[j], &ev);
			if (ret < 0)
				err(EXIT_FAILURE, "epoll_ctl");
		}

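		/* pin each worker to a CPU, round-robin over the online set */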
		if (!noaffinity) {
			CPU_ZERO_S(size, cpuset);
			CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
					size, cpuset);

			ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
			if (ret) {
				CPU_FREE(cpuset);
				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
			}

			attrp = &thread_attr;
		}

		ret = pthread_create(&w->thread, attrp, workerfn,
				     (void *)(struct worker *) w);
		if (ret) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_create");
		}
	}

	CPU_FREE(cpuset);
	if (!noaffinity)
		pthread_attr_destroy(&thread_attr);

	return ret;
}

static void *writerfn(void *p)
{
	struct worker *worker = p;
	size_t i, j, iter;
	const uint64_t val = 1;
	ssize_t sz;
	struct timespec ts = { .tv_sec = 0,
			       .tv_nsec = 500 };

	printinfo("starting writer-thread: doing %s writes ...\n",
		  randomize ? "random" : "linear");

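	/*
	 * Each full loop posts one event to every fd of every worker;
	 * with --randomize, both the worker order and each fdmap are
	 * shuffled first.
	 */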
	for (iter = 0; !wdone; iter++) {
		if (randomize) {
			shuffle((void *)worker, nthreads, sizeof(*worker));
		}

		for (i = 0; i < nthreads; i++) {
			struct worker *w = &worker[i];

			if (randomize) {
				shuffle((void *)w->fdmap, nfds, sizeof(int));
			}

			for (j = 0; j < nfds; j++) {
				do {
					sz = write(w->fdmap[j], &val, sizeof(val));
				} while (!wdone && (sz < 0 && errno == EAGAIN));
			}
		}

		nanosleep(&ts, NULL);
	}

	printinfo("exiting writer-thread (total full-loops: %zu)\n", iter);
	return NULL;
}

static int cmpworker(const void *p1, const void *p2)
{
	struct worker *w1 = (struct worker *) p1;
	struct worker *w2 = (struct worker *) p2;

	/* qsort(3) requires a signed three-way result, not a boolean */
	return w1->tid - w2->tid;
}

int bench_epoll_wait(int argc, const char **argv)
{
	int ret = 0;
	struct sigaction act;
	unsigned int i;
	struct worker *worker = NULL;
	struct perf_cpu_map *cpu;
	pthread_t wthread;
	struct rlimit rl, prevrl;

	argc = parse_options(argc, argv, options, bench_epoll_wait_usage, 0);
	if (argc) {
		usage_with_options(bench_epoll_wait_usage, options);
		exit(EXIT_FAILURE);
	}

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		goto errmem;

	/* a single, main epoll instance */
	if (!multiq) {
		epollfd = epoll_create(1);
		if (epollfd < 0)
			err(EXIT_FAILURE, "epoll_create");

		/*
		 * Deal with nested epolls, if any.
		 */
		if (nested)
			nest_epollfd(NULL);
	}

	printinfo("Using %s queue model\n", multiq ? "multi" : "single");
	printinfo("Nesting level(s): %d\n", nested);

	/* default to the number of CPUs and leave one for the writer pthread */
	if (!nthreads)
		nthreads = perf_cpu_map__nr(cpu) - 1;

	worker = calloc(nthreads, sizeof(*worker));
	if (!worker) {
		goto errmem;
	}

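	/*
	 * Each worker creates nfds eventfds (plus the epoll instances
	 * themselves); raise RLIMIT_NOFILE accordingly, with headroom.
	 */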
	if (getrlimit(RLIMIT_NOFILE, &prevrl))
		err(EXIT_FAILURE, "getrlimit");
	rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
	printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
		  (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
	if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
		err(EXIT_FAILURE, "setrlimit");

	printf("Run summary [PID %d]: %d threads monitoring%s on "
	       "%d file-descriptors for %d secs.\n\n",
	       getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)" : "", nfds, nsecs);

	init_stats(&throughput_stats);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	threads_starting = nthreads;

	gettimeofday(&bench__start, NULL);

	do_threads(worker, cpu);

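	/*
	 * Wait until every worker has parked on the condition variable,
	 * then release them all at once so they start measuring together.
	 */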
	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

	/*
	 * At this point the workers should be blocked waiting for read events
	 * to become ready. Launch the writer which will constantly be writing
	 * to each thread's fdmap.
	 */
	ret = pthread_create(&wthread, NULL, writerfn,
			     (void *)(struct worker *) worker);
	if (ret)
		err(EXIT_FAILURE, "pthread_create");

	sleep(nsecs);
	toggle_done(0, NULL, NULL);
	printinfo("main thread: toggling done\n");

	sleep(1); /* meh */
	wdone = true;
	ret = pthread_join(wthread, NULL);
	if (ret)
		err(EXIT_FAILURE, "pthread_join");

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

	/* sort the array back before reporting */
	if (randomize)
		qsort(worker, nthreads, sizeof(struct worker), cmpworker);

	for (i = 0; i < nthreads; i++) {
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;

		update_stats(&throughput_stats, t);

		if (nfds == 1)
			printf("[thread %2d] fdmap: %p [ %04ld ops/sec ]\n",
			       worker[i].tid, &worker[i].fdmap[0], t);
		else
			printf("[thread %2d] fdmap: %p ... %p [ %04ld ops/sec ]\n",
			       worker[i].tid, &worker[i].fdmap[0],
			       &worker[i].fdmap[nfds-1], t);
	}

	print_summary();

	close(epollfd);
	return ret;
errmem:
	err(EXIT_FAILURE, "calloc");
}
#endif // HAVE_EVENTFD_SUPPORT