cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

evlist.c (51980B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
      4 *
      5 * Parts came from builtin-{top,stat,record}.c, see those files for further
      6 * copyright notes.
      7 */
      8#include <api/fs/fs.h>
      9#include <errno.h>
     10#include <inttypes.h>
     11#include <poll.h>
     12#include "cpumap.h"
     13#include "util/mmap.h"
     14#include "thread_map.h"
     15#include "target.h"
     16#include "evlist.h"
     17#include "evsel.h"
     18#include "debug.h"
     19#include "units.h"
     20#include "bpf_counter.h"
     21#include <internal/lib.h> // page_size
     22#include "affinity.h"
     23#include "../perf.h"
     24#include "asm/bug.h"
     25#include "bpf-event.h"
     26#include "util/string2.h"
     27#include "util/perf_api_probe.h"
     28#include "util/evsel_fprintf.h"
     29#include "util/evlist-hybrid.h"
     30#include "util/pmu.h"
     31#include <signal.h>
     32#include <unistd.h>
     33#include <sched.h>
     34#include <stdlib.h>
     35
     36#include "parse-events.h"
     37#include <subcmd/parse-options.h>
     38
     39#include <fcntl.h>
     40#include <sys/ioctl.h>
     41#include <sys/mman.h>
     42#include <sys/prctl.h>
     43
     44#include <linux/bitops.h>
     45#include <linux/hash.h>
     46#include <linux/log2.h>
     47#include <linux/err.h>
     48#include <linux/string.h>
     49#include <linux/zalloc.h>
     50#include <perf/evlist.h>
     51#include <perf/evsel.h>
     52#include <perf/cpumap.h>
     53#include <perf/mmap.h>
     54
     55#include <internal/xyarray.h>
     56
     57#ifdef LACKS_SIGQUEUE_PROTOTYPE
     58int sigqueue(pid_t pid, int sig, const union sigval value);
     59#endif
     60
     61#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
     62#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
     63
     64void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
     65		  struct perf_thread_map *threads)
     66{
     67	perf_evlist__init(&evlist->core);
     68	perf_evlist__set_maps(&evlist->core, cpus, threads);
     69	evlist->workload.pid = -1;
     70	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
     71	evlist->ctl_fd.fd = -1;
     72	evlist->ctl_fd.ack = -1;
     73	evlist->ctl_fd.pos = -1;
     74}
     75
     76struct evlist *evlist__new(void)
     77{
     78	struct evlist *evlist = zalloc(sizeof(*evlist));
     79
     80	if (evlist != NULL)
     81		evlist__init(evlist, NULL, NULL);
     82
     83	return evlist;
     84}
     85
     86struct evlist *evlist__new_default(void)
     87{
     88	struct evlist *evlist = evlist__new();
     89
     90	if (evlist && evlist__add_default(evlist)) {
     91		evlist__delete(evlist);
     92		evlist = NULL;
     93	}
     94
     95	return evlist;
     96}
     97
     98struct evlist *evlist__new_dummy(void)
     99{
    100	struct evlist *evlist = evlist__new();
    101
    102	if (evlist && evlist__add_dummy(evlist)) {
    103		evlist__delete(evlist);
    104		evlist = NULL;
    105	}
    106
    107	return evlist;
    108}
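        /*
         * Illustrative usage sketch (not part of this file): the
         * constructors above pair with evlist__delete(), e.g.:
         *
         *	struct evlist *evlist = evlist__new_default();
         *
         *	if (evlist == NULL)
         *		return -ENOMEM;
         *	// ... evlist__open(), evlist__mmap(), read events ...
         *	evlist__delete(evlist);
         */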
    109
    110/**
    111 * evlist__set_id_pos - set the positions of event ids.
    112 * @evlist: selected event list
    113 *
    114 * Events with compatible sample types all have the same id_pos
    115 * and is_pos.  For convenience, put a copy on evlist.
    116 */
    117void evlist__set_id_pos(struct evlist *evlist)
    118{
    119	struct evsel *first = evlist__first(evlist);
    120
    121	evlist->id_pos = first->id_pos;
    122	evlist->is_pos = first->is_pos;
    123}
    124
    125static void evlist__update_id_pos(struct evlist *evlist)
    126{
    127	struct evsel *evsel;
    128
    129	evlist__for_each_entry(evlist, evsel)
    130		evsel__calc_id_pos(evsel);
    131
    132	evlist__set_id_pos(evlist);
    133}
    134
    135static void evlist__purge(struct evlist *evlist)
    136{
    137	struct evsel *pos, *n;
    138
    139	evlist__for_each_entry_safe(evlist, n, pos) {
    140		list_del_init(&pos->core.node);
    141		pos->evlist = NULL;
    142		evsel__delete(pos);
    143	}
    144
    145	evlist->core.nr_entries = 0;
    146}
    147
    148void evlist__exit(struct evlist *evlist)
    149{
    150	zfree(&evlist->mmap);
    151	zfree(&evlist->overwrite_mmap);
    152	perf_evlist__exit(&evlist->core);
    153}
    154
    155void evlist__delete(struct evlist *evlist)
    156{
    157	if (evlist == NULL)
    158		return;
    159
    160	evlist__munmap(evlist);
    161	evlist__close(evlist);
    162	evlist__purge(evlist);
    163	evlist__exit(evlist);
    164	free(evlist);
    165}
    166
    167void evlist__add(struct evlist *evlist, struct evsel *entry)
    168{
    169	perf_evlist__add(&evlist->core, &entry->core);
    170	entry->evlist = evlist;
    171	entry->tracking = !entry->core.idx;
    172
    173	if (evlist->core.nr_entries == 1)
    174		evlist__set_id_pos(evlist);
    175}
    176
    177void evlist__remove(struct evlist *evlist, struct evsel *evsel)
    178{
    179	evsel->evlist = NULL;
    180	perf_evlist__remove(&evlist->core, &evsel->core);
    181}
    182
    183void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
    184{
    185	while (!list_empty(list)) {
    186		struct evsel *evsel, *temp, *leader = NULL;
    187
    188		__evlist__for_each_entry_safe(list, temp, evsel) {
    189			list_del_init(&evsel->core.node);
    190			evlist__add(evlist, evsel);
    191			leader = evsel;
    192			break;
    193		}
    194
    195		__evlist__for_each_entry_safe(list, temp, evsel) {
    196			if (evsel__has_leader(evsel, leader)) {
    197				list_del_init(&evsel->core.node);
    198				evlist__add(evlist, evsel);
    199			}
    200		}
    201	}
    202}
    203
    204int __evlist__set_tracepoints_handlers(struct evlist *evlist,
    205				       const struct evsel_str_handler *assocs, size_t nr_assocs)
    206{
    207	size_t i;
    208	int err;
    209
    210	for (i = 0; i < nr_assocs; i++) {
     211		// When adding a handler for an event not in this evlist, just ignore it.
    212		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
    213		if (evsel == NULL)
    214			continue;
    215
    216		err = -EEXIST;
    217		if (evsel->handler != NULL)
    218			goto out;
    219		evsel->handler = assocs[i].handler;
    220	}
    221
    222	err = 0;
    223out:
    224	return err;
    225}
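        /*
         * Example (illustrative): callers typically wrap this with the
         * evlist__set_tracepoints_handlers() macro and a handler table,
         * along the lines of (handler functions hypothetical):
         *
         *	const struct evsel_str_handler handlers[] = {
         *		{ "sched:sched_switch", process_sched_switch },
         *		{ "sched:sched_wakeup", process_sched_wakeup },
         *	};
         *
         *	if (evlist__set_tracepoints_handlers(evlist, handlers))
         *		goto out_error;
         */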
    226
    227void evlist__set_leader(struct evlist *evlist)
    228{
    229	perf_evlist__set_leader(&evlist->core);
    230}
    231
    232int __evlist__add_default(struct evlist *evlist, bool precise)
    233{
    234	struct evsel *evsel;
    235
    236	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
    237				  PERF_COUNT_HW_CPU_CYCLES);
    238	if (evsel == NULL)
    239		return -ENOMEM;
    240
    241	evlist__add(evlist, evsel);
    242	return 0;
    243}
    244
    245static struct evsel *evlist__dummy_event(struct evlist *evlist)
    246{
    247	struct perf_event_attr attr = {
    248		.type	= PERF_TYPE_SOFTWARE,
    249		.config = PERF_COUNT_SW_DUMMY,
    250		.size	= sizeof(attr), /* to capture ABI version */
    251	};
    252
    253	return evsel__new_idx(&attr, evlist->core.nr_entries);
    254}
    255
    256int evlist__add_dummy(struct evlist *evlist)
    257{
    258	struct evsel *evsel = evlist__dummy_event(evlist);
    259
    260	if (evsel == NULL)
    261		return -ENOMEM;
    262
    263	evlist__add(evlist, evsel);
    264	return 0;
    265}
    266
    267static void evlist__add_on_all_cpus(struct evlist *evlist, struct evsel *evsel)
    268{
    269	evsel->core.system_wide = true;
    270
    271	/*
    272	 * All CPUs.
    273	 *
     274	 * Note that perf_event_open() does not accept CPUs that are not
     275	 * online, so in fact this CPU list will include only the online CPUs.
    276	 */
    277	perf_cpu_map__put(evsel->core.own_cpus);
    278	evsel->core.own_cpus = perf_cpu_map__new(NULL);
    279	perf_cpu_map__put(evsel->core.cpus);
    280	evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
    281
    282	/* No threads */
    283	perf_thread_map__put(evsel->core.threads);
    284	evsel->core.threads = perf_thread_map__new_dummy();
    285
    286	evlist__add(evlist, evsel);
    287}
    288
    289struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
    290{
    291	struct evsel *evsel = evlist__dummy_event(evlist);
    292
    293	if (!evsel)
    294		return NULL;
    295
    296	evsel->core.attr.exclude_kernel = 1;
    297	evsel->core.attr.exclude_guest = 1;
    298	evsel->core.attr.exclude_hv = 1;
    299	evsel->core.attr.freq = 0;
    300	evsel->core.attr.sample_period = 1;
    301	evsel->no_aux_samples = true;
    302	evsel->name = strdup("dummy:u");
    303
    304	if (system_wide)
    305		evlist__add_on_all_cpus(evlist, evsel);
    306	else
    307		evlist__add(evlist, evsel);
    308
    309	return evsel;
    310}
    311
    312static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
    313{
    314	struct evsel *evsel, *n;
    315	LIST_HEAD(head);
    316	size_t i;
    317
    318	for (i = 0; i < nr_attrs; i++) {
    319		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
    320		if (evsel == NULL)
    321			goto out_delete_partial_list;
    322		list_add_tail(&evsel->core.node, &head);
    323	}
    324
    325	evlist__splice_list_tail(evlist, &head);
    326
    327	return 0;
    328
    329out_delete_partial_list:
    330	__evlist__for_each_entry_safe(&head, n, evsel)
    331		evsel__delete(evsel);
    332	return -1;
    333}
    334
    335int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
    336{
    337	size_t i;
    338
    339	for (i = 0; i < nr_attrs; i++)
    340		event_attr_init(attrs + i);
    341
    342	return evlist__add_attrs(evlist, attrs, nr_attrs);
    343}
    344
    345__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
    346{
    347	return 0;
    348}
    349
    350struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
    351{
    352	struct evsel *evsel;
    353
    354	evlist__for_each_entry(evlist, evsel) {
    355		if (evsel->core.attr.type   == PERF_TYPE_TRACEPOINT &&
    356		    (int)evsel->core.attr.config == id)
    357			return evsel;
    358	}
    359
    360	return NULL;
    361}
    362
    363struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
    364{
    365	struct evsel *evsel;
    366
    367	evlist__for_each_entry(evlist, evsel) {
    368		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
    369		    (strcmp(evsel->name, name) == 0))
    370			return evsel;
    371	}
    372
    373	return NULL;
    374}
    375
    376int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
    377{
    378	struct evsel *evsel = evsel__newtp(sys, name);
    379
    380	if (IS_ERR(evsel))
    381		return -1;
    382
    383	evsel->handler = handler;
    384	evlist__add(evlist, evsel);
    385	return 0;
    386}
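        /*
         * Example (illustrative): evlist__add_newtp(evlist, "sched",
         * "sched_switch", &process_sched_switch) creates the tracepoint
         * evsel and registers the handler in one step (handler name
         * hypothetical).
         */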
    387
    388struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
    389{
    390	struct evlist_cpu_iterator itr = {
    391		.container = evlist,
    392		.evsel = NULL,
    393		.cpu_map_idx = 0,
    394		.evlist_cpu_map_idx = 0,
    395		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
    396		.cpu = (struct perf_cpu){ .cpu = -1},
    397		.affinity = affinity,
    398	};
    399
    400	if (evlist__empty(evlist)) {
    401		/* Ensure the empty list doesn't iterate. */
    402		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
    403	} else {
    404		itr.evsel = evlist__first(evlist);
    405		if (itr.affinity) {
    406			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
    407			affinity__set(itr.affinity, itr.cpu.cpu);
    408			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
    409			/*
    410			 * If this CPU isn't in the evsel's cpu map then advance
    411			 * through the list.
    412			 */
    413			if (itr.cpu_map_idx == -1)
    414				evlist_cpu_iterator__next(&itr);
    415		}
    416	}
    417	return itr;
    418}
    419
    420void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
    421{
    422	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
    423		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
    424		evlist_cpu_itr->cpu_map_idx =
    425			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
    426					  evlist_cpu_itr->cpu);
    427		if (evlist_cpu_itr->cpu_map_idx != -1)
    428			return;
    429	}
    430	evlist_cpu_itr->evlist_cpu_map_idx++;
    431	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
    432		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
    433		evlist_cpu_itr->cpu =
    434			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
    435					  evlist_cpu_itr->evlist_cpu_map_idx);
    436		if (evlist_cpu_itr->affinity)
    437			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
    438		evlist_cpu_itr->cpu_map_idx =
    439			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
    440					  evlist_cpu_itr->cpu);
    441		/*
    442		 * If this CPU isn't in the evsel's cpu map then advance through
    443		 * the list.
    444		 */
    445		if (evlist_cpu_itr->cpu_map_idx == -1)
    446			evlist_cpu_iterator__next(evlist_cpu_itr);
    447	}
    448}
    449
    450bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
    451{
    452	return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
    453}
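        /*
         * Illustrative sketch of how the iterator above is driven; this is
         * the same pattern __evlist__disable() and __evlist__enable() use
         * below (do_something() is hypothetical):
         *
         *	struct evlist_cpu_iterator evlist_cpu_itr;
         *	struct affinity affinity;
         *
         *	if (affinity__setup(&affinity) < 0)
         *		return;
         *	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
         *		// one (evsel, cpu_map_idx) pair per iteration,
         *		// grouped by CPU so the affinity mask is switched
         *		// only when the CPU changes
         *		do_something(evlist_cpu_itr.evsel,
         *			     evlist_cpu_itr.cpu_map_idx);
         *	}
         *	affinity__cleanup(&affinity);
         */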
    454
    455static int evsel__strcmp(struct evsel *pos, char *evsel_name)
    456{
    457	if (!evsel_name)
    458		return 0;
    459	if (evsel__is_dummy_event(pos))
    460		return 1;
    461	return strcmp(pos->name, evsel_name);
    462}
    463
    464static int evlist__is_enabled(struct evlist *evlist)
    465{
    466	struct evsel *pos;
    467
    468	evlist__for_each_entry(evlist, pos) {
    469		if (!evsel__is_group_leader(pos) || !pos->core.fd)
    470			continue;
    471		/* If at least one event is enabled, evlist is enabled. */
    472		if (!pos->disabled)
    473			return true;
    474	}
    475	return false;
    476}
    477
    478static void __evlist__disable(struct evlist *evlist, char *evsel_name)
    479{
    480	struct evsel *pos;
    481	struct evlist_cpu_iterator evlist_cpu_itr;
    482	struct affinity saved_affinity, *affinity = NULL;
    483	bool has_imm = false;
    484
    485	// See explanation in evlist__close()
    486	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
    487		if (affinity__setup(&saved_affinity) < 0)
    488			return;
    489		affinity = &saved_affinity;
    490	}
    491
    492	/* Disable 'immediate' events last */
    493	for (int imm = 0; imm <= 1; imm++) {
    494		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
    495			pos = evlist_cpu_itr.evsel;
    496			if (evsel__strcmp(pos, evsel_name))
    497				continue;
    498			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
    499				continue;
    500			if (pos->immediate)
    501				has_imm = true;
    502			if (pos->immediate != imm)
    503				continue;
    504			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
    505		}
    506		if (!has_imm)
    507			break;
    508	}
    509
    510	affinity__cleanup(affinity);
    511	evlist__for_each_entry(evlist, pos) {
    512		if (evsel__strcmp(pos, evsel_name))
    513			continue;
    514		if (!evsel__is_group_leader(pos) || !pos->core.fd)
    515			continue;
    516		pos->disabled = true;
    517	}
    518
    519	/*
     520	 * If we disabled only a single event, we need to check
     521	 * the enabled state of the evlist manually.
    522	 */
    523	if (evsel_name)
    524		evlist->enabled = evlist__is_enabled(evlist);
    525	else
    526		evlist->enabled = false;
    527}
    528
    529void evlist__disable(struct evlist *evlist)
    530{
    531	__evlist__disable(evlist, NULL);
    532}
    533
    534void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
    535{
    536	__evlist__disable(evlist, evsel_name);
    537}
    538
    539static void __evlist__enable(struct evlist *evlist, char *evsel_name)
    540{
    541	struct evsel *pos;
    542	struct evlist_cpu_iterator evlist_cpu_itr;
    543	struct affinity saved_affinity, *affinity = NULL;
    544
    545	// See explanation in evlist__close()
    546	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
    547		if (affinity__setup(&saved_affinity) < 0)
    548			return;
    549		affinity = &saved_affinity;
    550	}
    551
    552	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
    553		pos = evlist_cpu_itr.evsel;
    554		if (evsel__strcmp(pos, evsel_name))
    555			continue;
    556		if (!evsel__is_group_leader(pos) || !pos->core.fd)
    557			continue;
    558		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
    559	}
    560	affinity__cleanup(affinity);
    561	evlist__for_each_entry(evlist, pos) {
    562		if (evsel__strcmp(pos, evsel_name))
    563			continue;
    564		if (!evsel__is_group_leader(pos) || !pos->core.fd)
    565			continue;
    566		pos->disabled = false;
    567	}
    568
    569	/*
     570	 * Even a single event sets the 'enabled' state for the evlist,
     571	 * so that the toggle can work properly and later flip back to
     572	 * the 'disabled' state.
    573	 */
    574	evlist->enabled = true;
    575}
    576
    577void evlist__enable(struct evlist *evlist)
    578{
    579	__evlist__enable(evlist, NULL);
    580}
    581
    582void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
    583{
    584	__evlist__enable(evlist, evsel_name);
    585}
    586
    587void evlist__toggle_enable(struct evlist *evlist)
    588{
    589	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
    590}
    591
    592int evlist__add_pollfd(struct evlist *evlist, int fd)
    593{
    594	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
    595}
    596
    597int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
    598{
    599	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
    600}
    601
    602#ifdef HAVE_EVENTFD_SUPPORT
    603int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
    604{
    605	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
    606				       fdarray_flag__nonfilterable);
    607}
    608#endif
    609
    610int evlist__poll(struct evlist *evlist, int timeout)
    611{
    612	return perf_evlist__poll(&evlist->core, timeout);
    613}
    614
    615struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
    616{
    617	struct hlist_head *head;
    618	struct perf_sample_id *sid;
    619	int hash;
    620
    621	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
    622	head = &evlist->core.heads[hash];
    623
    624	hlist_for_each_entry(sid, head, node)
    625		if (sid->id == id)
    626			return sid;
    627
    628	return NULL;
    629}
    630
    631struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
    632{
    633	struct perf_sample_id *sid;
    634
    635	if (evlist->core.nr_entries == 1 || !id)
    636		return evlist__first(evlist);
    637
    638	sid = evlist__id2sid(evlist, id);
    639	if (sid)
    640		return container_of(sid->evsel, struct evsel, core);
    641
    642	if (!evlist__sample_id_all(evlist))
    643		return evlist__first(evlist);
    644
    645	return NULL;
    646}
    647
    648struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
    649{
    650	struct perf_sample_id *sid;
    651
    652	if (!id)
    653		return NULL;
    654
    655	sid = evlist__id2sid(evlist, id);
    656	if (sid)
    657		return container_of(sid->evsel, struct evsel, core);
    658
    659	return NULL;
    660}
    661
    662static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
    663{
    664	const __u64 *array = event->sample.array;
    665	ssize_t n;
    666
    667	n = (event->header.size - sizeof(event->header)) >> 3;
    668
    669	if (event->header.type == PERF_RECORD_SAMPLE) {
    670		if (evlist->id_pos >= n)
    671			return -1;
    672		*id = array[evlist->id_pos];
    673	} else {
    674		if (evlist->is_pos > n)
    675			return -1;
    676		n -= evlist->is_pos;
    677		*id = array[n];
    678	}
    679	return 0;
    680}
    681
    682struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
    683{
    684	struct evsel *first = evlist__first(evlist);
    685	struct hlist_head *head;
    686	struct perf_sample_id *sid;
    687	int hash;
    688	u64 id;
    689
    690	if (evlist->core.nr_entries == 1)
    691		return first;
    692
    693	if (!first->core.attr.sample_id_all &&
    694	    event->header.type != PERF_RECORD_SAMPLE)
    695		return first;
    696
    697	if (evlist__event2id(evlist, event, &id))
    698		return NULL;
    699
    700	/* Synthesized events have an id of zero */
    701	if (!id)
    702		return first;
    703
    704	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
    705	head = &evlist->core.heads[hash];
    706
    707	hlist_for_each_entry(sid, head, node) {
    708		if (sid->id == id)
    709			return container_of(sid->evsel, struct evsel, core);
    710	}
    711	return NULL;
    712}
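        /*
         * Illustrative note: evlist__event2id() above reads the id from
         * id_pos words after the start of the sample array for
         * PERF_RECORD_SAMPLE events, and from is_pos words before its end
         * for non-sample events carrying a sample_id_all trailer; that is
         * why both positions must agree across the whole evlist (see
         * evlist__valid_sample_type()).
         */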
    713
    714static int evlist__set_paused(struct evlist *evlist, bool value)
    715{
    716	int i;
    717
    718	if (!evlist->overwrite_mmap)
    719		return 0;
    720
    721	for (i = 0; i < evlist->core.nr_mmaps; i++) {
    722		int fd = evlist->overwrite_mmap[i].core.fd;
    723		int err;
    724
    725		if (fd < 0)
    726			continue;
    727		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
    728		if (err)
    729			return err;
    730	}
    731	return 0;
    732}
    733
    734static int evlist__pause(struct evlist *evlist)
    735{
    736	return evlist__set_paused(evlist, true);
    737}
    738
    739static int evlist__resume(struct evlist *evlist)
    740{
    741	return evlist__set_paused(evlist, false);
    742}
    743
    744static void evlist__munmap_nofree(struct evlist *evlist)
    745{
    746	int i;
    747
    748	if (evlist->mmap)
    749		for (i = 0; i < evlist->core.nr_mmaps; i++)
    750			perf_mmap__munmap(&evlist->mmap[i].core);
    751
    752	if (evlist->overwrite_mmap)
    753		for (i = 0; i < evlist->core.nr_mmaps; i++)
    754			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
    755}
    756
    757void evlist__munmap(struct evlist *evlist)
    758{
    759	evlist__munmap_nofree(evlist);
    760	zfree(&evlist->mmap);
    761	zfree(&evlist->overwrite_mmap);
    762}
    763
    764static void perf_mmap__unmap_cb(struct perf_mmap *map)
    765{
    766	struct mmap *m = container_of(map, struct mmap, core);
    767
    768	mmap__munmap(m);
    769}
    770
    771static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
    772				       bool overwrite)
    773{
    774	int i;
    775	struct mmap *map;
    776
    777	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
    778	if (!map)
    779		return NULL;
    780
    781	for (i = 0; i < evlist->core.nr_mmaps; i++) {
    782		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
    783
    784		/*
    785		 * When the perf_mmap() call is made we grab one refcount, plus
    786		 * one extra to let perf_mmap__consume() get the last
    787		 * events after all real references (perf_mmap__get()) are
    788		 * dropped.
    789		 *
    790		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
    791		 * thus does perf_mmap__get() on it.
    792		 */
    793		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
    794	}
    795
    796	return map;
    797}
    798
    799static void
    800perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
    801			 struct perf_evsel *_evsel,
    802			 struct perf_mmap_param *_mp,
    803			 int idx)
    804{
    805	struct evlist *evlist = container_of(_evlist, struct evlist, core);
    806	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
    807	struct evsel *evsel = container_of(_evsel, struct evsel, core);
    808
    809	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
    810}
    811
    812static struct perf_mmap*
    813perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
    814{
    815	struct evlist *evlist = container_of(_evlist, struct evlist, core);
    816	struct mmap *maps;
    817
    818	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
    819
    820	if (!maps) {
    821		maps = evlist__alloc_mmap(evlist, overwrite);
    822		if (!maps)
    823			return NULL;
    824
    825		if (overwrite) {
    826			evlist->overwrite_mmap = maps;
    827			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
    828				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
    829		} else {
    830			evlist->mmap = maps;
    831		}
    832	}
    833
    834	return &maps[idx].core;
    835}
    836
    837static int
    838perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
    839			  int output, struct perf_cpu cpu)
    840{
    841	struct mmap *map = container_of(_map, struct mmap, core);
    842	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
    843
    844	return mmap__mmap(map, mp, output, cpu);
    845}
    846
    847unsigned long perf_event_mlock_kb_in_pages(void)
    848{
    849	unsigned long pages;
    850	int max;
    851
    852	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
    853		/*
     854		 * Pick a value that was good once upon a time, i.e. things look
     855		 * strange since we can't read the sysctl value, but let's not
     856		 * die yet...
    857		 */
    858		max = 512;
    859	} else {
    860		max -= (page_size / 1024);
    861	}
    862
    863	pages = (max * 1024) / page_size;
    864	if (!is_power_of_2(pages))
    865		pages = rounddown_pow_of_two(pages);
    866
    867	return pages;
    868}
    869
    870size_t evlist__mmap_size(unsigned long pages)
    871{
    872	if (pages == UINT_MAX)
    873		pages = perf_event_mlock_kb_in_pages();
    874	else if (!is_power_of_2(pages))
    875		return 0;
    876
    877	return (pages + 1) * page_size;
    878}
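        /*
         * Worked example (illustrative, assuming 4 KiB pages and the usual
         * kernel.perf_event_mlock_kb default of 516 kB):
         * perf_event_mlock_kb_in_pages() yields (516 - 4) kB = 512 kB =
         * 128 pages, already a power of two, so
         * evlist__mmap_size(UINT_MAX) = (128 + 1) * 4096 = 528384 bytes:
         * 128 data pages plus one page for the ring-buffer header.
         */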
    879
    880static long parse_pages_arg(const char *str, unsigned long min,
    881			    unsigned long max)
    882{
    883	unsigned long pages, val;
    884	static struct parse_tag tags[] = {
    885		{ .tag  = 'B', .mult = 1       },
    886		{ .tag  = 'K', .mult = 1 << 10 },
    887		{ .tag  = 'M', .mult = 1 << 20 },
    888		{ .tag  = 'G', .mult = 1 << 30 },
    889		{ .tag  = 0 },
    890	};
    891
    892	if (str == NULL)
    893		return -EINVAL;
    894
    895	val = parse_tag_value(str, tags);
    896	if (val != (unsigned long) -1) {
     897		/* we got a file size value */
    898		pages = PERF_ALIGN(val, page_size) / page_size;
    899	} else {
     900		/* we got a page count value */
    901		char *eptr;
    902		pages = strtoul(str, &eptr, 10);
    903		if (*eptr != '\0')
    904			return -EINVAL;
    905	}
    906
    907	if (pages == 0 && min == 0) {
    908		/* leave number of pages at 0 */
    909	} else if (!is_power_of_2(pages)) {
    910		char buf[100];
    911
    912		/* round pages up to next power of 2 */
    913		pages = roundup_pow_of_two(pages);
    914		if (!pages)
    915			return -EINVAL;
    916
    917		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
    918		pr_info("rounding mmap pages size to %s (%lu pages)\n",
    919			buf, pages);
    920	}
    921
    922	if (pages > max)
    923		return -EINVAL;
    924
    925	return pages;
    926}
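        /*
         * Example (illustrative): "512K" parses via the size tags as
         * 512 KiB, i.e. 128 pages with 4 KiB pages; a bare "100" is taken
         * as a page count and rounded up to the next power of two (128),
         * with a pr_info() note about the rounding.
         */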
    927
    928int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
    929{
    930	unsigned long max = UINT_MAX;
    931	long pages;
    932
    933	if (max > SIZE_MAX / page_size)
    934		max = SIZE_MAX / page_size;
    935
    936	pages = parse_pages_arg(str, 1, max);
    937	if (pages < 0) {
    938		pr_err("Invalid argument for --mmap_pages/-m\n");
    939		return -1;
    940	}
    941
    942	*mmap_pages = pages;
    943	return 0;
    944}
    945
    946int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
    947{
    948	return __evlist__parse_mmap_pages(opt->value, str);
    949}
    950
    951/**
    952 * evlist__mmap_ex - Create mmaps to receive events.
    953 * @evlist: list of events
    954 * @pages: map length in pages
    955 * @overwrite: overwrite older events?
     956 * @auxtrace_pages: auxtrace map length in pages
     957 * @auxtrace_overwrite: overwrite older auxtrace data?
    958 *
    959 * If @overwrite is %false the user needs to signal event consumption using
    960 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
    961 * automatically.
    962 *
    963 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
    964 * consumption using auxtrace_mmap__write_tail().
    965 *
    966 * Return: %0 on success, negative error code otherwise.
    967 */
    968int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
    969			 unsigned int auxtrace_pages,
    970			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
    971			 int comp_level)
    972{
    973	/*
    974	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
    975	 * Its value is decided by evsel's write_backward.
     976	 * So &mp should not be passed through a const pointer.
    977	 */
    978	struct mmap_params mp = {
    979		.nr_cblocks	= nr_cblocks,
    980		.affinity	= affinity,
    981		.flush		= flush,
    982		.comp_level	= comp_level
    983	};
    984	struct perf_evlist_mmap_ops ops = {
    985		.idx  = perf_evlist__mmap_cb_idx,
    986		.get  = perf_evlist__mmap_cb_get,
    987		.mmap = perf_evlist__mmap_cb_mmap,
    988	};
    989
    990	evlist->core.mmap_len = evlist__mmap_size(pages);
    991	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
    992
    993	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
    994				   auxtrace_pages, auxtrace_overwrite);
    995
    996	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
    997}
    998
    999int evlist__mmap(struct evlist *evlist, unsigned int pages)
   1000{
   1001	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
   1002}
   1003
   1004int evlist__create_maps(struct evlist *evlist, struct target *target)
   1005{
   1006	bool all_threads = (target->per_thread && target->system_wide);
   1007	struct perf_cpu_map *cpus;
   1008	struct perf_thread_map *threads;
   1009
   1010	/*
    1011	 * If both '-a' and '--per-thread' are specified to perf record,
    1012	 * perf record overrides '--per-thread': target->per_thread = false
    1013	 * and target->system_wide = true.
    1014	 *
    1015	 * If only '--per-thread' is specified to perf record,
    1016	 * target->per_thread = true and target->system_wide = false.
    1017	 *
    1018	 * So for perf record, target->per_thread && target->system_wide is
    1019	 * false and thread_map__new_str doesn't call
    1020	 * thread_map__new_all_cpus, which keeps perf record's current
    1021	 * behavior.
    1022	 *
    1023	 * perf stat, however, allows target->per_thread and
    1024	 * target->system_wide to both be true, meaning system-wide
    1025	 * per-thread data is collected. In that case thread_map__new_str
    1026	 * calls thread_map__new_all_cpus to enumerate all threads.
   1027	 */
   1028	threads = thread_map__new_str(target->pid, target->tid, target->uid,
   1029				      all_threads);
   1030
   1031	if (!threads)
   1032		return -1;
   1033
   1034	if (target__uses_dummy_map(target))
   1035		cpus = perf_cpu_map__dummy_new();
   1036	else
   1037		cpus = perf_cpu_map__new(target->cpu_list);
   1038
   1039	if (!cpus)
   1040		goto out_delete_threads;
   1041
   1042	evlist->core.has_user_cpus = !!target->cpu_list && !target->hybrid;
   1043
   1044	perf_evlist__set_maps(&evlist->core, cpus, threads);
   1045
   1046	/* as evlist now has references, put count here */
   1047	perf_cpu_map__put(cpus);
   1048	perf_thread_map__put(threads);
   1049
   1050	return 0;
   1051
   1052out_delete_threads:
   1053	perf_thread_map__put(threads);
   1054	return -1;
   1055}
   1056
   1057int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
   1058{
   1059	struct evsel *evsel;
   1060	int err = 0;
   1061
   1062	evlist__for_each_entry(evlist, evsel) {
   1063		if (evsel->filter == NULL)
   1064			continue;
   1065
   1066		/*
    1067		 * Filters only work for tracepoint events, which don't have a CPU
    1068		 * limit, so the evlist's and the evsel's maps should always match.
   1069		 */
   1070		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
   1071		if (err) {
   1072			*err_evsel = evsel;
   1073			break;
   1074		}
   1075	}
   1076
   1077	return err;
   1078}
   1079
   1080int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
   1081{
   1082	struct evsel *evsel;
   1083	int err = 0;
   1084
   1085	if (filter == NULL)
   1086		return -1;
   1087
   1088	evlist__for_each_entry(evlist, evsel) {
   1089		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
   1090			continue;
   1091
   1092		err = evsel__set_filter(evsel, filter);
   1093		if (err)
   1094			break;
   1095	}
   1096
   1097	return err;
   1098}
   1099
   1100int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
   1101{
   1102	struct evsel *evsel;
   1103	int err = 0;
   1104
   1105	if (filter == NULL)
   1106		return -1;
   1107
   1108	evlist__for_each_entry(evlist, evsel) {
   1109		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
   1110			continue;
   1111
   1112		err = evsel__append_tp_filter(evsel, filter);
   1113		if (err)
   1114			break;
   1115	}
   1116
   1117	return err;
   1118}
   1119
   1120char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
   1121{
    1122	char *filter = NULL;	/* stays NULL (and is returned) if npids == 0 */
   1123	size_t i;
   1124
   1125	for (i = 0; i < npids; ++i) {
   1126		if (i == 0) {
   1127			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
   1128				return NULL;
   1129		} else {
   1130			char *tmp;
   1131
   1132			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
   1133				goto out_free;
   1134
   1135			free(filter);
   1136			filter = tmp;
   1137		}
   1138	}
   1139
   1140	return filter;
   1141out_free:
   1142	free(filter);
   1143	return NULL;
   1144}
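        /*
         * Example (illustrative): for pids = {1234, 5678} this builds the
         * tracepoint filter string
         * "common_pid != 1234 && common_pid != 5678", which the set/append
         * helpers below install to keep those threads (e.g. perf's own)
         * out of the trace.
         */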
   1145
   1146int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
   1147{
   1148	char *filter = asprintf__tp_filter_pids(npids, pids);
   1149	int ret = evlist__set_tp_filter(evlist, filter);
   1150
   1151	free(filter);
   1152	return ret;
   1153}
   1154
   1155int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
   1156{
   1157	return evlist__set_tp_filter_pids(evlist, 1, &pid);
   1158}
   1159
   1160int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
   1161{
   1162	char *filter = asprintf__tp_filter_pids(npids, pids);
   1163	int ret = evlist__append_tp_filter(evlist, filter);
   1164
   1165	free(filter);
   1166	return ret;
   1167}
   1168
   1169int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
   1170{
   1171	return evlist__append_tp_filter_pids(evlist, 1, &pid);
   1172}
   1173
   1174bool evlist__valid_sample_type(struct evlist *evlist)
   1175{
   1176	struct evsel *pos;
   1177
   1178	if (evlist->core.nr_entries == 1)
   1179		return true;
   1180
   1181	if (evlist->id_pos < 0 || evlist->is_pos < 0)
   1182		return false;
   1183
   1184	evlist__for_each_entry(evlist, pos) {
   1185		if (pos->id_pos != evlist->id_pos ||
   1186		    pos->is_pos != evlist->is_pos)
   1187			return false;
   1188	}
   1189
   1190	return true;
   1191}
   1192
   1193u64 __evlist__combined_sample_type(struct evlist *evlist)
   1194{
   1195	struct evsel *evsel;
   1196
   1197	if (evlist->combined_sample_type)
   1198		return evlist->combined_sample_type;
   1199
   1200	evlist__for_each_entry(evlist, evsel)
   1201		evlist->combined_sample_type |= evsel->core.attr.sample_type;
   1202
   1203	return evlist->combined_sample_type;
   1204}
   1205
   1206u64 evlist__combined_sample_type(struct evlist *evlist)
   1207{
   1208	evlist->combined_sample_type = 0;
   1209	return __evlist__combined_sample_type(evlist);
   1210}
   1211
   1212u64 evlist__combined_branch_type(struct evlist *evlist)
   1213{
   1214	struct evsel *evsel;
   1215	u64 branch_type = 0;
   1216
   1217	evlist__for_each_entry(evlist, evsel)
   1218		branch_type |= evsel->core.attr.branch_sample_type;
   1219	return branch_type;
   1220}
   1221
   1222bool evlist__valid_read_format(struct evlist *evlist)
   1223{
   1224	struct evsel *first = evlist__first(evlist), *pos = first;
   1225	u64 read_format = first->core.attr.read_format;
   1226	u64 sample_type = first->core.attr.sample_type;
   1227
   1228	evlist__for_each_entry(evlist, pos) {
   1229		if (read_format != pos->core.attr.read_format) {
   1230			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
   1231				 read_format, (u64)pos->core.attr.read_format);
   1232		}
   1233	}
   1234
   1235	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
   1236	if ((sample_type & PERF_SAMPLE_READ) &&
   1237	    !(read_format & PERF_FORMAT_ID)) {
   1238		return false;
   1239	}
   1240
   1241	return true;
   1242}
   1243
   1244u16 evlist__id_hdr_size(struct evlist *evlist)
   1245{
   1246	struct evsel *first = evlist__first(evlist);
   1247	struct perf_sample *data;
   1248	u64 sample_type;
   1249	u16 size = 0;
   1250
   1251	if (!first->core.attr.sample_id_all)
   1252		goto out;
   1253
   1254	sample_type = first->core.attr.sample_type;
   1255
   1256	if (sample_type & PERF_SAMPLE_TID)
   1257		size += sizeof(data->tid) * 2;
   1258
    1259	if (sample_type & PERF_SAMPLE_TIME)
   1260		size += sizeof(data->time);
   1261
   1262	if (sample_type & PERF_SAMPLE_ID)
   1263		size += sizeof(data->id);
   1264
   1265	if (sample_type & PERF_SAMPLE_STREAM_ID)
   1266		size += sizeof(data->stream_id);
   1267
   1268	if (sample_type & PERF_SAMPLE_CPU)
   1269		size += sizeof(data->cpu) * 2;
   1270
   1271	if (sample_type & PERF_SAMPLE_IDENTIFIER)
   1272		size += sizeof(data->id);
   1273out:
   1274	return size;
   1275}
   1276
   1277bool evlist__valid_sample_id_all(struct evlist *evlist)
   1278{
   1279	struct evsel *first = evlist__first(evlist), *pos = first;
   1280
   1281	evlist__for_each_entry_continue(evlist, pos) {
   1282		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
   1283			return false;
   1284	}
   1285
   1286	return true;
   1287}
   1288
   1289bool evlist__sample_id_all(struct evlist *evlist)
   1290{
   1291	struct evsel *first = evlist__first(evlist);
   1292	return first->core.attr.sample_id_all;
   1293}
   1294
   1295void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
   1296{
   1297	evlist->selected = evsel;
   1298}
   1299
   1300void evlist__close(struct evlist *evlist)
   1301{
   1302	struct evsel *evsel;
   1303	struct evlist_cpu_iterator evlist_cpu_itr;
   1304	struct affinity affinity;
   1305
   1306	/*
    1307	 * With perf record, core.user_requested_cpus is usually NULL.
   1308	 * Use the old method to handle this for now.
   1309	 */
   1310	if (!evlist->core.user_requested_cpus ||
   1311	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
   1312		evlist__for_each_entry_reverse(evlist, evsel)
   1313			evsel__close(evsel);
   1314		return;
   1315	}
   1316
   1317	if (affinity__setup(&affinity) < 0)
   1318		return;
   1319
   1320	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
   1321		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
   1322				      evlist_cpu_itr.cpu_map_idx);
   1323	}
   1324
   1325	affinity__cleanup(&affinity);
   1326	evlist__for_each_entry_reverse(evlist, evsel) {
   1327		perf_evsel__free_fd(&evsel->core);
   1328		perf_evsel__free_id(&evsel->core);
   1329	}
   1330	perf_evlist__reset_id_hash(&evlist->core);
   1331}
   1332
   1333static int evlist__create_syswide_maps(struct evlist *evlist)
   1334{
   1335	struct perf_cpu_map *cpus;
    1336	struct perf_thread_map *threads;
    1337	int err = -ENOMEM;
   1338	/*
   1339	 * Try reading /sys/devices/system/cpu/online to get
   1340	 * an all cpus map.
   1341	 *
   1342	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
   1343	 * code needs an overhaul to properly forward the
   1344	 * error, and we may not want to do that fallback to a
   1345	 * default cpu identity map :-\
   1346	 */
   1347	cpus = perf_cpu_map__new(NULL);
   1348	if (!cpus)
   1349		goto out;
   1350
   1351	threads = perf_thread_map__new_dummy();
   1352	if (!threads)
   1353		goto out_put;
   1354
    1355	perf_evlist__set_maps(&evlist->core, cpus, threads);
    1356	err = 0;
    1357	perf_thread_map__put(threads);
    1358out_put:
    1359	perf_cpu_map__put(cpus);
    1360out:
    1361	return err;
   1362}
   1363
   1364int evlist__open(struct evlist *evlist)
   1365{
   1366	struct evsel *evsel;
   1367	int err;
   1368
   1369	/*
   1370	 * Default: one fd per CPU, all threads, aka systemwide
   1371	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
   1372	 */
   1373	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
   1374		err = evlist__create_syswide_maps(evlist);
   1375		if (err < 0)
   1376			goto out_err;
   1377	}
   1378
   1379	evlist__update_id_pos(evlist);
   1380
   1381	evlist__for_each_entry(evlist, evsel) {
   1382		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
   1383		if (err < 0)
   1384			goto out_err;
   1385	}
   1386
   1387	return 0;
   1388out_err:
   1389	evlist__close(evlist);
   1390	errno = -err;
   1391	return err;
   1392}
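        /*
         * Illustrative call order (a sketch of what tools such as perf
         * record do; opts is hypothetical here):
         *
         *	evlist__create_maps(evlist, &target);	// resolve CPUs/threads
         *	evlist__open(evlist);			// perf_event_open() per evsel
         *	evlist__mmap(evlist, opts->mmap_pages);	// ring buffers
         *	evlist__enable(evlist);			// start counting
         */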
   1393
   1394int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
   1395			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
   1396{
   1397	int child_ready_pipe[2], go_pipe[2];
   1398	char bf;
   1399
   1400	if (pipe(child_ready_pipe) < 0) {
   1401		perror("failed to create 'ready' pipe");
   1402		return -1;
   1403	}
   1404
   1405	if (pipe(go_pipe) < 0) {
   1406		perror("failed to create 'go' pipe");
   1407		goto out_close_ready_pipe;
   1408	}
   1409
   1410	evlist->workload.pid = fork();
   1411	if (evlist->workload.pid < 0) {
   1412		perror("failed to fork");
   1413		goto out_close_pipes;
   1414	}
   1415
   1416	if (!evlist->workload.pid) {
   1417		int ret;
   1418
   1419		if (pipe_output)
   1420			dup2(2, 1);
   1421
   1422		signal(SIGTERM, SIG_DFL);
   1423
   1424		close(child_ready_pipe[0]);
   1425		close(go_pipe[1]);
   1426		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
   1427
   1428		/*
    1429		 * Change the name of this process so as not to confuse
    1430		 * --exclude-perf users who see 'perf' in the window up to the
    1431		 * execvp() and think that perf samples are not being excluded.
   1432		 */
   1433		prctl(PR_SET_NAME, "perf-exec");
   1434
   1435		/*
   1436		 * Tell the parent we're ready to go
   1437		 */
   1438		close(child_ready_pipe[1]);
   1439
   1440		/*
   1441		 * Wait until the parent tells us to go.
   1442		 */
   1443		ret = read(go_pipe[0], &bf, 1);
   1444		/*
   1445		 * The parent will ask for the execvp() to be performed by
   1446		 * writing exactly one byte, in workload.cork_fd, usually via
   1447		 * evlist__start_workload().
   1448		 *
   1449		 * For cancelling the workload without actually running it,
   1450		 * the parent will just close workload.cork_fd, without writing
   1451		 * anything, i.e. read will return zero and we just exit()
   1452		 * here.
   1453		 */
   1454		if (ret != 1) {
   1455			if (ret == -1)
   1456				perror("unable to read pipe");
   1457			exit(ret);
   1458		}
   1459
   1460		execvp(argv[0], (char **)argv);
   1461
   1462		if (exec_error) {
   1463			union sigval val;
   1464
   1465			val.sival_int = errno;
   1466			if (sigqueue(getppid(), SIGUSR1, val))
   1467				perror(argv[0]);
   1468		} else
   1469			perror(argv[0]);
   1470		exit(-1);
   1471	}
   1472
   1473	if (exec_error) {
   1474		struct sigaction act = {
   1475			.sa_flags     = SA_SIGINFO,
   1476			.sa_sigaction = exec_error,
   1477		};
   1478		sigaction(SIGUSR1, &act, NULL);
   1479	}
   1480
   1481	if (target__none(target)) {
   1482		if (evlist->core.threads == NULL) {
   1483			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
   1484				__func__, __LINE__);
   1485			goto out_close_pipes;
   1486		}
   1487		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
   1488	}
   1489
   1490	close(child_ready_pipe[1]);
   1491	close(go_pipe[0]);
   1492	/*
   1493	 * wait for child to settle
   1494	 */
   1495	if (read(child_ready_pipe[0], &bf, 1) == -1) {
   1496		perror("unable to read pipe");
   1497		goto out_close_pipes;
   1498	}
   1499
   1500	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
   1501	evlist->workload.cork_fd = go_pipe[1];
   1502	close(child_ready_pipe[0]);
   1503	return 0;
   1504
   1505out_close_pipes:
   1506	close(go_pipe[0]);
   1507	close(go_pipe[1]);
   1508out_close_ready_pipe:
   1509	close(child_ready_pipe[0]);
   1510	close(child_ready_pipe[1]);
   1511	return -1;
   1512}
   1513
   1514int evlist__start_workload(struct evlist *evlist)
   1515{
   1516	if (evlist->workload.cork_fd > 0) {
   1517		char bf = 0;
   1518		int ret;
   1519		/*
   1520		 * Remove the cork, let it rip!
   1521		 */
   1522		ret = write(evlist->workload.cork_fd, &bf, 1);
   1523		if (ret < 0)
   1524			perror("unable to write to pipe");
   1525
   1526		close(evlist->workload.cork_fd);
   1527		return ret;
   1528	}
   1529
   1530	return 0;
   1531}
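        /*
         * Illustrative workload lifecycle (a sketch): prepare the workload
         * first so the target threads can include its pid, and uncork it
         * only once the events are armed:
         *
         *	evlist__prepare_workload(evlist, &target, argv, false, NULL);
         *	evlist__open(evlist);
         *	evlist__enable(evlist);
         *	evlist__start_workload(evlist);	// writes one byte to cork_fd
         */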
   1532
   1533int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
   1534{
   1535	struct evsel *evsel = evlist__event2evsel(evlist, event);
   1536
   1537	if (!evsel)
   1538		return -EFAULT;
   1539	return evsel__parse_sample(evsel, event, sample);
   1540}
   1541
   1542int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
   1543{
   1544	struct evsel *evsel = evlist__event2evsel(evlist, event);
   1545
   1546	if (!evsel)
   1547		return -EFAULT;
   1548	return evsel__parse_sample_timestamp(evsel, event, timestamp);
   1549}
   1550
   1551int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
   1552{
   1553	int printed, value;
   1554	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
   1555
   1556	switch (err) {
   1557	case EACCES:
   1558	case EPERM:
   1559		printed = scnprintf(buf, size,
   1560				    "Error:\t%s.\n"
   1561				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
   1562
   1563		value = perf_event_paranoid();
   1564
   1565		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
   1566
   1567		if (value >= 2) {
   1568			printed += scnprintf(buf + printed, size - printed,
   1569					     "For your workloads it needs to be <= 1\nHint:\t");
   1570		}
   1571		printed += scnprintf(buf + printed, size - printed,
   1572				     "For system wide tracing it needs to be set to -1.\n");
   1573
   1574		printed += scnprintf(buf + printed, size - printed,
   1575				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
   1576				    "Hint:\tThe current value is %d.", value);
   1577		break;
   1578	case EINVAL: {
   1579		struct evsel *first = evlist__first(evlist);
   1580		int max_freq;
   1581
   1582		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
   1583			goto out_default;
   1584
   1585		if (first->core.attr.sample_freq < (u64)max_freq)
   1586			goto out_default;
   1587
   1588		printed = scnprintf(buf, size,
   1589				    "Error:\t%s.\n"
   1590				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
   1591				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
   1592				    emsg, max_freq, first->core.attr.sample_freq);
   1593		break;
   1594	}
   1595	default:
   1596out_default:
   1597		scnprintf(buf, size, "%s", emsg);
   1598		break;
   1599	}
   1600
   1601	return 0;
   1602}
   1603
   1604int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
   1605{
   1606	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
   1607	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
   1608
   1609	switch (err) {
   1610	case EPERM:
   1611		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
   1612		printed += scnprintf(buf + printed, size - printed,
   1613				     "Error:\t%s.\n"
   1614				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
   1615				     "Hint:\tTried using %zd kB.\n",
   1616				     emsg, pages_max_per_user, pages_attempted);
   1617
   1618		if (pages_attempted >= pages_max_per_user) {
   1619			printed += scnprintf(buf + printed, size - printed,
   1620					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
   1621					     pages_max_per_user + pages_attempted);
   1622		}
   1623
   1624		printed += scnprintf(buf + printed, size - printed,
   1625				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
   1626		break;
   1627	default:
   1628		scnprintf(buf, size, "%s", emsg);
   1629		break;
   1630	}
   1631
   1632	return 0;
   1633}
   1634
   1635void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
   1636{
   1637	struct evsel *evsel, *n;
   1638	LIST_HEAD(move);
   1639
   1640	if (move_evsel == evlist__first(evlist))
   1641		return;
   1642
   1643	evlist__for_each_entry_safe(evlist, n, evsel) {
   1644		if (evsel__leader(evsel) == evsel__leader(move_evsel))
   1645			list_move_tail(&evsel->core.node, &move);
   1646	}
   1647
   1648	list_splice(&move, &evlist->core.entries);
   1649}
   1650
   1651struct evsel *evlist__get_tracking_event(struct evlist *evlist)
   1652{
   1653	struct evsel *evsel;
   1654
   1655	evlist__for_each_entry(evlist, evsel) {
   1656		if (evsel->tracking)
   1657			return evsel;
   1658	}
   1659
   1660	return evlist__first(evlist);
   1661}
   1662
   1663void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
   1664{
   1665	struct evsel *evsel;
   1666
   1667	if (tracking_evsel->tracking)
   1668		return;
   1669
   1670	evlist__for_each_entry(evlist, evsel) {
   1671		if (evsel != tracking_evsel)
   1672			evsel->tracking = false;
   1673	}
   1674
   1675	tracking_evsel->tracking = true;
   1676}
   1677
   1678struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
   1679{
   1680	struct evsel *evsel;
   1681
   1682	evlist__for_each_entry(evlist, evsel) {
   1683		if (!evsel->name)
   1684			continue;
   1685		if (strcmp(str, evsel->name) == 0)
   1686			return evsel;
   1687	}
   1688
   1689	return NULL;
   1690}
   1691
   1692void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
   1693{
   1694	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
   1695	enum action {
   1696		NONE,
   1697		PAUSE,
   1698		RESUME,
   1699	} action = NONE;
   1700
   1701	if (!evlist->overwrite_mmap)
   1702		return;
   1703
   1704	switch (old_state) {
   1705	case BKW_MMAP_NOTREADY: {
   1706		if (state != BKW_MMAP_RUNNING)
   1707			goto state_err;
   1708		break;
   1709	}
   1710	case BKW_MMAP_RUNNING: {
   1711		if (state != BKW_MMAP_DATA_PENDING)
   1712			goto state_err;
   1713		action = PAUSE;
   1714		break;
   1715	}
   1716	case BKW_MMAP_DATA_PENDING: {
   1717		if (state != BKW_MMAP_EMPTY)
   1718			goto state_err;
   1719		break;
   1720	}
   1721	case BKW_MMAP_EMPTY: {
   1722		if (state != BKW_MMAP_RUNNING)
   1723			goto state_err;
   1724		action = RESUME;
   1725		break;
   1726	}
   1727	default:
    1728		WARN_ONCE(1, "Shouldn't get here\n");
   1729	}
   1730
   1731	evlist->bkw_mmap_state = state;
   1732
   1733	switch (action) {
   1734	case PAUSE:
   1735		evlist__pause(evlist);
   1736		break;
   1737	case RESUME:
   1738		evlist__resume(evlist);
   1739		break;
   1740	case NONE:
   1741	default:
   1742		break;
   1743	}
   1744
   1745state_err:
   1746	return;
   1747}
   1748
   1749bool evlist__exclude_kernel(struct evlist *evlist)
   1750{
   1751	struct evsel *evsel;
   1752
   1753	evlist__for_each_entry(evlist, evsel) {
   1754		if (!evsel->core.attr.exclude_kernel)
   1755			return false;
   1756	}
   1757
   1758	return true;
   1759}
   1760
   1761/*
    1762 * Events in the data file are not collected in groups, but we still want
    1763 * the group display. Set up an artificial group and set the leader's
    1764 * forced_leader flag to notify the display code.
   1765 */
   1766void evlist__force_leader(struct evlist *evlist)
   1767{
   1768	if (!evlist->core.nr_groups) {
   1769		struct evsel *leader = evlist__first(evlist);
   1770
   1771		evlist__set_leader(evlist);
   1772		leader->forced_leader = true;
   1773	}
   1774}
   1775
   1776struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
   1777{
   1778	struct evsel *c2, *leader;
   1779	bool is_open = true;
   1780
   1781	leader = evsel__leader(evsel);
   1782
   1783	pr_debug("Weak group for %s/%d failed\n",
   1784			leader->name, leader->core.nr_members);
   1785
   1786	/*
   1787	 * for_each_group_member doesn't work here because it doesn't
   1788	 * include the first entry.
   1789	 */
   1790	evlist__for_each_entry(evsel_list, c2) {
   1791		if (c2 == evsel)
   1792			is_open = false;
   1793		if (evsel__has_leader(c2, leader)) {
   1794			if (is_open && close)
   1795				perf_evsel__close(&c2->core);
   1796			/*
   1797			 * We want to close all members of the group and reopen
   1798			 * them. Some events, like Intel topdown, require being
   1799			 * in a group and so keep these in the group.
   1800			 */
   1801			evsel__remove_from_group(c2, leader);
   1802
   1803			/*
   1804			 * Set this for all former members of the group
   1805			 * to indicate they get reopened.
   1806			 */
   1807			c2->reset_group = true;
   1808		}
   1809	}
   1810	/* Reset the leader count if all entries were removed. */
   1811	if (leader->core.nr_members == 1)
   1812		leader->core.nr_members = 0;
   1813	return leader;
   1814}
   1815
   1816static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
   1817{
   1818	char *s, *p;
   1819	int ret = 0, fd;
   1820
   1821	if (strncmp(str, "fifo:", 5))
   1822		return -EINVAL;
   1823
   1824	str += 5;
   1825	if (!*str || *str == ',')
   1826		return -EINVAL;
   1827
   1828	s = strdup(str);
   1829	if (!s)
   1830		return -ENOMEM;
   1831
   1832	p = strchr(s, ',');
   1833	if (p)
   1834		*p = '\0';
   1835
   1836	/*
    1837	 * O_RDWR avoids POLLHUPs, which is necessary to allow the other
   1838	 * end of a FIFO to be repeatedly opened and closed.
   1839	 */
   1840	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
   1841	if (fd < 0) {
   1842		pr_err("Failed to open '%s'\n", s);
   1843		ret = -errno;
   1844		goto out_free;
   1845	}
   1846	*ctl_fd = fd;
   1847	*ctl_fd_close = true;
   1848
   1849	if (p && *++p) {
   1850		/* O_RDWR | O_NONBLOCK means the other end need not be open */
   1851		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
   1852		if (fd < 0) {
   1853			pr_err("Failed to open '%s'\n", p);
   1854			ret = -errno;
   1855			goto out_free;
   1856		}
   1857		*ctl_fd_ack = fd;
   1858	}
   1859
   1860out_free:
   1861	free(s);
   1862	return ret;
   1863}
   1864
   1865int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
   1866{
   1867	char *comma = NULL, *endptr = NULL;
   1868
   1869	*ctl_fd_close = false;
   1870
   1871	if (strncmp(str, "fd:", 3))
   1872		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);
   1873
   1874	*ctl_fd = strtoul(&str[3], &endptr, 0);
   1875	if (endptr == &str[3])
   1876		return -EINVAL;
   1877
   1878	comma = strchr(str, ',');
   1879	if (comma) {
   1880		if (endptr != comma)
   1881			return -EINVAL;
   1882
   1883		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
   1884		if (endptr == comma + 1 || *endptr != '\0')
   1885			return -EINVAL;
   1886	}
   1887
   1888	return 0;
   1889}
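
        /*
         * Illustrative sketch (fd numbers assumed, not part of this file):
         * this parses the argument of perf's --control option, either
         * "fd:ctl-fd[,ack-fd]" or "fifo:ctl-path[,ack-path]":
         *
         *	int ctl_fd = -1, ctl_fd_ack = -1;
         *	bool ctl_fd_close = false;
         *
         *	if (!evlist__parse_control("fd:10,11", &ctl_fd, &ctl_fd_ack,
         *				   &ctl_fd_close))
         *		evlist__initialize_ctlfd(evlist, ctl_fd, ctl_fd_ack);
         *
         * For the "fifo:" form the FIFOs are opened here, and *ctl_fd_close
         * is set so that evlist__close_control() closes them afterwards.
         */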
   1890
   1891void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
   1892{
   1893	if (*ctl_fd_close) {
   1894		*ctl_fd_close = false;
   1895		close(ctl_fd);
   1896		if (ctl_fd_ack >= 0)
   1897			close(ctl_fd_ack);
   1898	}
   1899}
   1900
   1901int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
   1902{
   1903	if (fd == -1) {
   1904		pr_debug("Control descriptor is not initialized\n");
   1905		return 0;
   1906	}
   1907
   1908	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
   1909						     fdarray_flag__nonfilterable);
   1910	if (evlist->ctl_fd.pos < 0) {
   1911		evlist->ctl_fd.pos = -1;
   1912		pr_err("Failed to add ctl fd entry: %m\n");
   1913		return -1;
   1914	}
   1915
   1916	evlist->ctl_fd.fd = fd;
   1917	evlist->ctl_fd.ack = ack;
   1918
   1919	return 0;
   1920}
   1921
   1922bool evlist__ctlfd_initialized(struct evlist *evlist)
   1923{
   1924	return evlist->ctl_fd.pos >= 0;
   1925}
   1926
   1927int evlist__finalize_ctlfd(struct evlist *evlist)
   1928{
   1929	struct pollfd *entries = evlist->core.pollfd.entries;
   1930
   1931	if (!evlist__ctlfd_initialized(evlist))
   1932		return 0;
   1933
   1934	entries[evlist->ctl_fd.pos].fd = -1;
   1935	entries[evlist->ctl_fd.pos].events = 0;
   1936	entries[evlist->ctl_fd.pos].revents = 0;
   1937
   1938	evlist->ctl_fd.pos = -1;
   1939	evlist->ctl_fd.ack = -1;
   1940	evlist->ctl_fd.fd = -1;
   1941
   1942	return 0;
   1943}
   1944
   1945static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
   1946			      char *cmd_data, size_t data_size)
   1947{
   1948	int err;
    1949	char c = '\0';	/* may be logged below even if no byte was read */
   1950	size_t bytes_read = 0;
   1951
   1952	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
   1953	memset(cmd_data, 0, data_size);
   1954	data_size--;
   1955
   1956	do {
   1957		err = read(evlist->ctl_fd.fd, &c, 1);
   1958		if (err > 0) {
   1959			if (c == '\n' || c == '\0')
   1960				break;
   1961			cmd_data[bytes_read++] = c;
   1962			if (bytes_read == data_size)
   1963				break;
   1964			continue;
   1965		} else if (err == -1) {
   1966			if (errno == EINTR)
   1967				continue;
   1968			if (errno == EAGAIN || errno == EWOULDBLOCK)
   1969				err = 0;
   1970			else
   1971				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
   1972		}
   1973		break;
   1974	} while (1);
   1975
   1976	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
   1977		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
   1978
   1979	if (bytes_read > 0) {
   1980		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
   1981			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
   1982			*cmd = EVLIST_CTL_CMD_ENABLE;
   1983		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
   1984				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
   1985			*cmd = EVLIST_CTL_CMD_DISABLE;
   1986		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
   1987				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
   1988			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
   1989			pr_debug("is snapshot\n");
   1990		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
   1991				    (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
   1992			*cmd = EVLIST_CTL_CMD_EVLIST;
   1993		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
   1994				    (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
   1995			*cmd = EVLIST_CTL_CMD_STOP;
   1996		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
   1997				    (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
   1998			*cmd = EVLIST_CTL_CMD_PING;
   1999		}
   2000	}
   2001
   2002	return bytes_read ? (int)bytes_read : err;
   2003}
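
        /*
         * Illustrative note (not part of this file): the tags matched above
         * are plain strings defined in util/evlist.h ("enable", "disable",
         * "snapshot", "evlist", "stop", "ping"), terminated by a newline or
         * NUL, so with --control fifo:perf.ctl a running session can be
         * driven by e.g.
         *
         *	echo disable > perf.ctl
         */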
   2004
   2005int evlist__ctlfd_ack(struct evlist *evlist)
   2006{
   2007	int err;
   2008
   2009	if (evlist->ctl_fd.ack == -1)
   2010		return 0;
   2011
   2012	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
   2013		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
   2014	if (err == -1)
   2015		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
   2016
   2017	return err;
   2018}
   2019
   2020static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
   2021{
   2022	char *data = cmd_data + cmd_size;
   2023
   2024	/* no argument */
   2025	if (!*data)
   2026		return 0;
   2027
    2028	/* there's an argument */
   2029	if (*data == ' ') {
   2030		*arg = data + 1;
   2031		return 1;
   2032	}
   2033
   2034	/* malformed */
   2035	return -1;
   2036}
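
        /*
         * Illustrative example (not part of this file): with
         * cmd_data = "enable cycles" and cmd_size = strlen("enable"), this
         * returns 1 and points *arg at "cycles"; a bare "enable" returns 0
         * (no argument); any other byte after the tag is malformed (-1).
         */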
   2037
   2038static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
   2039{
   2040	struct evsel *evsel;
   2041	char *name;
   2042	int err;
   2043
   2044	err = get_cmd_arg(cmd_data,
   2045			  enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
   2046				   sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
   2047			  &name);
   2048	if (err < 0) {
   2049		pr_info("failed: wrong command\n");
   2050		return -1;
   2051	}
   2052
   2053	if (err) {
   2054		evsel = evlist__find_evsel_by_str(evlist, name);
   2055		if (evsel) {
   2056			if (enable)
   2057				evlist__enable_evsel(evlist, name);
   2058			else
   2059				evlist__disable_evsel(evlist, name);
   2060			pr_info("Event %s %s\n", evsel->name,
   2061				enable ? "enabled" : "disabled");
   2062		} else {
   2063			pr_info("failed: can't find '%s' event\n", name);
   2064		}
   2065	} else {
   2066		if (enable) {
   2067			evlist__enable(evlist);
   2068			pr_info(EVLIST_ENABLED_MSG);
   2069		} else {
   2070			evlist__disable(evlist);
   2071			pr_info(EVLIST_DISABLED_MSG);
   2072		}
   2073	}
   2074
   2075	return 0;
   2076}
   2077
   2078static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
   2079{
   2080	struct perf_attr_details details = { .verbose = false, };
   2081	struct evsel *evsel;
   2082	char *arg;
   2083	int err;
   2084
   2085	err = get_cmd_arg(cmd_data,
   2086			  sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
   2087			  &arg);
   2088	if (err < 0) {
   2089		pr_info("failed: wrong command\n");
   2090		return -1;
   2091	}
   2092
   2093	if (err) {
   2094		if (!strcmp(arg, "-v")) {
   2095			details.verbose = true;
   2096		} else if (!strcmp(arg, "-g")) {
   2097			details.event_group = true;
   2098		} else if (!strcmp(arg, "-F")) {
   2099			details.freq = true;
   2100		} else {
   2101			pr_info("failed: wrong command\n");
   2102			return -1;
   2103		}
   2104	}
   2105
   2106	evlist__for_each_entry(evlist, evsel)
   2107		evsel__fprintf(evsel, &details, stderr);
   2108
   2109	return 0;
   2110}
   2111
   2112int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
   2113{
   2114	int err = 0;
   2115	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
   2116	int ctlfd_pos = evlist->ctl_fd.pos;
   2117	struct pollfd *entries = evlist->core.pollfd.entries;
   2118
   2119	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
   2120		return 0;
   2121
   2122	if (entries[ctlfd_pos].revents & POLLIN) {
   2123		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
   2124					 EVLIST_CTL_CMD_MAX_LEN);
   2125		if (err > 0) {
   2126			switch (*cmd) {
   2127			case EVLIST_CTL_CMD_ENABLE:
   2128			case EVLIST_CTL_CMD_DISABLE:
   2129				err = evlist__ctlfd_enable(evlist, cmd_data,
   2130							   *cmd == EVLIST_CTL_CMD_ENABLE);
   2131				break;
   2132			case EVLIST_CTL_CMD_EVLIST:
   2133				err = evlist__ctlfd_list(evlist, cmd_data);
   2134				break;
   2135			case EVLIST_CTL_CMD_SNAPSHOT:
   2136			case EVLIST_CTL_CMD_STOP:
   2137			case EVLIST_CTL_CMD_PING:
   2138				break;
   2139			case EVLIST_CTL_CMD_ACK:
   2140			case EVLIST_CTL_CMD_UNSUPPORTED:
   2141			default:
   2142				pr_debug("ctlfd: unsupported %d\n", *cmd);
   2143				break;
   2144			}
   2145			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
   2146			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
   2147				evlist__ctlfd_ack(evlist);
   2148		}
   2149	}
   2150
   2151	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
   2152		evlist__finalize_ctlfd(evlist);
   2153	else
   2154		entries[ctlfd_pos].revents = 0;
   2155
   2156	return err;
   2157}
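
        /*
         * Illustrative sketch (simplified from the dispatch loops in
         * perf stat/record, not part of this file):
         *
         *	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
         *	bool done = false;
         *
         *	while (!done) {
         *		if (evlist__poll(evlist, 1000) > 0)
         *			evlist__ctlfd_process(evlist, &cmd);
         *		if (cmd == EVLIST_CTL_CMD_STOP)
         *			done = true;
         *	}
         */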
   2158
   2159int evlist__ctlfd_update(struct evlist *evlist, struct pollfd *update)
   2160{
   2161	int ctlfd_pos = evlist->ctl_fd.pos;
   2162	struct pollfd *entries = evlist->core.pollfd.entries;
   2163
   2164	if (!evlist__ctlfd_initialized(evlist))
   2165		return 0;
   2166
   2167	if (entries[ctlfd_pos].fd != update->fd ||
   2168	    entries[ctlfd_pos].events != update->events)
   2169		return -1;
   2170
   2171	entries[ctlfd_pos].revents = update->revents;
   2172	return 0;
   2173}
   2174
   2175struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
   2176{
   2177	struct evsel *evsel;
   2178
   2179	evlist__for_each_entry(evlist, evsel) {
   2180		if (evsel->core.idx == idx)
   2181			return evsel;
   2182	}
   2183	return NULL;
   2184}
   2185
   2186int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
   2187{
   2188	struct evsel *evsel;
   2189	int printed = 0;
   2190
   2191	evlist__for_each_entry(evlist, evsel) {
   2192		if (evsel__is_dummy_event(evsel))
   2193			continue;
   2194		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
   2195			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
   2196		} else {
   2197			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
   2198			break;
   2199		}
   2200	}
   2201
   2202	return printed;
   2203}
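
        /*
         * Illustrative example (not part of this file): for an evlist holding
         * cycles and instructions, a sufficiently large buffer yields
         * "cycles,instructions"; once the next name no longer fits, the
         * output is truncated to e.g. "cycles,..." instead.
         */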
   2204
   2205void evlist__check_mem_load_aux(struct evlist *evlist)
   2206{
   2207	struct evsel *leader, *evsel, *pos;
   2208
   2209	/*
    2210	 * On some platforms, the 'mem-loads' event must be used together
    2211	 * with 'mem-loads-aux' within a group, with 'mem-loads-aux' as the
    2212	 * group leader. Break up such groups before reporting, because
    2213	 * 'mem-loads-aux' is just an auxiliary event: it doesn't carry
    2214	 * any valid memory load information.
   2215	 */
   2216	evlist__for_each_entry(evlist, evsel) {
   2217		leader = evsel__leader(evsel);
   2218		if (leader == evsel)
   2219			continue;
   2220
   2221		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
   2222			for_each_group_evsel(pos, leader) {
   2223				evsel__set_leader(pos, pos);
   2224				pos->core.nr_members = 0;
   2225			}
   2226		}
   2227	}
   2228}
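
        /*
         * Illustrative note (not part of this file): on the affected Intel
         * platforms, 'perf mem record' opens a group along the lines of
         * '{mem-loads-aux,mem-loads}'; the loop above makes every member its
         * own leader again so that 'mem-loads-aux' does not distort the
         * report.
         */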