cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

auxtrace.c (64674B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>

/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel__leader(evsel) == leader ||
			     (evsel__leader(evsel) == evsel &&
			      evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!evsel__has_leader(evsel, leader)) {
				evsel__set_leader(evsel, leader);
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}

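/*
 * True when there are no itrace synthesis options or when decoding was
 * explicitly disabled (e.g. --no-itrace).
 */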
static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

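/*
 * Map the AUX area described by @mp for the event fd @fd, publishing the
 * chosen offset and size via the perf_event_mmap_page before calling mmap().
 */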
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu.cpu;

	if (!mp->len || !mp->mmap_needed) {
		mm->base = NULL;
		return 0;
	}

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

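/*
 * Size the AUX area: @auxtrace_pages pages, read-only when overwrite mode is
 * requested. A power-of-2 length allows wrapping offsets with a mask instead
 * of a modulo.
 */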
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

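/*
 * Bind the mmap parameters to mmap index @idx: per-CPU mmaps record the CPU
 * at that index (plus the first thread's pid, if any), per-thread mmaps
 * record only the pid at that index.
 */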
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx)
{
	bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

	mp->mmap_needed = evsel->needs_auxtrace_mmap;

	if (!mp->mmap_needed)
		return;

	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		if (evlist->core.threads)
			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu.cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

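/*
 * Grow the queue array to at least @new_nr_queues, doubling until it fits,
 * then splice each old queue's buffer list into the new array.
 */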
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

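/*
 * Read @size bytes of AUX data from the current position of the session's
 * data file into a freshly allocated buffer. Used for piped input, where the
 * data cannot be mmapped later.
 */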
static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

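/*
 * Append @buffer to queue @idx, growing the queue array on demand. The first
 * buffer queued fixes the queue's tid and cpu.
 */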
static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu.cpu;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap);
}

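/*
 * Duplicate @buffer and queue it. If the whole file is already mmapped, point
 * the buffer straight at the mapped data; for piped input, copy the data now;
 * on 32-bit, split oversized buffers first.
 */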
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = { event->auxtrace.cpu },
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

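/*
 * Sift the new entry up from position @pos of the min-heap until its parent's
 * ordinal is no larger, then store it there.
 */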
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

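/*
 * Remove the minimum element: walk the smaller-child path down from the root,
 * then re-insert the last element into the hole left behind.
 */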
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr) {
		itr->evlist = evlist;
		return itr->recording_options(itr, evlist, opts);
	}
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr && itr->parse_snapshot_options)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

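/*
 * Re-enable @evsel for a single mmap index: translate the evlist-wide CPU
 * index into the evsel's own CPU map for per-cpu mmaps, otherwise enable the
 * corresponding thread.
 */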
static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

	if (per_cpu_mmaps) {
		struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		int cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);

		if (cpu_map_idx == -1)
			return -EINVAL;
		return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
	}

	return perf_evsel__enable_thread(&evsel->core, idx);
}

int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
	struct evsel *evsel;

	if (!itr->evlist || !itr->pmu)
		return -EINVAL;

	evlist__for_each_entry(itr->evlist, evsel) {
		if (evsel->core.attr.type == itr->pmu->type) {
			if (evsel->disabled)
				return 0;
			return evlist__enable_event_idx(itr->evlist, evsel, idx);
		}
	}
	return -EINVAL;
}

/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)

static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}

int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}

void auxtrace_regroup_aux_output(struct evlist *evlist)
{
	struct evsel *evsel, *aux_evsel = NULL;
	struct evsel_config_term *term;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_OUTPUT);
		/* If possible, group with the AUX event */
		if (term && aux_evsel)
			evlist__regroup(evlist, aux_evsel, evsel);
	}
}

struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

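/*
 * Look up the queue that holds the AUX data for @sample, via the sample id's
 * mmap index.
 */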
struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}

int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENOENT;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}

struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	err = evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		return err;

	if (!sample.aux_sample.size)
		return 0;

	offset += sample.aux_sample.data - (void *)event;

	return session->auxtrace->queue_data(session, &sample, NULL, offset);
}

int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}

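/*
 * mmap the buffer's data from the data file on demand. The file offset is
 * rounded down to a page boundary, so the returned pointer is the mapping
 * address plus the alignment adjustment.
 */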
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
	int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

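/*
 * Demote @leader: promote the first remaining group member to leader and
 * move the group name and member count across.
 */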
static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
	struct evsel *new_leader = NULL;
	struct evsel *evsel;

	/* Find new leader for the group */
	evlist__for_each_entry(evlist, evsel) {
		if (!evsel__has_leader(evsel, leader) || evsel == leader)
			continue;
		if (!new_leader)
			new_leader = evsel;
		evsel__set_leader(evsel, new_leader);
	}

	/* Update group information */
	if (new_leader) {
		zfree(&new_leader->group_name);
		new_leader->group_name = leader->group_name;
		leader->group_name = NULL;

		new_leader->core.nr_members = leader->core.nr_members - 1;
		leader->core.nr_members = 1;
	}
}

static void unleader_auxtrace(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (auxtrace__evsel_is_auxtrace(session, evsel) &&
		    evsel__is_group_leader(evsel)) {
			unleader_evsel(session->evlist, evsel);
		}
	}
}

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;
	int err;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		err = intel_pt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_INTEL_BTS:
		err = intel_bts_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_ARM_SPE:
		err = arm_spe_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_CS_ETM:
		err = cs_etm__process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_S390_CPUMSF:
		err = s390_cpumsf_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	unleader_auxtrace(session);

	return 0;
}

s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRI_lx64"  offset: %#"PRI_lx64"  ref: %#"PRI_lx64"  idx: %u  tid: %d  cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->intr_events = true;
	synth_opts->errors = true;
	synth_opts->flc = true;
	synth_opts->llc = true;
	synth_opts->tlb = true;
	synth_opts->mem = true;
	synth_opts->remote_access = true;

	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

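/*
 * Parse itrace error/log flag letters: each lower-case letter sets one bit,
 * and get_flags() accumulates '+' letters into @plus_flags and '-' letters
 * into @minus_flags.
 */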
static int get_flag(const char **ptr, unsigned int *flags)
{
	while (1) {
		char c = **ptr;

		if (c >= 'a' && c <= 'z') {
			*flags |= 1 << (c - 'a');
			++*ptr;
			return 0;
		} else if (c == ' ') {
			++*ptr;
			continue;
		} else {
			return -1;
		}
	}
}

static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
	while (1) {
		switch (**ptr) {
		case '+':
			++*ptr;
			if (get_flag(ptr, plus_flags))
				return -1;
			break;
		case '-':
			++*ptr;
			if (get_flag(ptr, minus_flags))
				return -1;
			break;
		case ' ':
			++*ptr;
			break;
		default:
			return 0;
		}
	}
}

/*
 * See tools/perf/Documentation/perf-script.txt for information about the
 * options parsed here; that documentation was introduced after this cset,
 * when 'perf script' gained support for these options.
 */
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset)
{
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'I':
			synth_opts->intr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			if (get_flags(&p, &synth_opts->error_plus_flags,
				      &synth_opts->error_minus_flags))
				goto out_err;
			break;
		case 'd':
			synth_opts->log = true;
			if (get_flags(&p, &synth_opts->log_plus_flags,
				      &synth_opts->log_minus_flags))
				goto out_err;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'G':
		case 'g':
			if (p[-1] == 'G')
				synth_opts->add_callchain = true;
			else
				synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'L':
		case 'l':
			if (p[-1] == 'L')
				synth_opts->add_last_branch = true;
			else
				synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case 'f':
			synth_opts->flc = true;
			break;
		case 'm':
			synth_opts->llc = true;
			break;
		case 't':
			synth_opts->tlb = true;
			break;
		case 'a':
			synth_opts->remote_access = true;
			break;
		case 'M':
			synth_opts->mem = true;
			break;
		case 'q':
			synth_opts->quick += 1;
			break;
		case 'A':
			synth_opts->approx_ipc = true;
			break;
		case 'Z':
			synth_opts->timeless_decoding = true;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
{
	return itrace_do_parse_synth_opts(opt->value, str, unset);
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}

	if (!e->fmt)
		msg = (const char *)&e->time;

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

/*
 * In compat mode, a 64-bit kernel runs with a 32-bit perf tool. The 32-bit
 * tool cannot access a 64-bit value atomically: on some architectures a
 * 64-bit load or store is split into two instructions, one for the low
 * 32 bits and one for the high 32 bits. Between those two instructions the
 * kernel can access the 64-bit value, leading to unexpected load values, as
 * in the sequence below on multiple CPUs:
 *
 *   kernel (64-bit)                        user (32-bit)
 *
 *   if (LOAD ->aux_tail) { --,             LOAD ->aux_head_lo
 *       STORE $aux_data      |       ,--->
 *       FLUSH $aux_data      |       |     LOAD ->aux_head_hi
 *       STORE ->aux_head   --|-------`     smp_rmb()
 *   }                        |             LOAD $data
 *                            |             smp_mb()
 *                            |             STORE ->aux_tail_lo
 *                            `----------->
 *                                          STORE ->aux_tail_hi
 *
 * For this reason, the perf tool cannot work correctly when the AUX head or
 * tail grows beyond 4GB (more than 32 bits). Nor can the AUX ring buffer
 * simply be limited to less than 4GB: the pointers increase monotonically,
 * so whatever the buffer size, the head and tail eventually exceed 4GB and
 * carry into the high 32 bits.
 *
 * To mitigate this and improve the user experience, the perf tool works in
 * certain conditions and bails out with an error if it detects an overflow
 * it cannot handle.
 *
 * To read the AUX head, the value is read three times and the high 4 bytes
 * of the first and last reads are compared; if the kernel did not change the
 * high 4 bytes during the read sequence, it is safe to use the second value.
 *
 * When compat_auxtrace_mmap__write_tail() detects a carry into the high
 * 32 bits, user space would need two store operations and cannot guarantee
 * the atomicity of the 64-bit write, so it returns '-1' to tell the caller
 * an overflow error has happened.
 */
u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 first, second, last;
	u64 mask = (u64)(UINT32_MAX) << 32;

	do {
		first = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		second = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		last = READ_ONCE(pc->aux_head);
	} while ((first & mask) != (last & mask));

	return second;
}

int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 mask = (u64)(UINT32_MAX) << 32;

	if (tail & mask)
		return -1;

	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}

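/*
 * Copy new AUX data out of the ring buffer and emit it as a
 * PERF_RECORD_AUXTRACE event via fn(). A wrapped region comes out in two
 * pieces (data1/data2); in non-snapshot mode the tail is advanced and the
 * event is re-enabled afterwards.
 */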
   1778static int __auxtrace_mmap__read(struct mmap *map,
   1779				 struct auxtrace_record *itr,
   1780				 struct perf_tool *tool, process_auxtrace_t fn,
   1781				 bool snapshot, size_t snapshot_size)
   1782{
   1783	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
   1784	u64 head, old = mm->prev, offset, ref;
   1785	unsigned char *data = mm->base;
   1786	size_t size, head_off, old_off, len1, len2, padding;
   1787	union perf_event ev;
   1788	void *data1, *data2;
   1789	int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));
   1790
   1791	head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
   1792
   1793	if (snapshot &&
   1794	    auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
   1795		return -1;
   1796
   1797	if (old == head)
   1798		return 0;
   1799
   1800	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
   1801		  mm->idx, old, head, head - old);
   1802
   1803	if (mm->mask) {
   1804		head_off = head & mm->mask;
   1805		old_off = old & mm->mask;
   1806	} else {
   1807		head_off = head % mm->len;
   1808		old_off = old % mm->len;
   1809	}
   1810
   1811	if (head_off > old_off)
   1812		size = head_off - old_off;
   1813	else
   1814		size = mm->len - (old_off - head_off);
   1815
   1816	if (snapshot && size > snapshot_size)
   1817		size = snapshot_size;
   1818
   1819	ref = auxtrace_record__reference(itr);
   1820
   1821	if (head > old || size <= head || mm->mask) {
   1822		offset = head - size;
   1823	} else {
   1824		/*
   1825		 * When the buffer size is not a power of 2, 'head' wraps at the
   1826		 * highest multiple of the buffer size, so we have to subtract
   1827		 * the remainder here.
   1828		 */
   1829		u64 rem = (0ULL - mm->len) % mm->len;
   1830
   1831		offset = head - size - rem;
   1832	}
   1833
   1834	if (size > head_off) {
   1835		len1 = size - head_off;
   1836		data1 = &data[mm->len - len1];
   1837		len2 = head_off;
   1838		data2 = &data[0];
   1839	} else {
   1840		len1 = size;
   1841		data1 = &data[head_off - len1];
   1842		len2 = 0;
   1843		data2 = NULL;
   1844	}
   1845
   1846	if (itr->alignment) {
   1847		unsigned int unwanted = len1 % itr->alignment;
   1848
   1849		len1 -= unwanted;
   1850		size -= unwanted;
   1851	}
   1852
   1853	/* padding must be written by fn() e.g. record__process_auxtrace() */
   1854	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
   1855	if (padding)
   1856		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
   1857
   1858	memset(&ev, 0, sizeof(ev));
   1859	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
   1860	ev.auxtrace.header.size = sizeof(ev.auxtrace);
   1861	ev.auxtrace.size = size + padding;
   1862	ev.auxtrace.offset = offset;
   1863	ev.auxtrace.reference = ref;
   1864	ev.auxtrace.idx = mm->idx;
   1865	ev.auxtrace.tid = mm->tid;
   1866	ev.auxtrace.cpu = mm->cpu;
   1867
   1868	if (fn(tool, map, &ev, data1, len1, data2, len2))
   1869		return -1;
   1870
   1871	mm->prev = head;
   1872
   1873	if (!snapshot) {
   1874		int err;
   1875
   1876		err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
   1877		if (err < 0)
   1878			return err;
   1879
   1880		if (itr->read_finish) {
   1881			err = itr->read_finish(itr, mm->idx);
   1882			if (err < 0)
   1883				return err;
   1884		}
   1885	}
   1886
   1887	return 1;
   1888}
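
/*
 * Illustrative sketch (hypothetical, loosely modeled on perf record's
 * record__process_auxtrace()): a process_auxtrace_t callback receives the
 * synthesized event header plus up to two data chunks when the ring buffer
 * wraps, and is responsible for writing out the trailing padding counted
 * in event->auxtrace.size.
 *
 *	static int example_process_auxtrace(struct perf_tool *tool,
 *					    struct mmap *map,
 *					    union perf_event *event,
 *					    void *data1, size_t len1,
 *					    void *data2, size_t len2)
 *	{
 *		... write the event header, then data1/len1, then data2/len2,
 *		    then zero bytes up to event->auxtrace.size ...
 *		return 0;
 *	}
 */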
   1889
   1890int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
   1891			struct perf_tool *tool, process_auxtrace_t fn)
   1892{
   1893	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
   1894}
   1895
   1896int auxtrace_mmap__read_snapshot(struct mmap *map,
   1897				 struct auxtrace_record *itr,
   1898				 struct perf_tool *tool, process_auxtrace_t fn,
   1899				 size_t snapshot_size)
   1900{
   1901	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
   1902}
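
/*
 * Illustrative caller (hypothetical): both wrappers return 1 when data was
 * consumed, 0 when the buffer was empty, and a negative value on error, so
 * a poll loop can keep draining until nothing is left:
 *
 *	while ((ret = auxtrace_mmap__read(map, itr, tool, fn)) > 0)
 *		;
 */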
   1903
   1904/**
   1905 * struct auxtrace_cache - hash table to implement a cache
   1906 * @hashtable: the hashtable
   1907 * @sz: hashtable size (number of hlists)
   1908 * @entry_size: size of an entry
   1909 * @limit: limit the number of entries to this maximum, when reached the cache
   1910 *         is dropped and caching begins again with an empty cache
   1911 * @cnt: current number of entries
   1912 * @bits: hashtable size (@sz = 2^@bits)
   1913 */
   1914struct auxtrace_cache {
   1915	struct hlist_head *hashtable;
   1916	size_t sz;
   1917	size_t entry_size;
   1918	size_t limit;
   1919	size_t cnt;
   1920	unsigned int bits;
   1921};
   1922
   1923struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
   1924					   unsigned int limit_percent)
   1925{
   1926	struct auxtrace_cache *c;
   1927	struct hlist_head *ht;
   1928	size_t sz, i;
   1929
   1930	c = zalloc(sizeof(struct auxtrace_cache));
   1931	if (!c)
   1932		return NULL;
   1933
   1934	sz = 1UL << bits;
   1935
   1936	ht = calloc(sz, sizeof(struct hlist_head));
   1937	if (!ht)
   1938		goto out_free;
   1939
   1940	for (i = 0; i < sz; i++)
   1941		INIT_HLIST_HEAD(&ht[i]);
   1942
   1943	c->hashtable = ht;
   1944	c->sz = sz;
   1945	c->entry_size = entry_size;
   1946	c->limit = (c->sz * limit_percent) / 100;
   1947	c->bits = bits;
   1948
   1949	return c;
   1950
   1951out_free:
   1952	free(c);
   1953	return NULL;
   1954}
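
/*
 * Example (illustrative; struct example_entry is a hypothetical cached
 * object, sketched below auxtrace_cache__add()): create a cache with 2^10
 * hash lists whose entry count is capped at 50% of the list count; once
 * the limit is exceeded the whole cache is dropped and filling restarts.
 *
 *	struct auxtrace_cache *c;
 *
 *	c = auxtrace_cache__new(10, sizeof(struct example_entry), 50);
 *	if (!c)
 *		return -ENOMEM;
 */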
   1955
   1956static void auxtrace_cache__drop(struct auxtrace_cache *c)
   1957{
   1958	struct auxtrace_cache_entry *entry;
   1959	struct hlist_node *tmp;
   1960	size_t i;
   1961
   1962	if (!c)
   1963		return;
   1964
   1965	for (i = 0; i < c->sz; i++) {
   1966		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
   1967			hlist_del(&entry->hash);
   1968			auxtrace_cache__free_entry(c, entry);
   1969		}
   1970	}
   1971
   1972	c->cnt = 0;
   1973}
   1974
   1975void auxtrace_cache__free(struct auxtrace_cache *c)
   1976{
   1977	if (!c)
   1978		return;
   1979
   1980	auxtrace_cache__drop(c);
   1981	zfree(&c->hashtable);
   1982	free(c);
   1983}
   1984
   1985void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
   1986{
   1987	return malloc(c->entry_size);
   1988}
   1989
   1990void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
   1991				void *entry)
   1992{
   1993	free(entry);
   1994}
   1995
   1996int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
   1997			struct auxtrace_cache_entry *entry)
   1998{
   1999	if (c->limit && ++c->cnt > c->limit)
   2000		auxtrace_cache__drop(c);
   2001
   2002	entry->key = key;
   2003	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
   2004
   2005	return 0;
   2006}
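
/*
 * Illustrative usage (modeled on intel-pt.c; the struct is hypothetical):
 * struct auxtrace_cache_entry is embedded as the first member of the
 * cached object, so the cache can hash, unlink and free it.
 *
 *	struct example_entry {
 *		struct auxtrace_cache_entry	entry;
 *		u64				insn_cnt;
 *	};
 *
 *	struct example_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	if (e) {
 *		e->insn_cnt = insn_cnt;
 *		auxtrace_cache__add(c, key, &e->entry);
 *	}
 */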
   2007
   2008static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
   2009						       u32 key)
   2010{
   2011	struct auxtrace_cache_entry *entry;
   2012	struct hlist_head *hlist;
   2013	struct hlist_node *n;
   2014
   2015	if (!c)
   2016		return NULL;
   2017
   2018	hlist = &c->hashtable[hash_32(key, c->bits)];
   2019	hlist_for_each_entry_safe(entry, n, hlist, hash) {
   2020		if (entry->key == key) {
   2021			hlist_del(&entry->hash);
   2022			return entry;
   2023		}
   2024	}
   2025
   2026	return NULL;
   2027}
   2028
   2029void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
   2030{
   2031	struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
   2032
   2033	auxtrace_cache__free_entry(c, entry);
   2034}
   2035
   2036void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
   2037{
   2038	struct auxtrace_cache_entry *entry;
   2039	struct hlist_head *hlist;
   2040
   2041	if (!c)
   2042		return NULL;
   2043
   2044	hlist = &c->hashtable[hash_32(key, c->bits)];
   2045	hlist_for_each_entry(entry, hlist, hash) {
   2046		if (entry->key == key)
   2047			return entry;
   2048	}
   2049
   2050	return NULL;
   2051}
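
/*
 * Illustrative lookup (same hypothetical struct as above): because the
 * embedded entry is the first member, the returned pointer can be used as
 * the containing object directly.
 *
 *	struct example_entry *e = auxtrace_cache__lookup(c, key);
 *
 *	if (e)
 *		insn_cnt = e->insn_cnt;
 */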
   2052
   2053static void addr_filter__free_str(struct addr_filter *filt)
   2054{
   2055	zfree(&filt->str);
   2056	filt->action   = NULL;
   2057	filt->sym_from = NULL;
   2058	filt->sym_to   = NULL;
   2059	filt->filename = NULL;
   2060}
   2061
   2062static struct addr_filter *addr_filter__new(void)
   2063{
   2064	struct addr_filter *filt = zalloc(sizeof(*filt));
   2065
   2066	if (filt)
   2067		INIT_LIST_HEAD(&filt->list);
   2068
   2069	return filt;
   2070}
   2071
   2072static void addr_filter__free(struct addr_filter *filt)
   2073{
   2074	if (filt)
   2075		addr_filter__free_str(filt);
   2076	free(filt);
   2077}
   2078
   2079static void addr_filters__add(struct addr_filters *filts,
   2080			      struct addr_filter *filt)
   2081{
   2082	list_add_tail(&filt->list, &filts->head);
   2083	filts->cnt += 1;
   2084}
   2085
   2086static void addr_filters__del(struct addr_filters *filts,
   2087			      struct addr_filter *filt)
   2088{
   2089	list_del_init(&filt->list);
   2090	filts->cnt -= 1;
   2091}
   2092
   2093void addr_filters__init(struct addr_filters *filts)
   2094{
   2095	INIT_LIST_HEAD(&filts->head);
   2096	filts->cnt = 0;
   2097}
   2098
   2099void addr_filters__exit(struct addr_filters *filts)
   2100{
   2101	struct addr_filter *filt, *n;
   2102
   2103	list_for_each_entry_safe(filt, n, &filts->head, list) {
   2104		addr_filters__del(filts, filt);
   2105		addr_filter__free(filt);
   2106	}
   2107}
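
/*
 * Illustrative lifecycle (the filter string is hypothetical): parse a bare
 * filter into a list, walk it, then release everything.
 *
 *	struct addr_filters filts;
 *	struct addr_filter *filt;
 *
 *	addr_filters__init(&filts);
 *	if (!addr_filters__parse_bare_filter(&filts, "filter main @ /bin/ls"))
 *		list_for_each_entry(filt, &filts.head, list)
 *			... inspect filt ...
 *	addr_filters__exit(&filts);
 */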
   2108
   2109static int parse_num_or_str(char **inp, u64 *num, const char **str,
   2110			    const char *str_delim)
   2111{
   2112	*inp += strspn(*inp, " ");
   2113
   2114	if (isdigit(**inp)) {
   2115		char *endptr;
   2116
   2117		if (!num)
   2118			return -EINVAL;
   2119		errno = 0;
   2120		*num = strtoull(*inp, &endptr, 0);
   2121		if (errno)
   2122			return -errno;
   2123		if (endptr == *inp)
   2124			return -EINVAL;
   2125		*inp = endptr;
   2126	} else {
   2127		size_t n;
   2128
   2129		if (!str)
   2130			return -EINVAL;
   2131		*inp += strspn(*inp, " ");
   2132		*str = *inp;
   2133		n = strcspn(*inp, str_delim);
   2134		if (!n)
   2135			return -EINVAL;
   2136		*inp += n;
   2137		if (**inp) {
   2138			**inp = '\0';
   2139			*inp += 1;
   2140		}
   2141	}
   2142	return 0;
   2143}
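
/*
 * Example behavior (illustrative): with *inp = "0x1000 rest", the numeric
 * branch leaves *num = 0x1000 and *inp at " rest"; with *inp = "main rest"
 * and str_delim = " ", *str points at "main" (NUL-terminated in place) and
 * *inp advances past the delimiter to "rest".
 */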
   2144
   2145static int parse_action(struct addr_filter *filt)
   2146{
   2147	if (!strcmp(filt->action, "filter")) {
   2148		filt->start = true;
   2149		filt->range = true;
   2150	} else if (!strcmp(filt->action, "start")) {
   2151		filt->start = true;
   2152	} else if (!strcmp(filt->action, "stop")) {
   2153		filt->start = false;
   2154	} else if (!strcmp(filt->action, "tracestop")) {
   2155		filt->start = false;
   2156		filt->range = true;
   2157		filt->action += 5; /* Change 'tracestop' to 'stop' */
   2158	} else {
   2159		return -EINVAL;
   2160	}
   2161	return 0;
   2162}
   2163
   2164static int parse_sym_idx(char **inp, int *idx)
   2165{
   2166	*idx = -1;
   2167
   2168	*inp += strspn(*inp, " ");
   2169
   2170	if (**inp != '#')
   2171		return 0;
   2172
   2173	*inp += 1;
   2174
   2175	if (**inp == 'g' || **inp == 'G') {
   2176		*inp += 1;
   2177		*idx = 0;
   2178	} else {
   2179		unsigned long num;
   2180		char *endptr;
   2181
   2182		errno = 0;
   2183		num = strtoul(*inp, &endptr, 0);
   2184		if (errno)
   2185			return -errno;
   2186		if (endptr == *inp || num > INT_MAX)
   2187			return -EINVAL;
   2188		*inp = endptr;
   2189		*idx = num;
   2190	}
   2191
   2192	return 0;
   2193}
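
/*
 * Example (illustrative): "#2" selects the 2nd symbol of that name
 * (*idx = 2), "#0", "#g" or "#G" select the global symbol (*idx = 0), and
 * no '#' suffix leaves *idx = -1, meaning any match is acceptable.
 */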
   2194
   2195static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
   2196{
   2197	int err = parse_num_or_str(inp, num, str, " ");
   2198
   2199	if (!err && *str)
   2200		err = parse_sym_idx(inp, idx);
   2201
   2202	return err;
   2203}
   2204
   2205static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
   2206{
   2207	char *fstr;
   2208	int err;
   2209
   2210	filt->str = fstr = strdup(*filter_inp);
   2211	if (!fstr)
   2212		return -ENOMEM;
   2213
   2214	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
   2215	if (err)
   2216		goto out_err;
   2217
   2218	err = parse_action(filt);
   2219	if (err)
   2220		goto out_err;
   2221
   2222	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
   2223			      &filt->sym_from_idx);
   2224	if (err)
   2225		goto out_err;
   2226
   2227	fstr += strspn(fstr, " ");
   2228
   2229	if (*fstr == '/') {
   2230		fstr += 1;
   2231		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
   2232				      &filt->sym_to_idx);
   2233		if (err)
   2234			goto out_err;
   2235		filt->range = true;
   2236	}
   2237
   2238	fstr += strspn(fstr, " ");
   2239
   2240	if (*fstr == '@') {
   2241		fstr += 1;
   2242		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
   2243		if (err)
   2244			goto out_err;
   2245	}
   2246
   2247	fstr += strspn(fstr, " ,");
   2248
   2249	*filter_inp += fstr - filt->str;
   2250
   2251	return 0;
   2252
   2253out_err:
   2254	addr_filter__free_str(filt);
   2255
   2256	return err;
   2257}
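
/*
 * Example (illustrative): parsing "filter noploop / 16 @ /bin/ls" yields
 * action = "filter" (so start = true and range = true), sym_from =
 * "noploop", size = 16 and filename = "/bin/ls"; the symbol is resolved to
 * an address later, by addr_filter__resolve_syms().
 */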
   2258
   2259int addr_filters__parse_bare_filter(struct addr_filters *filts,
   2260				    const char *filter)
   2261{
   2262	struct addr_filter *filt;
   2263	const char *fstr = filter;
   2264	int err;
   2265
   2266	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
   2269		if (err) {
   2270			addr_filter__free(filt);
   2271			addr_filters__exit(filts);
   2272			return err;
   2273		}
   2274		addr_filters__add(filts, filt);
   2275	}
   2276
   2277	return 0;
   2278}
   2279
   2280struct sym_args {
   2281	const char	*name;
   2282	u64		start;
   2283	u64		size;
   2284	int		idx;
   2285	int		cnt;
   2286	bool		started;
   2287	bool		global;
   2288	bool		selected;
   2289	bool		duplicate;
   2290	bool		near;
   2291};
   2292
   2293static bool kern_sym_match(struct sym_args *args, const char *name, char type)
   2294{
   2295	/* A function with the same name, and global or the n'th found or any */
   2296	return kallsyms__is_function(type) &&
   2297	       !strcmp(name, args->name) &&
   2298	       ((args->global && isupper(type)) ||
   2299		(args->selected && ++(args->cnt) == args->idx) ||
   2300		(!args->global && !args->selected));
   2301}
   2302
   2303static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
   2304{
   2305	struct sym_args *args = arg;
   2306
   2307	if (args->started) {
   2308		if (!args->size)
   2309			args->size = start - args->start;
   2310		if (args->selected) {
   2311			if (args->size)
   2312				return 1;
   2313		} else if (kern_sym_match(args, name, type)) {
   2314			args->duplicate = true;
   2315			return 1;
   2316		}
   2317	} else if (kern_sym_match(args, name, type)) {
   2318		args->started = true;
   2319		args->start = start;
   2320	}
   2321
   2322	return 0;
   2323}
   2324
   2325static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
   2326{
   2327	struct sym_args *args = arg;
   2328
   2329	if (kern_sym_match(args, name, type)) {
   2330		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
   2331		       ++args->cnt, start, type, name);
   2332		args->near = true;
   2333	} else if (args->near) {
   2334		args->near = false;
   2335		pr_err("\t\twhich is near\t\t%s\n", name);
   2336	}
   2337
   2338	return 0;
   2339}
   2340
   2341static int sym_not_found_error(const char *sym_name, int idx)
   2342{
   2343	if (idx > 0) {
   2344		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
   2345		       idx, sym_name);
   2346	} else if (!idx) {
   2347		pr_err("Global symbol '%s' not found.\n", sym_name);
   2348	} else {
   2349		pr_err("Symbol '%s' not found.\n", sym_name);
   2350	}
   2351	pr_err("Note that symbols must be functions.\n");
   2352
   2353	return -EINVAL;
   2354}
   2355
   2356static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
   2357{
   2358	struct sym_args args = {
   2359		.name = sym_name,
   2360		.idx = idx,
   2361		.global = !idx,
   2362		.selected = idx > 0,
   2363	};
   2364	int err;
   2365
   2366	*start = 0;
   2367	*size = 0;
   2368
   2369	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
   2370	if (err < 0) {
   2371		pr_err("Failed to parse /proc/kallsyms\n");
   2372		return err;
   2373	}
   2374
   2375	if (args.duplicate) {
   2376		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
   2377		args.cnt = 0;
   2378		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
   2379		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
   2380		       sym_name);
   2381		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
   2382		return -EINVAL;
   2383	}
   2384
   2385	if (!args.started) {
   2386		pr_err("Kernel symbol lookup: ");
   2387		return sym_not_found_error(sym_name, idx);
   2388	}
   2389
   2390	*start = args.start;
   2391	*size = args.size;
   2392
   2393	return 0;
   2394}
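
/*
 * Illustrative call (values hypothetical): look up the global occurrence
 * of a kernel function.
 *
 *	u64 start, size;
 *
 *	if (!find_kern_sym("schedule", &start, &size, 0))
 *		... start/size span the function, with size derived from
 *		    the start of the next kallsyms function symbol ...
 */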
   2395
   2396static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
   2397			       char type, u64 start)
   2398{
   2399	struct sym_args *args = arg;
   2400
   2401	if (!kallsyms__is_function(type))
   2402		return 0;
   2403
   2404	if (!args->started) {
   2405		args->started = true;
   2406		args->start = start;
   2407	}
   2408	/* Don't know exactly where the kernel ends, so we add a page */
   2409	args->size = round_up(start, page_size) + page_size - args->start;
   2410
   2411	return 0;
   2412}
   2413
   2414static int addr_filter__entire_kernel(struct addr_filter *filt)
   2415{
   2416	struct sym_args args = { .started = false };
   2417	int err;
   2418
   2419	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
   2420	if (err < 0 || !args.started) {
   2421		pr_err("Failed to parse /proc/kallsyms\n");
		/* kallsyms__parse() can succeed yet find no function symbols */
		return err ?: -EINVAL;
   2423	}
   2424
   2425	filt->addr = args.start;
   2426	filt->size = args.size;
   2427
   2428	return 0;
   2429}
   2430
   2431static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
   2432{
   2433	if (start + size >= filt->addr)
   2434		return 0;
   2435
   2436	if (filt->sym_from) {
   2437		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
   2438		       filt->sym_to, start, filt->sym_from, filt->addr);
   2439	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
   2441		       filt->sym_to, start, filt->addr);
   2442	}
   2443
   2444	return -EINVAL;
   2445}
   2446
   2447static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
   2448{
   2449	bool no_size = false;
   2450	u64 start, size;
   2451	int err;
   2452
   2453	if (symbol_conf.kptr_restrict) {
   2454		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
   2455		return -EINVAL;
   2456	}
   2457
   2458	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
   2459		return addr_filter__entire_kernel(filt);
   2460
   2461	if (filt->sym_from) {
   2462		err = find_kern_sym(filt->sym_from, &start, &size,
   2463				    filt->sym_from_idx);
   2464		if (err)
   2465			return err;
   2466		filt->addr = start;
   2467		if (filt->range && !filt->size && !filt->sym_to) {
   2468			filt->size = size;
   2469			no_size = !size;
   2470		}
   2471	}
   2472
   2473	if (filt->sym_to) {
   2474		err = find_kern_sym(filt->sym_to, &start, &size,
   2475				    filt->sym_to_idx);
   2476		if (err)
   2477			return err;
   2478
   2479		err = check_end_after_start(filt, start, size);
   2480		if (err)
   2481			return err;
   2482		filt->size = start + size - filt->addr;
   2483		no_size = !size;
   2484	}
   2485
   2486	/* The very last symbol in kallsyms does not imply a particular size */
   2487	if (no_size) {
   2488		pr_err("Cannot determine size of symbol '%s'\n",
   2489		       filt->sym_to ? filt->sym_to : filt->sym_from);
   2490		return -EINVAL;
   2491	}
   2492
   2493	return 0;
   2494}
   2495
   2496static struct dso *load_dso(const char *name)
   2497{
   2498	struct map *map;
   2499	struct dso *dso;
   2500
   2501	map = dso__new_map(name);
   2502	if (!map)
   2503		return NULL;
   2504
   2505	if (map__load(map) < 0)
   2506		pr_err("File '%s' not found or has no symbols.\n", name);
   2507
   2508	dso = dso__get(map->dso);
   2509
   2510	map__put(map);
   2511
   2512	return dso;
   2513}
   2514
   2515static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
   2516			  int idx)
   2517{
   2518	/* Same name, and global or the n'th found or any */
   2519	return !arch__compare_symbol_names(name, sym->name) &&
   2520	       ((!idx && sym->binding == STB_GLOBAL) ||
   2521		(idx > 0 && ++*cnt == idx) ||
   2522		idx < 0);
   2523}
   2524
   2525static void print_duplicate_syms(struct dso *dso, const char *sym_name)
   2526{
   2527	struct symbol *sym;
   2528	bool near = false;
   2529	int cnt = 0;
   2530
   2531	pr_err("Multiple symbols with name '%s'\n", sym_name);
   2532
   2533	sym = dso__first_symbol(dso);
   2534	while (sym) {
   2535		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
   2536			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
   2537			       ++cnt, sym->start,
   2538			       sym->binding == STB_GLOBAL ? 'g' :
   2539			       sym->binding == STB_LOCAL  ? 'l' : 'w',
   2540			       sym->name);
   2541			near = true;
   2542		} else if (near) {
   2543			near = false;
   2544			pr_err("\t\twhich is near\t\t%s\n", sym->name);
   2545		}
   2546		sym = dso__next_symbol(sym);
   2547	}
   2548
   2549	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
   2550	       sym_name);
   2551	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
   2552}
   2553
   2554static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
   2555			u64 *size, int idx)
   2556{
   2557	struct symbol *sym;
   2558	int cnt = 0;
   2559
   2560	*start = 0;
   2561	*size = 0;
   2562
   2563	sym = dso__first_symbol(dso);
   2564	while (sym) {
   2565		if (*start) {
   2566			if (!*size)
   2567				*size = sym->start - *start;
   2568			if (idx > 0) {
   2569				if (*size)
   2570					return 1;
   2571			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
   2572				print_duplicate_syms(dso, sym_name);
   2573				return -EINVAL;
   2574			}
   2575		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
   2576			*start = sym->start;
   2577			*size = sym->end - sym->start;
   2578		}
   2579		sym = dso__next_symbol(sym);
   2580	}
   2581
   2582	if (!*start)
   2583		return sym_not_found_error(sym_name, idx);
   2584
   2585	return 0;
   2586}
   2587
   2588static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
   2589{
   2590	if (dso__data_file_size(dso, NULL)) {
   2591		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
   2592		       filt->filename);
   2593		return -EINVAL;
   2594	}
   2595
   2596	filt->addr = 0;
   2597	filt->size = dso->data.file_size;
   2598
   2599	return 0;
   2600}
   2601
   2602static int addr_filter__resolve_syms(struct addr_filter *filt)
   2603{
   2604	u64 start, size;
   2605	struct dso *dso;
   2606	int err = 0;
   2607
   2608	if (!filt->sym_from && !filt->sym_to)
   2609		return 0;
   2610
   2611	if (!filt->filename)
   2612		return addr_filter__resolve_kernel_syms(filt);
   2613
   2614	dso = load_dso(filt->filename);
   2615	if (!dso) {
   2616		pr_err("Failed to load symbols from: %s\n", filt->filename);
   2617		return -EINVAL;
   2618	}
   2619
   2620	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
   2621		err = addr_filter__entire_dso(filt, dso);
   2622		goto put_dso;
   2623	}
   2624
   2625	if (filt->sym_from) {
   2626		err = find_dso_sym(dso, filt->sym_from, &start, &size,
   2627				   filt->sym_from_idx);
   2628		if (err)
   2629			goto put_dso;
   2630		filt->addr = start;
   2631		if (filt->range && !filt->size && !filt->sym_to)
   2632			filt->size = size;
   2633	}
   2634
   2635	if (filt->sym_to) {
   2636		err = find_dso_sym(dso, filt->sym_to, &start, &size,
   2637				   filt->sym_to_idx);
   2638		if (err)
   2639			goto put_dso;
   2640
   2641		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;
   2644
   2645		filt->size = start + size - filt->addr;
   2646	}
   2647
   2648put_dso:
   2649	dso__put(dso);
   2650
   2651	return err;
   2652}
   2653
   2654static char *addr_filter__to_str(struct addr_filter *filt)
   2655{
   2656	char filename_buf[PATH_MAX];
   2657	const char *at = "";
   2658	const char *fn = "";
   2659	char *filter;
   2660	int err;
   2661
   2662	if (filt->filename) {
   2663		at = "@";
   2664		fn = realpath(filt->filename, filename_buf);
   2665		if (!fn)
   2666			return NULL;
   2667	}
   2668
   2669	if (filt->range) {
   2670		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
   2671			       filt->action, filt->addr, filt->size, at, fn);
   2672	} else {
   2673		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
   2674			       filt->action, filt->addr, at, fn);
   2675	}
   2676
   2677	return err < 0 ? NULL : filter;
   2678}
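
/*
 * Example output (illustrative): a resolved range filter is re-serialized
 * for the kernel as e.g. "filter 0x401000/0x30@/bin/ls", and a non-range
 * filter as e.g. "start 0x401000@/bin/ls".
 */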
   2679
   2680static int parse_addr_filter(struct evsel *evsel, const char *filter,
   2681			     int max_nr)
   2682{
   2683	struct addr_filters filts;
   2684	struct addr_filter *filt;
   2685	int err;
   2686
   2687	addr_filters__init(&filts);
   2688
   2689	err = addr_filters__parse_bare_filter(&filts, filter);
   2690	if (err)
   2691		goto out_exit;
   2692
   2693	if (filts.cnt > max_nr) {
   2694		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
   2695		       filts.cnt, max_nr);
   2696		err = -EINVAL;
   2697		goto out_exit;
   2698	}
   2699
   2700	list_for_each_entry(filt, &filts.head, list) {
   2701		char *new_filter;
   2702
   2703		err = addr_filter__resolve_syms(filt);
   2704		if (err)
   2705			goto out_exit;
   2706
   2707		new_filter = addr_filter__to_str(filt);
   2708		if (!new_filter) {
   2709			err = -ENOMEM;
   2710			goto out_exit;
   2711		}
   2712
   2713		if (evsel__append_addr_filter(evsel, new_filter)) {
   2714			err = -ENOMEM;
   2715			goto out_exit;
   2716		}
   2717	}
   2718
   2719out_exit:
   2720	addr_filters__exit(&filts);
   2721
   2722	if (err) {
   2723		pr_err("Failed to parse address filter: '%s'\n", filter);
   2724		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
   2725		pr_err("Where multiple filters are separated by space or comma.\n");
   2726	}
   2727
   2728	return err;
   2729}
   2730
   2731static int evsel__nr_addr_filter(struct evsel *evsel)
   2732{
   2733	struct perf_pmu *pmu = evsel__find_pmu(evsel);
   2734	int nr_addr_filters = 0;
   2735
   2736	if (!pmu)
   2737		return 0;
   2738
   2739	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
   2740
   2741	return nr_addr_filters;
   2742}
   2743
   2744int auxtrace_parse_filters(struct evlist *evlist)
   2745{
   2746	struct evsel *evsel;
   2747	char *filter;
   2748	int err, max_nr;
   2749
   2750	evlist__for_each_entry(evlist, evsel) {
   2751		filter = evsel->filter;
   2752		max_nr = evsel__nr_addr_filter(evsel);
   2753		if (!filter || !max_nr)
   2754			continue;
   2755		evsel->filter = NULL;
   2756		err = parse_addr_filter(evsel, filter, max_nr);
   2757		free(filter);
   2758		if (err)
   2759			return err;
   2760		pr_debug("Address filter: %s\n", evsel->filter);
   2761	}
   2762
   2763	return 0;
   2764}
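
/*
 * Illustrative call site (ordering as assumed here): perf record calls
 * auxtrace_parse_filters(evlist) before the events are opened, so each
 * resolved filter string reaches the kernel through the usual
 * PERF_EVENT_IOC_SET_FILTER path when filters are applied.
 */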
   2765
   2766int auxtrace__process_event(struct perf_session *session, union perf_event *event,
   2767			    struct perf_sample *sample, struct perf_tool *tool)
   2768{
   2769	if (!session->auxtrace)
   2770		return 0;
   2771
   2772	return session->auxtrace->process_event(session, event, sample, tool);
   2773}
   2774
   2775void auxtrace__dump_auxtrace_sample(struct perf_session *session,
   2776				    struct perf_sample *sample)
   2777{
   2778	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
   2779	    auxtrace__dont_decode(session))
   2780		return;
   2781
   2782	session->auxtrace->dump_auxtrace_sample(session, sample);
   2783}
   2784
   2785int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
   2786{
   2787	if (!session->auxtrace)
   2788		return 0;
   2789
   2790	return session->auxtrace->flush_events(session, tool);
   2791}
   2792
   2793void auxtrace__free_events(struct perf_session *session)
   2794{
   2795	if (!session->auxtrace)
   2796		return;
   2797
   2798	return session->auxtrace->free_events(session);
   2799}
   2800
   2801void auxtrace__free(struct perf_session *session)
   2802{
   2803	if (!session->auxtrace)
   2804		return;
   2805
   2806	return session->auxtrace->free(session);
   2807}
   2808
   2809bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
   2810				 struct evsel *evsel)
   2811{
   2812	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
   2813		return false;
   2814
   2815	return session->auxtrace->evsel_is_auxtrace(session, evsel);
   2816}