cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cs-etm.c (88634B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "dso.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "session.h"
#include "map_symbol.h"
#include "branch.h"
#include "symbol.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"

struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;

	int num_cpu;
	u64 latest_kernel_timestamp;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	unsigned int pmu_type;
};

struct cs_etm_traceid_queue {
	u8 trace_chan_id;
	pid_t pid, tid;
	u64 period_instructions;
	size_t last_branch_pos;
	union perf_event *event_buf;
	struct thread *thread;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
	struct cs_etm_packet_queue packet_queue;
};

struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	unsigned int queue_nr;
	u8 pending_timestamp_chan_id;
	u64 offset;
	const unsigned char *buf;
	size_t buf_len, buf_used;
	/* Conversion between traceID and index in traceid_queues array */
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue **traceid_queues;
};

/* RB tree for quick conversion between traceID and metadata pointers */
static struct intlist *traceid_list;

static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
/* PTM's ETMIDR [11:8] is set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

/*
 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
 * work with.  One option is to modify the auxtrace_heap_XYZ() API or simply
 * encode the etm queue number as the upper 16 bits and the channel as
 * the lower 16 bits.
 */
#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id)	\
		      (queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
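
/*
 * For example (illustrative values): queue_nr = 2 and trace_chan_id = 0x10
 * encode to cs_queue_nr = (2 << 16) | 0x10 = 0x20010, from which
 * TO_QUEUE_NR() recovers 2 and TO_TRACE_CHAN_ID() recovers 0x10.
 */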

static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
	etmidr &= ETMIDR_PTM_VERSION;

	if (etmidr == ETMIDR_PTM_VERSION)
		return CS_ETM_PROTO_PTM;

	return CS_ETM_PROTO_ETMV3;
}

static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*magic = metadata[CS_ETM_MAGIC];
	return 0;
}

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*cpu = (int)metadata[CS_ETM_CPU];
	return 0;
}

/*
 * The returned PID format is presented by two bits:
 *
 *   Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
 *   Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
 *
 * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
 * are enabled at the same time when the session runs on an EL2 kernel.
 * This means both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be recorded
 * in the trace data; in that case the tool selectively uses
 * CONTEXTIDR_EL2 as the PID.
 */
int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
{
	struct int_node *inode;
	u64 *metadata, val;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;

	if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
		val = metadata[CS_ETM_ETMCR];
		/* CONTEXTIDR is traced */
		if (val & BIT(ETM_OPT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	} else {
		val = metadata[CS_ETMV4_TRCCONFIGR];
		/* CONTEXTIDR_EL2 is traced */
		if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
			*pid_fmt = BIT(ETM_OPT_CTXTID2);
		/* CONTEXTIDR_EL1 is traced */
		else if (val & BIT(ETM4_CFG_BIT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	}

	return 0;
}

void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
					      u8 trace_chan_id)
{
	/*
	 * When a timestamp packet is encountered the backend code
	 * is stopped so that the front end has time to process packets
	 * that were accumulated in the traceID queue.  Since there can
	 * be more than one channel per cs_etm_queue, we need to specify
	 * what traceID queue needs servicing.
	 */
	etmq->pending_timestamp_chan_id = trace_chan_id;
}

static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
				      u8 *trace_chan_id)
{
	struct cs_etm_packet_queue *packet_queue;

	if (!etmq->pending_timestamp_chan_id)
		return 0;

	if (trace_chan_id)
		*trace_chan_id = etmq->pending_timestamp_chan_id;

	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
						     etmq->pending_timestamp_chan_id);
	if (!packet_queue)
		return 0;

	/* Acknowledge pending status */
	etmq->pending_timestamp_chan_id = 0;

	/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
	return packet_queue->cs_timestamp;
}
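
/*
 * Note: a pending_timestamp_chan_id of 0 doubles as the "no timestamp
 * pending" sentinel above.  This is safe assuming no trace source uses
 * trace ID 0, which the CoreSight trace formatter reserves as a null ID.
 */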

static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
{
	int i;

	queue->head = 0;
	queue->tail = 0;
	queue->packet_count = 0;
	for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].instr_count = 0;
		queue->packet_buffer[i].last_instr_taken_branch = false;
		queue->packet_buffer[i].last_instr_size = 0;
		queue->packet_buffer[i].last_instr_type = 0;
		queue->packet_buffer[i].last_instr_subtype = 0;
		queue->packet_buffer[i].last_instr_cond = 0;
		queue->packet_buffer[i].flags = 0;
		queue->packet_buffer[i].exception_number = UINT32_MAX;
		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
		queue->packet_buffer[i].cpu = INT_MIN;
	}
}

static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
{
	int idx;
	struct int_node *inode;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry(inode, traceid_queues_list) {
		idx = (int)(intptr_t)inode->priv;
		tidq = etmq->traceid_queues[idx];
		cs_etm__clear_packet_queue(&tidq->packet_queue);
	}
}

static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u8 trace_chan_id)
{
	int rc = -ENOMEM;
	struct auxtrace_queue *queue;
	struct cs_etm_auxtrace *etm = etmq->etm;

	cs_etm__clear_packet_queue(&tidq->packet_queue);

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
	tidq->tid = queue->tid;
	tidq->pid = -1;
	tidq->trace_chan_id = trace_chan_id;

	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->packet)
		goto out;

	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->prev_packet)
		goto out_free;

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		tidq->last_branch = zalloc(sz);
		if (!tidq->last_branch)
			goto out_free;
		tidq->last_branch_rb = zalloc(sz);
		if (!tidq->last_branch_rb)
			goto out_free;
	}

	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!tidq->event_buf)
		goto out_free;

	return 0;

out_free:
	zfree(&tidq->last_branch_rb);
	zfree(&tidq->last_branch);
	zfree(&tidq->prev_packet);
	zfree(&tidq->packet);
out:
	return rc;
}

static struct cs_etm_traceid_queue
*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	int idx;
	struct int_node *inode;
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue *tidq, **traceid_queues;
	struct cs_etm_auxtrace *etm = etmq->etm;

	if (etm->timeless_decoding)
		trace_chan_id = CS_ETM_PER_THREAD_TRACEID;

	traceid_queues_list = etmq->traceid_queues_list;

	/*
	 * Check if a traceid_queue exists for this traceID by looking
	 * in the queue list.
	 */
	inode = intlist__find(traceid_queues_list, trace_chan_id);
	if (inode) {
		idx = (int)(intptr_t)inode->priv;
		return etmq->traceid_queues[idx];
	}

	/* We couldn't find a traceid_queue for this traceID, allocate one */
	tidq = malloc(sizeof(*tidq));
	if (!tidq)
		return NULL;

	memset(tidq, 0, sizeof(*tidq));

	/* Get a valid index for the new traceid_queue */
	idx = intlist__nr_entries(traceid_queues_list);
	/* Memory for the inode is freed in cs_etm__free_traceid_queues() */
	inode = intlist__findnew(traceid_queues_list, trace_chan_id);
	if (!inode)
		goto out_free;

	/* Associate this traceID with this index */
	inode->priv = (void *)(intptr_t)idx;

	if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
		goto out_free;

	/* Grow the traceid_queues array by one unit */
	traceid_queues = etmq->traceid_queues;
	traceid_queues = reallocarray(traceid_queues,
				      idx + 1,
				      sizeof(*traceid_queues));

	/*
	 * On failure reallocarray() returns NULL and the original block of
	 * memory is left untouched.
	 */
	if (!traceid_queues)
		goto out_free;

	traceid_queues[idx] = tidq;
	etmq->traceid_queues = traceid_queues;

	return etmq->traceid_queues[idx];

out_free:
	/*
	 * Function intlist__remove() removes the inode from the list
	 * and deletes the memory associated with it.
	 */
	intlist__remove(traceid_queues_list, inode);
	free(tidq);

	return NULL;
}

struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (tidq)
		return &tidq->packet_queue;

	return NULL;
}

static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
				struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_packet *tmp;

	if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
	    etm->synth_opts.instructions) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = tidq->packet;
		tidq->packet = tidq->prev_packet;
		tidq->prev_packet = tmp;
	}
}
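
/*
 * Note: swapping the two packet pointers (rather than copying the packet
 * contents) is cheap, and it is only done when samples are being
 * synthesised, since those are the only paths that consult PREV_PACKET.
 */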

static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, "	%s", pkt_string);
	else
		color_fprintf(stdout, color, "	%s\n", pkt_string);

	fflush(stdout);
}

static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx,
					  u32 etmidr)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}

static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}

static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETE;
	t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
	t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
	t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
	t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
	t_params[idx].ete.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
	t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
	t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
}

static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
				     struct cs_etm_auxtrace *etm,
				     int decoders)
{
	int i;
	u32 etmidr;
	u64 architecture;

	for (i = 0; i < decoders; i++) {
		architecture = etm->metadata[i][CS_ETM_MAGIC];

		switch (architecture) {
		case __perf_cs_etmv3_magic:
			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
			break;
		case __perf_cs_etmv4_magic:
			cs_etm__set_trace_param_etmv4(t_params, etm, i);
			break;
		case __perf_cs_ete_magic:
			cs_etm__set_trace_param_ete(t_params, etm, i);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_queue *etmq,
				       enum cs_etm_decoder_operation mode,
				       bool formatted)
{
	int ret = -EINVAL;

	if (!(mode < CS_ETM_OPERATION_MAX))
		goto out;

	d_params->packet_printer = cs_etm__packet_dump;
	d_params->operation = mode;
	d_params->data = etmq;
	d_params->formatted = formatted;
	d_params->fsyncs = false;
	d_params->hsyncs = false;
	d_params->frame_aligned = true;

	ret = 0;
out:
	return ret;
}

static void cs_etm__dump_event(struct cs_etm_queue *etmq,
			       struct auxtrace_buffer *buffer)
{
	int ret;
	const char *color = PERF_COLOR_BLUE;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		     ". ... CoreSight %s Trace data: size %#zx bytes\n",
		     cs_etm_decoder__get_name(etmq->decoder), buffer->size);

	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				etmq->decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__reset(etmq->decoder);
}

static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	if (etm->timeless_decoding)
		return cs_etm__process_timeless_queues(etm, -1);

	return cs_etm__process_queues(etm);
}

static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
{
	int idx;
	uintptr_t priv;
	struct int_node *inode, *tmp;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
		priv = (uintptr_t)inode->priv;
		idx = priv;

		/* Free this traceid_queue from the array */
		tidq = etmq->traceid_queues[idx];
		thread__zput(tidq->thread);
		zfree(&tidq->event_buf);
		zfree(&tidq->last_branch);
		zfree(&tidq->last_branch_rb);
		zfree(&tidq->prev_packet);
		zfree(&tidq->packet);
		zfree(&tidq);

		/*
		 * Function intlist__remove() removes the inode from the list
		 * and deletes the memory associated with it.
		 */
		intlist__remove(traceid_queues_list, inode);
	}

	/* Then the RB tree itself */
	intlist__delete(traceid_queues_list);
	etmq->traceid_queues_list = NULL;

	/* finally free the traceid_queues array */
	zfree(&etmq->traceid_queues);
}

static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	cs_etm_decoder__free(etmq->decoder);
	cs_etm__free_traceid_queues(etmq);
	free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/metadata nodes from the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}

static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
				      struct evsel *evsel)
{
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	return evsel->core.attr.type == aux->pmu_type;
}

static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
	struct machine *machine;

	machine = etmq->etm->machine;

	if (address >= machine__kernel_start(machine)) {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_KERNEL;
		else
			return PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_USER;
		else if (perf_guest)
			return PERF_RECORD_MISC_GUEST_USER;
		else
			return PERF_RECORD_MISC_HYPERVISOR;
	}
}

static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
			      u64 address, size_t size, u8 *buffer)
{
	u8  cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;
	struct cs_etm_traceid_queue *tidq;

	if (!etmq)
		return 0;

	machine = etmq->etm->machine;
	cpumode = cs_etm__cpu_mode(etmq, address);
	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return 0;

	thread = tidq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return 0;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0) {
		ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
				 "              Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
		if (!al.map->dso->auxtrace_warned) {
			pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
				    address,
				    al.map->dso->long_name ? al.map->dso->long_name : "Unknown");
			al.map->dso->auxtrace_warned = true;
		}
		return 0;
	}

	return len;
}

static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
						bool formatted)
{
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params  *t_params = NULL;
	struct cs_etm_queue *etmq;
	/*
	 * Each queue can only contain data from one CPU when unformatted, so only one decoder is
	 * needed.
	 */
	int decoders = formatted ? etm->num_cpu : 1;

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->traceid_queues_list = intlist__new(NULL);
	if (!etmq->traceid_queues_list)
		goto out_free;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * decoders);

	if (!t_params)
		goto out_free;

	if (cs_etm__init_trace_params(t_params, etm, decoders))
		goto out_free;

	/* Set decoder parameters to decode trace packets */
	if (cs_etm__init_decoder_params(&d_params, etmq,
					dump_trace ? CS_ETM_OPERATION_PRINT :
						     CS_ETM_OPERATION_DECODE,
					formatted))
		goto out_free;

	etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
					    t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	zfree(&t_params);
	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	intlist__delete(etmq->traceid_queues_list);
	free(etmq);

	return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr,
			       bool formatted)
{
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		return 0;

	etmq = cs_etm__alloc_queue(etm, formatted);

	if (!etmq)
		return -ENOMEM;

	queue->priv = etmq;
	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->offset = 0;

	return 0;
}

static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
					    struct cs_etm_queue *etmq,
					    unsigned int queue_nr)
{
	int ret = 0;
	unsigned int cs_queue_nr;
	u8 trace_chan_id;
	u64 cs_timestamp;

	/*
	 * We are under a CPU-wide trace scenario.  As such we need to know
	 * when the code that generated the traces started to execute so that
	 * it can be correlated with execution on other CPUs.  So we get a
	 * handle on the beginning of traces and decode until we find a
	 * timestamp.  The timestamp is then added to the auxtrace min heap
	 * in order to know which of all the etmqs to decode first.
	 */
	while (1) {
		/*
		 * Fetch an aux_buffer from this etmq.  Bail if no more
		 * blocks or an error has been encountered.
		 */
		ret = cs_etm__get_data_block(etmq);
		if (ret <= 0)
			goto out;

		/*
		 * Run decoder on the trace block.  The decoder will stop when
		 * encountering a CS timestamp, a full packet queue or the end of
		 * trace for that block.
		 */
		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		/*
		 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
		 * the timestamp calculation for us.
		 */
		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		/* We found a timestamp, no need to continue. */
		if (cs_timestamp)
			break;

		/*
		 * We didn't find a timestamp so empty all the traceid packet
		 * queues before looking for another timestamp packet, either
		 * in the current data block or a new one.  Packets that were
		 * just decoded are useless since no timestamp has been
		 * associated with them.  As such simply discard them.
		 */
		cs_etm__clear_all_packet_queues(etmq);
	}

	/*
	 * We have a timestamp.  Add it to the min heap to reflect when
	 * instructions conveyed by the range packets of this traceID queue
	 * started to execute.  Once the same has been done for all the traceID
	 * queues of each etmq, rendering and decoding can start in
	 * chronological order.
	 *
	 * Note that packets decoded above are still in the traceID's packet
	 * queue and will be processed in cs_etm__process_queues().
	 */
	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
out:
	return ret;
}

static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
				 struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs_src = tidq->last_branch_rb;
	struct branch_stack *bs_dst = tidq->last_branch;
	size_t nr = 0;

	/*
	 * Set the number of records before early exit: ->nr is used to
	 * determine how many branches to copy from ->entries.
	 */
	bs_dst->nr = bs_src->nr;

	/*
	 * Early exit when there is nothing to copy.
	 */
	if (!bs_src->nr)
		return;

	/*
	 * As bs_src->entries is a circular buffer, we need to copy from it in
	 * two steps.  First, copy the branches from the most recently inserted
	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[tidq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * If we wrapped around at least once, the branches from the beginning
	 * of the bs_src->entries buffer and until the ->last_branch_pos element
	 * are older valid branches: copy them over.  The total number of
	 * branches copied over will be equal to the number of branches asked by
	 * the user in last_branch_sz.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * tidq->last_branch_pos);
	}
}
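
/*
 * Worked example (illustrative values): with last_branch_sz = 4 and
 * last_branch_pos = 1 after at least one wrap-around, step one copies
 * bs_src->entries[1..3] (newest first) to bs_dst->entries[0..2] and step
 * two copies bs_src->entries[0] (the oldest) to bs_dst->entries[3].
 */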

static inline
void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
{
	tidq->last_branch_pos = 0;
	tidq->last_branch_rb->nr = 0;
}

static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
					 u8 trace_chan_id, u64 addr)
{
	u8 instrBytes[2];

	cs_etm__mem_access(etmq, trace_chan_id, addr,
			   ARRAY_SIZE(instrBytes), instrBytes);
	/*
	 * T32 instruction size is indicated by bits[15:11] of the first
	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
	 * denote a 32-bit instruction.
	 */
	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}
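
/*
 * For example (illustrative encodings): the first halfword of a 32-bit
 * PUSH.W, 0xE92D, gives instrBytes[1] = 0xE9 and 0xE9 & 0xF8 = 0xE8, so 4
 * is returned; a 16-bit PUSH, 0xB580, gives 0xB5 & 0xF8 = 0xB0 < 0xE8, so
 * 2 is returned.
 */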

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->start_addr;
}

static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->end_addr - packet->last_instr_size;
}

static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
				     u64 trace_chan_id,
				     const struct cs_etm_packet *packet,
				     u64 offset)
{
	if (packet->isa == CS_ETM_ISA_T32) {
		u64 addr = packet->start_addr;

		while (offset) {
			addr += cs_etm__t32_instr_size(etmq,
						       trace_chan_id, addr);
			offset--;
		}
		return addr;
	}

	/* Assume a 4 byte instruction size (A32/A64) */
	return packet->start_addr + offset * 4;
}
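
/*
 * For example (illustrative values): an A64 packet starting at 0x400000
 * with offset 3 resolves to 0x40000C, while for T32 the loop above walks
 * forward 2 or 4 bytes per instruction as reported by
 * cs_etm__t32_instr_size().
 */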

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
					  struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs = tidq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * The branches are recorded in a circular buffer in reverse
	 * chronological order: we start recording from the last element of the
	 * buffer down.  After writing the first element of the stack, move the
	 * insert position back to the end of the buffer.
	 */
	if (!tidq->last_branch_pos)
		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	tidq->last_branch_pos -= 1;

	be       = &bs->entries[tidq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
	be->to	 = cs_etm__first_executed_instr(tidq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked by
	 * the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}
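
/*
 * Because the buffer is written downwards from the end, ->last_branch_pos
 * always points at the most recent entry; with last_branch_sz = 4
 * (illustrative), successive branches land in slots 3, 2, 1, 0, 3, ...
 * while bs->nr saturates at 4.
 */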

static int cs_etm__inject_event(union perf_event *event,
			       struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}


static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		etmq->buf_len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	etmq->buf_used = 0;
	etmq->buf_len = aux_buffer->size;
	etmq->buf = aux_buffer->data;

	return etmq->buf_len;
}

static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct cs_etm_traceid_queue *tidq)
{
	if ((!tidq->thread) && (tidq->tid != -1))
		tidq->thread = machine__find_thread(etm->machine, -1,
						    tidq->tid);

	if (tidq->thread)
		tidq->pid = tidq->thread->pid_;
}

int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
			 pid_t tid, u8 trace_chan_id)
{
	int cpu, err = -EINVAL;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return err;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return err;

	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
	if (err)
		return err;

	tidq->tid = tid;
	thread__zput(tidq->thread);

	cs_etm__set_pid_tid_cpu(etm, tidq);
	return 0;
}

bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
{
	return !!etmq->etm->timeless_decoding;
}

static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
			      u64 trace_chan_id,
			      const struct cs_etm_packet *packet,
			      struct perf_sample *sample)
{
	/*
	 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
	 * packet, so directly bail out with 'insn_len' = 0.
	 */
	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
		sample->insn_len = 0;
		return;
	}

	/*
	 * T32 instruction size might be 32-bit or 16-bit, decide by calling
	 * cs_etm__t32_instr_size().
	 */
	if (packet->isa == CS_ETM_ISA_T32)
		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
							  sample->ip);
	/* Otherwise, the A64 and A32 instruction size is always 32-bit. */
	else
		sample->insn_len = 4;

	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
			   sample->insn_len, (void *)sample->insn);
}

static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    struct cs_etm_traceid_queue *tidq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = tidq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!etm->timeless_decoding)
		sample.time = etm->latest_kernel_timestamp;
	sample.ip = addr;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);

	if (etm->synth_opts.last_branch)
		sample.branch_stack = tidq->last_branch;

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	return ret;
}

/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch.  Generate a sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
				       struct cs_etm_traceid_queue *tidq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = tidq->event_buf;
	struct dummy_branch_stack {
		u64			nr;
		u64			hw_idx;
		struct branch_entry	entries;
	} dummy_bs;
	u64 ip;

	ip = cs_etm__last_executed_instr(tidq->prev_packet);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!etm->timeless_decoding)
		sample.time = etm->latest_kernel_timestamp;
	sample.ip = ip;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.addr = cs_etm__first_executed_instr(tidq->packet);
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
			  &sample);

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.hw_idx = -1ULL,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
		"CS ETM Trace: failed to deliver branch event, error %d\n",
		ret);

	return ret;
}

struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		      container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}

static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->core.id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch) {
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		/*
		 * We don't use the hardware index, but the sample generation
		 * code uses the new format branch_stack with this field,
		 * so the event attributes must indicate that it's present.
		 */
		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
	}

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}
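
/*
 * Note: the + 1000000000 offset above is a heuristic to keep the synthesised
 * event IDs clear of the IDs already allocated to real events in the
 * session, and the "if (!id)" check only guards against the u64 addition
 * wrapping to 0, presumably because 0 is not a usable ID.
 */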

static int cs_etm__sample(struct cs_etm_queue *etmq,
			  struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	int ret;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 instrs_prev;

	/* Get instructions remainder from previous packet */
	instrs_prev = tidq->period_instructions;

	tidq->period_instructions += tidq->packet->instr_count;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
	    tidq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq, tidq);

	if (etm->synth_opts.instructions &&
	    tidq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction sample periodically
		 * TODO: allow period to be defined in cycles and clock time
		 */

		/*
		 * Below diagram demonstrates the instruction samples
		 * generation flows:
		 *
		 *    Instrs     Instrs       Instrs       Instrs
		 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
		 *    |            |            |            |
		 *    V            V            V            V
		 *   --------------------------------------------------
		 *            ^                                  ^
		 *            |                                  |
		 *         Period                             Period
		 *    instructions(Pi)                   instructions(Pi')
		 *
		 *            |                                  |
		 *            \---------------- -----------------/
		 *                             V
		 *                 tidq->packet->instr_count
		 *
		 * Instrs Sample(n...) are the synthesised samples occurring
		 * every etm->instructions_sample_period instructions - as
		 * defined on the perf command line.  Sample(n) is the last
		 * sample before the current etm packet; samples n+1 to n+3
		 * are generated from the current etm packet.
		 *
		 * tidq->packet->instr_count represents the number of
		 * instructions in the current etm packet.
		 *
		 * Period instructions (Pi) contains the number of
		 * instructions executed after the sample point(n) from the
		 * previous etm packet.  This will always be less than
		 * etm->instructions_sample_period.
		 *
		 * When generating new samples, sample(n+1) combines
		 * instructions from two parts: the tail of the previous
		 * packet and the head of the incoming packet.  Samples (n+2)
		 * and (n+3) each consume a full sample period of
		 * instructions.  After sample(n+3), the remaining
		 * instructions are carried over in tidq->period_instructions
		 * for the next round of calculation.
		 */

		/*
		 * Get the initial offset into the current packet instructions;
		 * entry conditions ensure that instrs_prev is less than
		 * etm->instructions_sample_period.
		 */
		u64 offset = etm->instructions_sample_period - instrs_prev;
		u64 addr;

		/* Prepare last branches for instruction sample */
		if (etm->synth_opts.last_branch)
			cs_etm__copy_last_branch_rb(etmq, tidq);

		while (tidq->period_instructions >=
				etm->instructions_sample_period) {
			/*
			 * Calculate the address of the sampled instruction (-1
			 * as sample is reported as though instruction has just
			 * been executed, but PC has not advanced to next
			 * instruction)
			 */
			addr = cs_etm__instr_addr(etmq, trace_chan_id,
						  tidq->packet, offset - 1);
			ret = cs_etm__synth_instruction_sample(
				etmq, tidq, addr,
				etm->instructions_sample_period);
			if (ret)
				return ret;

			offset += etm->instructions_sample_period;
			tidq->period_instructions -=
				etm->instructions_sample_period;
		}
	}

	if (etm->synth_opts.branches) {
		bool generate_sample = false;

		/* Generate sample for tracing on packet */
		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			generate_sample = true;

		/* Generate sample for branch taken packet */
		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
		    tidq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq, tidq);
			if (ret)
				return ret;
		}
	}

	cs_etm__packet_swap(etm, tidq);

	return 0;
}
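
/*
 * Numeric example (illustrative values): with a sample period of 10000,
 * instrs_prev = 9000 and instr_count = 25000, period_instructions reaches
 * 34000, samples are synthesised at packet offsets 999, 10999 and 20999,
 * and the remaining 4000 instructions carry over to the next packet.
 */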

static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
{
	/*
	 * When an exception packet is inserted, force
	 * 'prev_packet->last_instr_taken_branch' to true regardless of
	 * whether the last instruction in the previous range packet was a
	 * taken branch.  This ensures a branch sample is generated for the
	 * instruction range executed before the exception trapped to the
	 * kernel or before the exception return.
	 *
	 * The exception packet includes dummy address values, so don't swap
	 * PACKET with PREV_PACKET.  This keeps PREV_PACKET useful for
	 * generating instruction and branch samples.
	 */
	if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
		tidq->prev_packet->last_instr_taken_branch = true;

	return 0;
}

static int cs_etm__flush(struct cs_etm_queue *etmq,
			 struct cs_etm_traceid_queue *tidq)
{
	int err = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;

	/* Handle start tracing packet */
	if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
		goto swap_packet;

	if (etmq->etm->synth_opts.last_branch &&
	    etmq->etm->synth_opts.instructions &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		u64 addr;

		/* Prepare last branches for instruction sample */
		cs_etm__copy_last_branch_rb(etmq, tidq);

		/*
		 * Generate a last branch event for the branches left in the
		 * circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range
		 */
		addr = cs_etm__last_executed_instr(tidq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, tidq, addr,
			tidq->period_instructions);
		if (err)
			return err;

		tidq->period_instructions = 0;

	}

	if (etm->synth_opts.branches &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		err = cs_etm__synth_branch_sample(etmq, tidq);
		if (err)
			return err;
	}

swap_packet:
	cs_etm__packet_swap(etm, tidq);

	/* Reset last branches after flushing the trace */
	if (etm->synth_opts.last_branch)
		cs_etm__reset_last_branch_rb(tidq);

	return err;
}

static int cs_etm__end_block(struct cs_etm_queue *etmq,
			     struct cs_etm_traceid_queue *tidq)
{
	int err;

	/*
	 * No new packet is coming and 'tidq->packet' contains the stale
	 * packet left over from the previous packet swap, so skip generating
	 * a branch sample from it.
	 *
	 * For this case, only flush the branch stack and generate a last
	 * branch event for the branches left in the circular buffer at the
	 * end of the trace.
	 */
   1613	if (etmq->etm->synth_opts.last_branch &&
   1614	    etmq->etm->synth_opts.instructions &&
   1615	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
   1616		u64 addr;
   1617
   1618		/* Prepare last branches for instruction sample */
   1619		cs_etm__copy_last_branch_rb(etmq, tidq);
   1620
   1621		/*
   1622		 * Use the address of the end of the last reported execution
   1623		 * range.
   1624		 */
   1625		addr = cs_etm__last_executed_instr(tidq->prev_packet);
   1626
   1627		err = cs_etm__synth_instruction_sample(
   1628			etmq, tidq, addr,
   1629			tidq->period_instructions);
   1630		if (err)
   1631			return err;
   1632
   1633		tidq->period_instructions = 0;
   1634	}
   1635
   1636	return 0;
   1637}
   1638/*
   1639 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
   1640 *			   if need be.
   1641 * Returns:	< 0	if error
   1642 *		= 0	if no more auxtrace_buffer to read
   1643 *		> 0	if the current buffer isn't empty yet
   1644 */
   1645static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
   1646{
   1647	int ret;
   1648
   1649	if (!etmq->buf_len) {
   1650		ret = cs_etm__get_trace(etmq);
   1651		if (ret <= 0)
   1652			return ret;
   1653		/*
   1654		 * We cannot assume consecutive blocks in the data file
    1655		 * are contiguous; reset the decoder to force a re-sync.
   1656		 */
   1657		ret = cs_etm_decoder__reset(etmq->decoder);
   1658		if (ret)
   1659			return ret;
   1660	}
   1661
   1662	return etmq->buf_len;
   1663}
   1664
   1665static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
   1666				 struct cs_etm_packet *packet,
   1667				 u64 end_addr)
   1668{
   1669	/* Initialise to keep compiler happy */
   1670	u16 instr16 = 0;
   1671	u32 instr32 = 0;
   1672	u64 addr;
   1673
   1674	switch (packet->isa) {
   1675	case CS_ETM_ISA_T32:
   1676		/*
   1677		 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
   1678		 *
   1679		 *  b'15         b'8
   1680		 * +-----------------+--------+
   1681		 * | 1 1 0 1 1 1 1 1 |  imm8  |
   1682		 * +-----------------+--------+
   1683		 *
    1684		 * According to the specification, SVC is only defined as a
    1685		 * 16-bit instruction for T32, with no 32-bit form; so below,
    1686		 * read only 2 bytes as the instruction size for T32.
   1687		 */
   1688		addr = end_addr - 2;
   1689		cs_etm__mem_access(etmq, trace_chan_id, addr,
   1690				   sizeof(instr16), (u8 *)&instr16);
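        		/* Fixed opcode bits [15:8] = b'11011111 (0xDF), per the encoding above */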
   1691		if ((instr16 & 0xFF00) == 0xDF00)
   1692			return true;
   1693
   1694		break;
   1695	case CS_ETM_ISA_A32:
   1696		/*
   1697		 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
   1698		 *
   1699		 *  b'31 b'28 b'27 b'24
   1700		 * +---------+---------+-------------------------+
   1701		 * |  !1111  | 1 1 1 1 |        imm24            |
   1702		 * +---------+---------+-------------------------+
   1703		 */
   1704		addr = end_addr - 4;
   1705		cs_etm__mem_access(etmq, trace_chan_id, addr,
   1706				   sizeof(instr32), (u8 *)&instr32);
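        		/* Opcode bits [27:24] must be b'1111 and the cond field [31:28] must not be b'1111 */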
   1707		if ((instr32 & 0x0F000000) == 0x0F000000 &&
   1708		    (instr32 & 0xF0000000) != 0xF0000000)
   1709			return true;
   1710
   1711		break;
   1712	case CS_ETM_ISA_A64:
   1713		/*
   1714		 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
   1715		 *
   1716		 *  b'31               b'21           b'4     b'0
   1717		 * +-----------------------+---------+-----------+
   1718		 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
   1719		 * +-----------------------+---------+-----------+
   1720		 */
   1721		addr = end_addr - 4;
   1722		cs_etm__mem_access(etmq, trace_chan_id, addr,
   1723				   sizeof(instr32), (u8 *)&instr32);
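        		/* The mask keeps the fixed bits [31:21] and [4:0] shown above */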
   1724		if ((instr32 & 0xFFE0001F) == 0xd4000001)
   1725			return true;
   1726
   1727		break;
   1728	case CS_ETM_ISA_UNKNOWN:
   1729	default:
   1730		break;
   1731	}
   1732
   1733	return false;
   1734}
   1735
   1736static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
   1737			       struct cs_etm_traceid_queue *tidq, u64 magic)
   1738{
   1739	u8 trace_chan_id = tidq->trace_chan_id;
   1740	struct cs_etm_packet *packet = tidq->packet;
   1741	struct cs_etm_packet *prev_packet = tidq->prev_packet;
   1742
   1743	if (magic == __perf_cs_etmv3_magic)
   1744		if (packet->exception_number == CS_ETMV3_EXC_SVC)
   1745			return true;
   1746
   1747	/*
   1748	 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
   1749	 * HVC cases; need to check if it's SVC instruction based on
   1750	 * packet address.
   1751	 */
   1752	if (magic == __perf_cs_etmv4_magic) {
   1753		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
   1754		    cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
   1755					 prev_packet->end_addr))
   1756			return true;
   1757	}
   1758
   1759	return false;
   1760}
   1761
   1762static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
   1763				       u64 magic)
   1764{
   1765	struct cs_etm_packet *packet = tidq->packet;
   1766
   1767	if (magic == __perf_cs_etmv3_magic)
   1768		if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
   1769		    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
   1770		    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
   1771		    packet->exception_number == CS_ETMV3_EXC_IRQ ||
   1772		    packet->exception_number == CS_ETMV3_EXC_FIQ)
   1773			return true;
   1774
   1775	if (magic == __perf_cs_etmv4_magic)
   1776		if (packet->exception_number == CS_ETMV4_EXC_RESET ||
   1777		    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
   1778		    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
   1779		    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
   1780		    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
   1781		    packet->exception_number == CS_ETMV4_EXC_IRQ ||
   1782		    packet->exception_number == CS_ETMV4_EXC_FIQ)
   1783			return true;
   1784
   1785	return false;
   1786}
   1787
   1788static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
   1789				      struct cs_etm_traceid_queue *tidq,
   1790				      u64 magic)
   1791{
   1792	u8 trace_chan_id = tidq->trace_chan_id;
   1793	struct cs_etm_packet *packet = tidq->packet;
   1794	struct cs_etm_packet *prev_packet = tidq->prev_packet;
   1795
   1796	if (magic == __perf_cs_etmv3_magic)
   1797		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
   1798		    packet->exception_number == CS_ETMV3_EXC_HYP ||
   1799		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
   1800		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
   1801		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
   1802		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
   1803		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
   1804			return true;
   1805
   1806	if (magic == __perf_cs_etmv4_magic) {
   1807		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
   1808		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
   1809		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
   1810		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
   1811			return true;
   1812
    1813		/*
    1814		 * For CS_ETMV4_EXC_CALL, instructions other than SVC
    1815		 * (i.e. SMC and HVC) are taken as sync exceptions.
    1816		 */
   1817		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
   1818		    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
   1819					  prev_packet->end_addr))
   1820			return true;
   1821
    1822		/*
    1823		 * ETMv4 has 5 bits for the exception number; numbers in
    1824		 * the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ] are
    1825		 * implementation defined exceptions.
    1826		 *
    1827		 * In this case, simply treat them as sync exceptions.
    1828		 */
   1829		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
   1830		    packet->exception_number <= CS_ETMV4_EXC_END)
   1831			return true;
   1832	}
   1833
   1834	return false;
   1835}
   1836
   1837static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
   1838				    struct cs_etm_traceid_queue *tidq)
   1839{
   1840	struct cs_etm_packet *packet = tidq->packet;
   1841	struct cs_etm_packet *prev_packet = tidq->prev_packet;
   1842	u8 trace_chan_id = tidq->trace_chan_id;
   1843	u64 magic;
   1844	int ret;
   1845
   1846	switch (packet->sample_type) {
   1847	case CS_ETM_RANGE:
    1848		/*
    1849		 * An immediate branch instruction with neither link nor
    1850		 * return flag is a normal branch instruction within
    1851		 * the function.
    1852		 */
   1853		if (packet->last_instr_type == OCSD_INSTR_BR &&
   1854		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
   1855			packet->flags = PERF_IP_FLAG_BRANCH;
   1856
   1857			if (packet->last_instr_cond)
   1858				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
   1859		}
   1860
    1861		/*
    1862		 * An immediate branch instruction with link (e.g. BL) is a
    1863		 * branch instruction for a function call.
    1864		 */
   1865		if (packet->last_instr_type == OCSD_INSTR_BR &&
   1866		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
   1867			packet->flags = PERF_IP_FLAG_BRANCH |
   1868					PERF_IP_FLAG_CALL;
   1869
    1870		/*
    1871		 * An indirect branch instruction with link (e.g. BLR) is a
    1872		 * branch instruction for a function call.
    1873		 */
   1874		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
   1875		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
   1876			packet->flags = PERF_IP_FLAG_BRANCH |
   1877					PERF_IP_FLAG_CALL;
   1878
    1879		/*
    1880		 * An indirect branch instruction with the subtype
    1881		 * OCSD_S_INSTR_V7_IMPLIED_RET is an explicit hint of a
    1882		 * function return for A32/T32.
    1883		 */
   1884		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
   1885		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
   1886			packet->flags = PERF_IP_FLAG_BRANCH |
   1887					PERF_IP_FLAG_RETURN;
   1888
    1889		/*
    1890		 * An indirect branch instruction without link (e.g. BR) is
    1891		 * usually used for a function return, especially for
    1892		 * functions within dynamically linked libraries.
    1893		 */
   1894		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
   1895		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
   1896			packet->flags = PERF_IP_FLAG_BRANCH |
   1897					PERF_IP_FLAG_RETURN;
   1898
   1899		/* Return instruction for function return. */
   1900		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
   1901		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
   1902			packet->flags = PERF_IP_FLAG_BRANCH |
   1903					PERF_IP_FLAG_RETURN;
   1904
    1905		/*
    1906		 * The decoder might insert a discontinuity in the middle of
    1907		 * instruction packets; fix up prev_packet with the flag
    1908		 * PERF_IP_FLAG_TRACE_BEGIN to indicate that tracing restarted.
    1909		 */
   1910		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
   1911			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
   1912					      PERF_IP_FLAG_TRACE_BEGIN;
   1913
    1914		/*
    1915		 * If the previous packet is an exception return packet
    1916		 * and the return address immediately follows an SVC
    1917		 * instruction, calibrate the previous packet's sample
    1918		 * flags to PERF_IP_FLAG_SYSCALLRET.
    1919		 */
   1920		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
   1921					   PERF_IP_FLAG_RETURN |
   1922					   PERF_IP_FLAG_INTERRUPT) &&
   1923		    cs_etm__is_svc_instr(etmq, trace_chan_id,
   1924					 packet, packet->start_addr))
   1925			prev_packet->flags = PERF_IP_FLAG_BRANCH |
   1926					     PERF_IP_FLAG_RETURN |
   1927					     PERF_IP_FLAG_SYSCALLRET;
   1928		break;
   1929	case CS_ETM_DISCONTINUITY:
    1930		/*
    1931		 * The trace is discontinuous; if the previous packet is
    1932		 * an instruction packet, set the PERF_IP_FLAG_TRACE_END
    1933		 * flag on it.
    1934		 */
   1935		if (prev_packet->sample_type == CS_ETM_RANGE)
   1936			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
   1937					      PERF_IP_FLAG_TRACE_END;
   1938		break;
   1939	case CS_ETM_EXCEPTION:
   1940		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
   1941		if (ret)
   1942			return ret;
   1943
    1944		/* The exception is for a system call. */
   1945		if (cs_etm__is_syscall(etmq, tidq, magic))
   1946			packet->flags = PERF_IP_FLAG_BRANCH |
   1947					PERF_IP_FLAG_CALL |
   1948					PERF_IP_FLAG_SYSCALLRET;
    1949		/*
    1950		 * These exceptions are triggered by external signals from the
    1951		 * bus, interrupt controller, debug module, PE reset or halt.
    1952		 */
   1953		else if (cs_etm__is_async_exception(tidq, magic))
   1954			packet->flags = PERF_IP_FLAG_BRANCH |
   1955					PERF_IP_FLAG_CALL |
   1956					PERF_IP_FLAG_ASYNC |
   1957					PERF_IP_FLAG_INTERRUPT;
    1958		/*
    1959		 * Otherwise, the exception is caused by a trap, an
    1960		 * instruction or data fault, or an alignment error.
    1961		 */
   1962		else if (cs_etm__is_sync_exception(etmq, tidq, magic))
   1963			packet->flags = PERF_IP_FLAG_BRANCH |
   1964					PERF_IP_FLAG_CALL |
   1965					PERF_IP_FLAG_INTERRUPT;
   1966
    1967		/*
    1968		 * An exception packet is not used standalone for generating
    1969		 * samples; it is affiliated with the previous instruction
    1970		 * range packet.  So when the exception packet is inserted,
    1971		 * set the previous range packet's flags to tell perf it is
    1972		 * an exception taken branch.
    1973		 */
   1974		if (prev_packet->sample_type == CS_ETM_RANGE)
   1975			prev_packet->flags = packet->flags;
   1976		break;
   1977	case CS_ETM_EXCEPTION_RET:
    1978		/*
    1979		 * Like the exception packet, the exception return packet is
    1980		 * not used standalone for generating samples and is
    1981		 * affiliated with the previous instruction range packet; so
    1982		 * set the previous range packet's flags to tell perf it is
    1983		 * an exception return branch.
    1984		 *
    1985		 * The exception return can be for either a system call or
    1986		 * another exception type; unfortunately the packet doesn't
    1987		 * contain exception type related info, so we cannot decide
    1988		 * the exception type purely based on the exception return
    1989		 * packet.  Recording the exception number from the exception
    1990		 * packet and reusing it for the exception return packet is
    1991		 * not reliable either, because the trace can be discontinuous
    1992		 * or the interrupt can be nested; in those two cases the
    1993		 * recorded exception number cannot be used.
    1994		 *
    1995		 * For the exception return packet we only need to distinguish
    1996		 * whether it is for a system call or for another type.  That
    1997		 * decision can be deferred until the next packet, which
    1998		 * contains the return address; based on the return address
    1999		 * we can read out the previous instruction, check whether it
    2000		 * is a system call instruction, and then calibrate the
    2001		 * sample flags as needed.
    2002		 */
   2003		if (prev_packet->sample_type == CS_ETM_RANGE)
   2004			prev_packet->flags = PERF_IP_FLAG_BRANCH |
   2005					     PERF_IP_FLAG_RETURN |
   2006					     PERF_IP_FLAG_INTERRUPT;
   2007		break;
   2008	case CS_ETM_EMPTY:
   2009	default:
   2010		break;
   2011	}
   2012
   2013	return 0;
   2014}
   2015
   2016static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
   2017{
   2018	int ret = 0;
   2019	size_t processed = 0;
   2020
   2021	/*
   2022	 * Packets are decoded and added to the decoder's packet queue
   2023	 * until the decoder packet processing callback has requested that
   2024	 * processing stops or there is nothing left in the buffer.  Normal
   2025	 * operations that stop processing are a timestamp packet or a full
   2026	 * decoder buffer queue.
   2027	 */
   2028	ret = cs_etm_decoder__process_data_block(etmq->decoder,
   2029						 etmq->offset,
   2030						 &etmq->buf[etmq->buf_used],
   2031						 etmq->buf_len,
   2032						 &processed);
   2033	if (ret)
   2034		goto out;
   2035
   2036	etmq->offset += processed;
   2037	etmq->buf_used += processed;
   2038	etmq->buf_len -= processed;
   2039
   2040out:
   2041	return ret;
   2042}
   2043
   2044static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
   2045					 struct cs_etm_traceid_queue *tidq)
   2046{
   2047	int ret;
   2048	struct cs_etm_packet_queue *packet_queue;
   2049
   2050	packet_queue = &tidq->packet_queue;
   2051
   2052	/* Process each packet in this chunk */
   2053	while (1) {
   2054		ret = cs_etm_decoder__get_packet(packet_queue,
   2055						 tidq->packet);
   2056		if (ret <= 0)
   2057			/*
   2058			 * Stop processing this chunk on
   2059			 * end of data or error
   2060			 */
   2061			break;
   2062
    2063		/*
    2064		 * Since packet addresses are swapped during packet
    2065		 * handling within the switch() statement below,
    2066		 * the sample flags must be set before the switch()
    2067		 * statement so that the address information is
    2068		 * used before the packets are swapped.
    2069		 */
   2070		ret = cs_etm__set_sample_flags(etmq, tidq);
   2071		if (ret < 0)
   2072			break;
   2073
   2074		switch (tidq->packet->sample_type) {
   2075		case CS_ETM_RANGE:
   2076			/*
   2077			 * If the packet contains an instruction
   2078			 * range, generate instruction sequence
   2079			 * events.
   2080			 */
   2081			cs_etm__sample(etmq, tidq);
   2082			break;
   2083		case CS_ETM_EXCEPTION:
   2084		case CS_ETM_EXCEPTION_RET:
    2085			/*
    2086			 * When an exception packet comes,
    2087			 * make sure the previous instruction
    2088			 * range packet is handled properly.
    2089			 */
   2090			cs_etm__exception(tidq);
   2091			break;
   2092		case CS_ETM_DISCONTINUITY:
   2093			/*
   2094			 * Discontinuity in trace, flush
   2095			 * previous branch stack
   2096			 */
   2097			cs_etm__flush(etmq, tidq);
   2098			break;
   2099		case CS_ETM_EMPTY:
    2100			/*
    2101			 * We should not receive an empty
    2102			 * packet; report an error.
    2103			 */
   2104			pr_err("CS ETM Trace: empty packet\n");
   2105			return -EINVAL;
   2106		default:
   2107			break;
   2108		}
   2109	}
   2110
   2111	return ret;
   2112}
   2113
   2114static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
   2115{
   2116	int idx;
   2117	struct int_node *inode;
   2118	struct cs_etm_traceid_queue *tidq;
   2119	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
   2120
   2121	intlist__for_each_entry(inode, traceid_queues_list) {
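        		/* inode->priv holds this traceID's index into etmq->traceid_queues */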
   2122		idx = (int)(intptr_t)inode->priv;
   2123		tidq = etmq->traceid_queues[idx];
   2124
   2125		/* Ignore return value */
   2126		cs_etm__process_traceid_queue(etmq, tidq);
   2127
   2128		/*
   2129		 * Generate an instruction sample with the remaining
   2130		 * branchstack entries.
   2131		 */
   2132		cs_etm__flush(etmq, tidq);
   2133	}
   2134}
   2135
   2136static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
   2137{
   2138	int err = 0;
   2139	struct cs_etm_traceid_queue *tidq;
   2140
   2141	tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
   2142	if (!tidq)
   2143		return -EINVAL;
   2144
   2145	/* Go through each buffer in the queue and decode them one by one */
   2146	while (1) {
   2147		err = cs_etm__get_data_block(etmq);
   2148		if (err <= 0)
   2149			return err;
   2150
   2151		/* Run trace decoder until buffer consumed or end of trace */
   2152		do {
   2153			err = cs_etm__decode_data_block(etmq);
   2154			if (err)
   2155				return err;
   2156
    2157			/*
    2158			 * Process each packet in this chunk; if an error
    2159			 * occurs there is nothing to do other than hope
    2160			 * the next one will be better.
    2161			 */
   2162			err = cs_etm__process_traceid_queue(etmq, tidq);
   2163
   2164		} while (etmq->buf_len);
   2165
   2166		if (err == 0)
   2167			/* Flush any remaining branch stack entries */
   2168			err = cs_etm__end_block(etmq, tidq);
   2169	}
   2170
   2171	return err;
   2172}
   2173
   2174static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
   2175					   pid_t tid)
   2176{
   2177	unsigned int i;
   2178	struct auxtrace_queues *queues = &etm->queues;
   2179
   2180	for (i = 0; i < queues->nr_queues; i++) {
   2181		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
   2182		struct cs_etm_queue *etmq = queue->priv;
   2183		struct cs_etm_traceid_queue *tidq;
   2184
   2185		if (!etmq)
   2186			continue;
   2187
   2188		tidq = cs_etm__etmq_get_traceid_queue(etmq,
   2189						CS_ETM_PER_THREAD_TRACEID);
   2190
   2191		if (!tidq)
   2192			continue;
   2193
   2194		if ((tid == -1) || (tidq->tid == tid)) {
   2195			cs_etm__set_pid_tid_cpu(etm, tidq);
   2196			cs_etm__run_decoder(etmq);
   2197		}
   2198	}
   2199
   2200	return 0;
   2201}
   2202
   2203static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
   2204{
   2205	int ret = 0;
   2206	unsigned int cs_queue_nr, queue_nr, i;
   2207	u8 trace_chan_id;
   2208	u64 cs_timestamp;
   2209	struct auxtrace_queue *queue;
   2210	struct cs_etm_queue *etmq;
   2211	struct cs_etm_traceid_queue *tidq;
   2212
   2213	/*
   2214	 * Pre-populate the heap with one entry from each queue so that we can
   2215	 * start processing in time order across all queues.
   2216	 */
   2217	for (i = 0; i < etm->queues.nr_queues; i++) {
   2218		etmq = etm->queues.queue_array[i].priv;
   2219		if (!etmq)
   2220			continue;
   2221
   2222		ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
   2223		if (ret)
   2224			return ret;
   2225	}
   2226
   2227	while (1) {
   2228		if (!etm->heap.heap_cnt)
   2229			goto out;
   2230
   2231		/* Take the entry at the top of the min heap */
   2232		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
   2233		queue_nr = TO_QUEUE_NR(cs_queue_nr);
   2234		trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
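        		/*
        		 * cs_queue_nr packs the auxtrace queue number together with
        		 * the trace channel ID (see TO_CS_QUEUE_NR() below), so one
        		 * heap entry identifies a single traceID stream of a queue.
        		 */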
   2235		queue = &etm->queues.queue_array[queue_nr];
   2236		etmq = queue->priv;
   2237
   2238		/*
   2239		 * Remove the top entry from the heap since we are about
   2240		 * to process it.
   2241		 */
   2242		auxtrace_heap__pop(&etm->heap);
   2243
   2244		tidq  = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
   2245		if (!tidq) {
   2246			/*
   2247			 * No traceID queue has been allocated for this traceID,
   2248			 * which means something somewhere went very wrong.  No
   2249			 * other choice than simply exit.
   2250			 */
   2251			ret = -EINVAL;
   2252			goto out;
   2253		}
   2254
   2255		/*
   2256		 * Packets associated with this timestamp are already in
   2257		 * the etmq's traceID queue, so process them.
   2258		 */
   2259		ret = cs_etm__process_traceid_queue(etmq, tidq);
   2260		if (ret < 0)
   2261			goto out;
   2262
   2263		/*
   2264		 * Packets for this timestamp have been processed, time to
   2265		 * move on to the next timestamp, fetching a new auxtrace_buffer
   2266		 * if need be.
   2267		 */
   2268refetch:
   2269		ret = cs_etm__get_data_block(etmq);
   2270		if (ret < 0)
   2271			goto out;
   2272
   2273		/*
   2274		 * No more auxtrace_buffers to process in this etmq, simply
   2275		 * move on to another entry in the auxtrace_heap.
   2276		 */
   2277		if (!ret)
   2278			continue;
   2279
   2280		ret = cs_etm__decode_data_block(etmq);
   2281		if (ret)
   2282			goto out;
   2283
   2284		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
   2285
   2286		if (!cs_timestamp) {
    2287			/*
    2288			 * Function cs_etm__decode_data_block() returns when
    2289			 * there are no more traces to decode in the current
    2290			 * auxtrace_buffer OR when a timestamp has been
    2291			 * encountered on any of the traceID queues.  Since we
    2292			 * did not get a timestamp, there are no more traces to
    2293			 * process in this auxtrace_buffer.  As such, empty and
    2294			 * flush all traceID queues.
    2295			 */
   2296			cs_etm__clear_all_traceid_queues(etmq);
   2297
   2298			/* Fetch another auxtrace_buffer for this etmq */
   2299			goto refetch;
   2300		}
   2301
   2302		/*
   2303		 * Add to the min heap the timestamp for packets that have
   2304		 * just been decoded.  They will be processed and synthesized
   2305		 * during the next call to cs_etm__process_traceid_queue() for
   2306		 * this queue/traceID.
   2307		 */
   2308		cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
   2309		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
   2310	}
   2311
   2312out:
   2313	return ret;
   2314}
   2315
   2316static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
   2317					union perf_event *event)
   2318{
   2319	struct thread *th;
   2320
   2321	if (etm->timeless_decoding)
   2322		return 0;
   2323
   2324	/*
   2325	 * Add the tid/pid to the log so that we can get a match when
   2326	 * we get a contextID from the decoder.
   2327	 */
   2328	th = machine__findnew_thread(etm->machine,
   2329				     event->itrace_start.pid,
   2330				     event->itrace_start.tid);
   2331	if (!th)
   2332		return -ENOMEM;
   2333
   2334	thread__put(th);
   2335
   2336	return 0;
   2337}
   2338
   2339static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
   2340					   union perf_event *event)
   2341{
   2342	struct thread *th;
   2343	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
   2344
    2345	/*
    2346	 * Context switches in per-thread mode are irrelevant since perf
    2347	 * will start/stop tracing as the process is scheduled.
    2348	 */
   2349	if (etm->timeless_decoding)
   2350		return 0;
   2351
   2352	/*
   2353	 * SWITCH_IN events carry the next process to be switched out while
   2354	 * SWITCH_OUT events carry the process to be switched in.  As such
   2355	 * we don't care about IN events.
   2356	 */
   2357	if (!out)
   2358		return 0;
   2359
   2360	/*
   2361	 * Add the tid/pid to the log so that we can get a match when
   2362	 * we get a contextID from the decoder.
   2363	 */
   2364	th = machine__findnew_thread(etm->machine,
   2365				     event->context_switch.next_prev_pid,
   2366				     event->context_switch.next_prev_tid);
   2367	if (!th)
   2368		return -ENOMEM;
   2369
   2370	thread__put(th);
   2371
   2372	return 0;
   2373}
   2374
   2375static int cs_etm__process_event(struct perf_session *session,
   2376				 union perf_event *event,
   2377				 struct perf_sample *sample,
   2378				 struct perf_tool *tool)
   2379{
   2380	u64 sample_kernel_timestamp;
   2381	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
   2382						   struct cs_etm_auxtrace,
   2383						   auxtrace);
   2384
   2385	if (dump_trace)
   2386		return 0;
   2387
   2388	if (!tool->ordered_events) {
   2389		pr_err("CoreSight ETM Trace requires ordered events\n");
   2390		return -EINVAL;
   2391	}
   2392
   2393	if (sample->time && (sample->time != (u64) -1))
   2394		sample_kernel_timestamp = sample->time;
   2395	else
   2396		sample_kernel_timestamp = 0;
   2397
   2398	/*
   2399	 * Don't wait for cs_etm__flush_events() in per-thread/timeless mode to start the decode. We
   2400	 * need the tid of the PERF_RECORD_EXIT event to assign to the synthesised samples because
   2401	 * ETM_OPT_CTXTID is not enabled.
   2402	 */
   2403	if (etm->timeless_decoding &&
   2404	    event->header.type == PERF_RECORD_EXIT)
   2405		return cs_etm__process_timeless_queues(etm,
   2406						       event->fork.tid);
   2407
   2408	if (event->header.type == PERF_RECORD_ITRACE_START)
   2409		return cs_etm__process_itrace_start(etm, event);
   2410	else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
   2411		return cs_etm__process_switch_cpu_wide(etm, event);
   2412
   2413	if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
   2414		/*
   2415		 * Record the latest kernel timestamp available in the header
   2416		 * for samples so that synthesised samples occur from this point
   2417		 * onwards.
   2418		 */
   2419		etm->latest_kernel_timestamp = sample_kernel_timestamp;
   2420	}
   2421
   2422	return 0;
   2423}
   2424
   2425static void dump_queued_data(struct cs_etm_auxtrace *etm,
   2426			     struct perf_record_auxtrace *event)
   2427{
   2428	struct auxtrace_buffer *buf;
   2429	unsigned int i;
   2430	/*
   2431	 * Find all buffers with same reference in the queues and dump them.
   2432	 * This is because the queues can contain multiple entries of the same
   2433	 * buffer that were split on aux records.
   2434	 */
   2435	for (i = 0; i < etm->queues.nr_queues; ++i)
   2436		list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
   2437			if (buf->reference == event->reference)
   2438				cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
   2439}
   2440
   2441static int cs_etm__process_auxtrace_event(struct perf_session *session,
   2442					  union perf_event *event,
   2443					  struct perf_tool *tool __maybe_unused)
   2444{
   2445	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
   2446						   struct cs_etm_auxtrace,
   2447						   auxtrace);
   2448	if (!etm->data_queued) {
   2449		struct auxtrace_buffer *buffer;
   2450		off_t  data_offset;
   2451		int fd = perf_data__fd(session->data);
   2452		bool is_pipe = perf_data__is_pipe(session->data);
   2453		int err;
   2454		int idx = event->auxtrace.idx;
   2455
   2456		if (is_pipe)
   2457			data_offset = 0;
   2458		else {
   2459			data_offset = lseek(fd, 0, SEEK_CUR);
   2460			if (data_offset == -1)
   2461				return -errno;
   2462		}
   2463
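        		/* In file mode, the trace data directly follows this event record, at the current offset */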
   2464		err = auxtrace_queues__add_event(&etm->queues, session,
   2465						 event, data_offset, &buffer);
   2466		if (err)
   2467			return err;
   2468
   2469		/*
   2470		 * Knowing if the trace is formatted or not requires a lookup of
   2471		 * the aux record so only works in non-piped mode where data is
   2472		 * queued in cs_etm__queue_aux_records(). Always assume
   2473		 * formatted in piped mode (true).
   2474		 */
   2475		err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
   2476					  idx, true);
   2477		if (err)
   2478			return err;
   2479
   2480		if (dump_trace)
   2481			if (auxtrace_buffer__get_data(buffer, fd)) {
   2482				cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
   2483				auxtrace_buffer__put_data(buffer);
   2484			}
   2485	} else if (dump_trace)
   2486		dump_queued_data(etm, &event->auxtrace);
   2487
   2488	return 0;
   2489}
   2490
   2491static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
   2492{
   2493	struct evsel *evsel;
   2494	struct evlist *evlist = etm->session->evlist;
   2495	bool timeless_decoding = true;
   2496
   2497	/* Override timeless mode with user input from --itrace=Z */
   2498	if (etm->synth_opts.timeless_decoding)
   2499		return true;
   2500
    2501	/*
    2502	 * Cycle through the list of events; if any has the time bit
    2503	 * set, timed (non-timeless) decoding is needed.
    2504	 */
   2505	evlist__for_each_entry(evlist, evsel) {
   2506		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
   2507			timeless_decoding = false;
   2508	}
   2509
   2510	return timeless_decoding;
   2511}
   2512
   2513static const char * const cs_etm_global_header_fmts[] = {
   2514	[CS_HEADER_VERSION]	= "	Header version		       %llx\n",
   2515	[CS_PMU_TYPE_CPUS]	= "	PMU type/num cpus	       %llx\n",
   2516	[CS_ETM_SNAPSHOT]	= "	Snapshot		       %llx\n",
   2517};
   2518
   2519static const char * const cs_etm_priv_fmts[] = {
   2520	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
   2521	[CS_ETM_CPU]		= "	CPU			       %lld\n",
   2522	[CS_ETM_NR_TRC_PARAMS]	= "	NR_TRC_PARAMS		       %llx\n",
   2523	[CS_ETM_ETMCR]		= "	ETMCR			       %llx\n",
   2524	[CS_ETM_ETMTRACEIDR]	= "	ETMTRACEIDR		       %llx\n",
   2525	[CS_ETM_ETMCCER]	= "	ETMCCER			       %llx\n",
   2526	[CS_ETM_ETMIDR]		= "	ETMIDR			       %llx\n",
   2527};
   2528
   2529static const char * const cs_etmv4_priv_fmts[] = {
   2530	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
   2531	[CS_ETM_CPU]		= "	CPU			       %lld\n",
   2532	[CS_ETM_NR_TRC_PARAMS]	= "	NR_TRC_PARAMS		       %llx\n",
   2533	[CS_ETMV4_TRCCONFIGR]	= "	TRCCONFIGR		       %llx\n",
   2534	[CS_ETMV4_TRCTRACEIDR]	= "	TRCTRACEIDR		       %llx\n",
   2535	[CS_ETMV4_TRCIDR0]	= "	TRCIDR0			       %llx\n",
   2536	[CS_ETMV4_TRCIDR1]	= "	TRCIDR1			       %llx\n",
   2537	[CS_ETMV4_TRCIDR2]	= "	TRCIDR2			       %llx\n",
   2538	[CS_ETMV4_TRCIDR8]	= "	TRCIDR8			       %llx\n",
   2539	[CS_ETMV4_TRCAUTHSTATUS] = "	TRCAUTHSTATUS		       %llx\n",
   2540	[CS_ETE_TRCDEVARCH]	= "	TRCDEVARCH                     %llx\n"
   2541};
   2542
   2543static const char * const param_unk_fmt =
   2544	"	Unknown parameter [%d]	       %llx\n";
   2545static const char * const magic_unk_fmt =
   2546	"	Magic number Unknown	       %llx\n";
   2547
   2548static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
   2549{
   2550	int i = *offset, j, nr_params = 0, fmt_offset;
   2551	__u64 magic;
   2552
   2553	/* check magic value */
   2554	magic = val[i + CS_ETM_MAGIC];
   2555	if ((magic != __perf_cs_etmv3_magic) &&
   2556	    (magic != __perf_cs_etmv4_magic)) {
   2557		/* failure - note bad magic value */
   2558		fprintf(stdout, magic_unk_fmt, magic);
   2559		return -EINVAL;
   2560	}
   2561
   2562	/* print common header block */
   2563	fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
   2564	fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);
   2565
   2566	if (magic == __perf_cs_etmv3_magic) {
   2567		nr_params = CS_ETM_NR_TRC_PARAMS_V0;
   2568		fmt_offset = CS_ETM_ETMCR;
   2569		/* after common block, offset format index past NR_PARAMS */
   2570		for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
   2571			fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
   2572	} else if (magic == __perf_cs_etmv4_magic) {
   2573		nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
   2574		fmt_offset = CS_ETMV4_TRCCONFIGR;
   2575		/* after common block, offset format index past NR_PARAMS */
   2576		for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
   2577			fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
   2578	}
   2579	*offset = i;
   2580	return 0;
   2581}
   2582
   2583static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
   2584{
   2585	int i = *offset, j, total_params = 0;
   2586	__u64 magic;
   2587
   2588	magic = val[i + CS_ETM_MAGIC];
   2589	/* total params to print is NR_PARAMS + common block size for v1 */
   2590	total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;
   2591
   2592	if (magic == __perf_cs_etmv3_magic) {
   2593		for (j = 0; j < total_params; j++, i++) {
   2594			/* if newer record - could be excess params */
   2595			if (j >= CS_ETM_PRIV_MAX)
   2596				fprintf(stdout, param_unk_fmt, j, val[i]);
   2597			else
   2598				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
   2599		}
   2600	} else if (magic == __perf_cs_etmv4_magic || magic == __perf_cs_ete_magic) {
   2601		/*
   2602		 * ETE and ETMv4 can be printed in the same block because the number of parameters
   2603		 * is saved and they share the list of parameter names. ETE is also only supported
   2604		 * in V1 files.
   2605		 */
   2606		for (j = 0; j < total_params; j++, i++) {
   2607			/* if newer record - could be excess params */
   2608			if (j >= CS_ETE_PRIV_MAX)
   2609				fprintf(stdout, param_unk_fmt, j, val[i]);
   2610			else
   2611				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
   2612		}
   2613	} else {
   2614		/* failure - note bad magic value and error out */
   2615		fprintf(stdout, magic_unk_fmt, magic);
   2616		return -EINVAL;
   2617	}
   2618	*offset = i;
   2619	return 0;
   2620}
   2621
   2622static void cs_etm__print_auxtrace_info(__u64 *val, int num)
   2623{
    2624	int i, cpu = 0, version, err = 0;
   2625
   2626	/* bail out early on bad header version */
   2627	version = val[0];
   2628	if (version > CS_HEADER_CURRENT_VERSION) {
   2629		/* failure.. return */
   2630		fprintf(stdout, "	Unknown Header Version = %x, ", version);
   2631		fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
   2632		return;
   2633	}
   2634
   2635	for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
   2636		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
   2637
   2638	for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
   2639		if (version == 0)
   2640			err = cs_etm__print_cpu_metadata_v0(val, &i);
   2641		else if (version == 1)
   2642			err = cs_etm__print_cpu_metadata_v1(val, &i);
   2643		if (err)
   2644			return;
   2645	}
   2646}
   2647
   2648/*
   2649 * Read a single cpu parameter block from the auxtrace_info priv block.
   2650 *
   2651 * For version 1 there is a per cpu nr_params entry. If we are handling
    2652 * a version 1 file, then there may be fewer, the same, or more params
   2653 * indicated by this value than the compile time number we understand.
   2654 *
   2655 * For a version 0 info block, there are a fixed number, and we need to
   2656 * fill out the nr_param value in the metadata we create.
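         *
         * For example, a v1 block for an ETMv4 source is laid out as
         * [MAGIC][CPU][NR_TRC_PARAMS=N][param 0]..[param N-1], while a v0
         * ETMv4 block has no NR_TRC_PARAMS entry and always carries
         * CS_ETMV4_NR_TRC_PARAMS_V0 params directly after [MAGIC][CPU].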
   2657 */
   2658static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
   2659				    int out_blk_size, int nr_params_v0)
   2660{
   2661	u64 *metadata = NULL;
   2662	int hdr_version;
   2663	int nr_in_params, nr_out_params, nr_cmn_params;
   2664	int i, k;
   2665
   2666	metadata = zalloc(sizeof(*metadata) * out_blk_size);
   2667	if (!metadata)
   2668		return NULL;
   2669
   2670	/* read block current index & version */
   2671	i = *buff_in_offset;
   2672	hdr_version = buff_in[CS_HEADER_VERSION];
   2673
   2674	if (!hdr_version) {
    2675		/* read version 0 info block into a version 1 metadata block */
   2676		nr_in_params = nr_params_v0;
   2677		metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
   2678		metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
   2679		metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
   2680		/* remaining block params at offset +1 from source */
   2681		for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
   2682			metadata[k + 1] = buff_in[i + k];
   2683		/* version 0 has 2 common params */
   2684		nr_cmn_params = 2;
   2685	} else {
    2686		/* read version 1 info block - input and output nr_params may differ */
   2687		/* version 1 has 3 common params */
   2688		nr_cmn_params = 3;
   2689		nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
   2690
   2691		/* if input has more params than output - skip excess */
   2692		nr_out_params = nr_in_params + nr_cmn_params;
   2693		if (nr_out_params > out_blk_size)
   2694			nr_out_params = out_blk_size;
   2695
   2696		for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
   2697			metadata[k] = buff_in[i + k];
   2698
   2699		/* record the actual nr params we copied */
   2700		metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
   2701	}
   2702
   2703	/* adjust in offset by number of in params used */
   2704	i += nr_in_params + nr_cmn_params;
   2705	*buff_in_offset = i;
   2706	return metadata;
   2707}
   2708
   2709/**
   2710 * Puts a fragment of an auxtrace buffer into the auxtrace queues based
   2711 * on the bounds of aux_event, if it matches with the buffer that's at
   2712 * file_offset.
   2713 *
   2714 * Normally, whole auxtrace buffers would be added to the queue. But we
   2715 * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
   2716 * is reset across each buffer, so splitting the buffers up in advance has
   2717 * the same effect.
   2718 */
   2719static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
   2720				      struct perf_record_aux *aux_event, struct perf_sample *sample)
   2721{
   2722	int err;
   2723	char buf[PERF_SAMPLE_MAX_SIZE];
   2724	union perf_event *auxtrace_event_union;
   2725	struct perf_record_auxtrace *auxtrace_event;
   2726	union perf_event auxtrace_fragment;
   2727	__u64 aux_offset, aux_size;
   2728	__u32 idx;
   2729	bool formatted;
   2730
   2731	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
   2732						   struct cs_etm_auxtrace,
   2733						   auxtrace);
   2734
   2735	/*
   2736	 * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
   2737	 * from looping through the auxtrace index.
   2738	 */
   2739	err = perf_session__peek_event(session, file_offset, buf,
   2740				       PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
   2741	if (err)
   2742		return err;
   2743	auxtrace_event = &auxtrace_event_union->auxtrace;
   2744	if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
   2745		return -EINVAL;
   2746
   2747	if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
   2748		auxtrace_event->header.size != sz) {
   2749		return -EINVAL;
   2750	}
   2751
   2752	/*
   2753	 * In per-thread mode, CPU is set to -1, but TID will be set instead. See
   2754	 * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match.
   2755	 */
   2756	if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
   2757			auxtrace_event->cpu != sample->cpu)
   2758		return 1;
   2759
   2760	if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
   2761		/*
   2762		 * Clamp size in snapshot mode. The buffer size is clamped in
   2763		 * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
   2764		 * the buffer size.
   2765		 */
   2766		aux_size = min(aux_event->aux_size, auxtrace_event->size);
   2767
   2768		/*
   2769		 * In this mode, the head also points to the end of the buffer so aux_offset
   2770		 * needs to have the size subtracted so it points to the beginning as in normal mode
   2771		 */
   2772		aux_offset = aux_event->aux_offset - aux_size;
   2773	} else {
   2774		aux_size = aux_event->aux_size;
   2775		aux_offset = aux_event->aux_offset;
   2776	}
   2777
   2778	if (aux_offset >= auxtrace_event->offset &&
   2779	    aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
   2780		/*
   2781		 * If this AUX event was inside this buffer somewhere, create a new auxtrace event
   2782		 * based on the sizes of the aux event, and queue that fragment.
   2783		 */
   2784		auxtrace_fragment.auxtrace = *auxtrace_event;
   2785		auxtrace_fragment.auxtrace.size = aux_size;
   2786		auxtrace_fragment.auxtrace.offset = aux_offset;
   2787		file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
   2788
   2789		pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
   2790			  " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
   2791		err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
   2792						 file_offset, NULL);
   2793		if (err)
   2794			return err;
   2795
   2796		idx = auxtrace_event->idx;
   2797		formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
   2798		return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
   2799					   idx, formatted);
   2800	}
   2801
   2802	/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
   2803	return 1;
   2804}
   2805
   2806static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
   2807					u64 offset __maybe_unused, void *data __maybe_unused)
   2808{
   2809	struct perf_sample sample;
   2810	int ret;
   2811	struct auxtrace_index_entry *ent;
   2812	struct auxtrace_index *auxtrace_index;
   2813	struct evsel *evsel;
   2814	size_t i;
   2815
   2816	/* Don't care about any other events, we're only queuing buffers for AUX events */
   2817	if (event->header.type != PERF_RECORD_AUX)
   2818		return 0;
   2819
   2820	if (event->header.size < sizeof(struct perf_record_aux))
   2821		return -EINVAL;
   2822
   2823	/* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
   2824	if (!event->aux.aux_size)
   2825		return 0;
   2826
   2827	/*
   2828	 * Parse the sample, we need the sample_id_all data that comes after the event so that the
   2829	 * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
   2830	 */
   2831	evsel = evlist__event2evsel(session->evlist, event);
   2832	if (!evsel)
   2833		return -EINVAL;
   2834	ret = evsel__parse_sample(evsel, event, &sample);
   2835	if (ret)
   2836		return ret;
   2837
   2838	/*
   2839	 * Loop through the auxtrace index to find the buffer that matches up with this aux event.
   2840	 */
   2841	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
   2842		for (i = 0; i < auxtrace_index->nr; i++) {
   2843			ent = &auxtrace_index->entries[i];
   2844			ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
   2845							 ent->sz, &event->aux, &sample);
   2846			/*
   2847			 * Stop search on error or successful values. Continue search on
   2848			 * 1 ('not found')
   2849			 */
   2850			if (ret != 1)
   2851				return ret;
   2852		}
   2853	}
   2854
   2855	/*
   2856	 * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
   2857	 * don't exit with an error because it will still be possible to decode other aux records.
   2858	 */
   2859	pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
   2860	       " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
   2861	return 0;
   2862}
   2863
   2864static int cs_etm__queue_aux_records(struct perf_session *session)
   2865{
   2866	struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
   2867								struct auxtrace_index, list);
   2868	if (index && index->nr > 0)
   2869		return perf_session__peek_events(session, session->header.data_offset,
   2870						 session->header.data_size,
   2871						 cs_etm__queue_aux_records_cb, NULL);
   2872
   2873	/*
   2874	 * We would get here if there are no entries in the index (either no auxtrace
   2875	 * buffers or no index at all). Fail silently as there is the possibility of
   2876	 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
   2877	 * false.
   2878	 *
   2879	 * In that scenario, buffers will not be split by AUX records.
   2880	 */
   2881	return 0;
   2882}
   2883
   2884int cs_etm__process_auxtrace_info(union perf_event *event,
   2885				  struct perf_session *session)
   2886{
   2887	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
   2888	struct cs_etm_auxtrace *etm = NULL;
   2889	struct int_node *inode;
   2890	unsigned int pmu_type;
   2891	int event_header_size = sizeof(struct perf_event_header);
   2892	int info_header_size;
   2893	int total_size = auxtrace_info->header.size;
   2894	int priv_size = 0;
   2895	int num_cpu, trcidr_idx;
   2896	int err = 0;
   2897	int i, j;
   2898	u64 *ptr, *hdr = NULL;
   2899	u64 **metadata = NULL;
   2900	u64 hdr_version;
   2901
   2902	/*
   2903	 * sizeof(auxtrace_info_event::type) +
   2904	 * sizeof(auxtrace_info_event::reserved) == 8
   2905	 */
   2906	info_header_size = 8;
   2907
   2908	if (total_size < (event_header_size + info_header_size))
   2909		return -EINVAL;
   2910
   2911	priv_size = total_size - event_header_size - info_header_size;
   2912
   2913	/* First the global part */
   2914	ptr = (u64 *) auxtrace_info->priv;
   2915
   2916	/* Look for version of the header */
   2917	hdr_version = ptr[0];
   2918	if (hdr_version > CS_HEADER_CURRENT_VERSION) {
   2919		/* print routine will print an error on bad version */
   2920		if (dump_trace)
   2921			cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
   2922		return -EINVAL;
   2923	}
   2924
   2925	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
   2926	if (!hdr)
   2927		return -ENOMEM;
   2928
   2929	/* Extract header information - see cs-etm.h for format */
   2930	for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
   2931		hdr[i] = ptr[i];
   2932	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
   2933	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
   2934				    0xffffffff);
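        	/* CS_PMU_TYPE_CPUS: PMU type in the upper 32 bits, CPU count in the lower 32 */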
   2935
    2936	/*
    2937	 * Create an RB tree for traceID-metadata tuples.  Since the lookup
    2938	 * has to be made for each packet that gets decoded, optimizing
    2939	 * access with something faster than a sequential array is worthwhile.
    2940	 */
   2941	traceid_list = intlist__new(NULL);
   2942	if (!traceid_list) {
   2943		err = -ENOMEM;
   2944		goto err_free_hdr;
   2945	}
   2946
   2947	metadata = zalloc(sizeof(*metadata) * num_cpu);
   2948	if (!metadata) {
   2949		err = -ENOMEM;
   2950		goto err_free_traceid_list;
   2951	}
   2952
   2953	/*
   2954	 * The metadata is stored in the auxtrace_info section and encodes
   2955	 * the configuration of the ARM embedded trace macrocell which is
   2956	 * required by the trace decoder to properly decode the trace due
   2957	 * to its highly compressed nature.
   2958	 */
   2959	for (j = 0; j < num_cpu; j++) {
   2960		if (ptr[i] == __perf_cs_etmv3_magic) {
   2961			metadata[j] =
   2962				cs_etm__create_meta_blk(ptr, &i,
   2963							CS_ETM_PRIV_MAX,
   2964							CS_ETM_NR_TRC_PARAMS_V0);
   2965
   2966			/* The traceID is our handle */
   2967			trcidr_idx = CS_ETM_ETMTRACEIDR;
   2968
   2969		} else if (ptr[i] == __perf_cs_etmv4_magic) {
   2970			metadata[j] =
   2971				cs_etm__create_meta_blk(ptr, &i,
   2972							CS_ETMV4_PRIV_MAX,
   2973							CS_ETMV4_NR_TRC_PARAMS_V0);
   2974
   2975			/* The traceID is our handle */
   2976			trcidr_idx = CS_ETMV4_TRCTRACEIDR;
   2977		} else if (ptr[i] == __perf_cs_ete_magic) {
   2978			metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);
   2979
   2980			/* ETE shares first part of metadata with ETMv4 */
   2981			trcidr_idx = CS_ETMV4_TRCTRACEIDR;
   2982		} else {
   2983			ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
   2984				  ptr[i]);
   2985			err = -EINVAL;
   2986			goto err_free_metadata;
   2987		}
   2988
   2989		if (!metadata[j]) {
   2990			err = -ENOMEM;
   2991			goto err_free_metadata;
   2992		}
   2993
   2994		/* Get an RB node for this CPU */
   2995		inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);
   2996
   2997		/* Something went wrong, no need to continue */
   2998		if (!inode) {
   2999			err = -ENOMEM;
   3000			goto err_free_metadata;
   3001		}
   3002
   3003		/*
   3004		 * The node for that CPU should not be taken.
   3005		 * Back out if that's the case.
   3006		 */
   3007		if (inode->priv) {
   3008			err = -EINVAL;
   3009			goto err_free_metadata;
   3010		}
   3011		/* All good, associate the traceID with the metadata pointer */
   3012		inode->priv = metadata[j];
   3013	}
   3014
   3015	/*
   3016	 * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
   3017	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
   3018	 * global metadata, and each cpu's metadata respectively.
   3019	 * The following tests if the correct number of double words was
   3020	 * present in the auxtrace info section.
   3021	 */
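        	/* i counts u64 (double word) entries consumed so far; each is 8 bytes */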
   3022	if (i * 8 != priv_size) {
   3023		err = -EINVAL;
   3024		goto err_free_metadata;
   3025	}
   3026
   3027	etm = zalloc(sizeof(*etm));
   3028
   3029	if (!etm) {
   3030		err = -ENOMEM;
   3031		goto err_free_metadata;
   3032	}
   3033
   3034	err = auxtrace_queues__init(&etm->queues);
   3035	if (err)
   3036		goto err_free_etm;
   3037
   3038	if (session->itrace_synth_opts->set) {
   3039		etm->synth_opts = *session->itrace_synth_opts;
   3040	} else {
   3041		itrace_synth_opts__set_default(&etm->synth_opts,
   3042				session->itrace_synth_opts->default_no_sample);
   3043		etm->synth_opts.callchain = false;
   3044	}
   3045
   3046	etm->session = session;
   3047	etm->machine = &session->machines.host;
   3048
   3049	etm->num_cpu = num_cpu;
   3050	etm->pmu_type = pmu_type;
   3051	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
   3052	etm->metadata = metadata;
   3053	etm->auxtrace_type = auxtrace_info->type;
   3054	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
   3055
   3056	etm->auxtrace.process_event = cs_etm__process_event;
   3057	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
   3058	etm->auxtrace.flush_events = cs_etm__flush_events;
   3059	etm->auxtrace.free_events = cs_etm__free_events;
   3060	etm->auxtrace.free = cs_etm__free;
   3061	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
   3062	session->auxtrace = &etm->auxtrace;
   3063
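        	/* Placeholder thread for decoded samples that can't be matched to a real tid */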
   3064	etm->unknown_thread = thread__new(999999999, 999999999);
   3065	if (!etm->unknown_thread) {
   3066		err = -ENOMEM;
   3067		goto err_free_queues;
   3068	}
   3069
    3070	/*
    3071	 * Initialize the list node so that thread__zput() can avoid a
    3072	 * segmentation fault in list_del_init().
    3073	 */
   3074	INIT_LIST_HEAD(&etm->unknown_thread->node);
   3075
   3076	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
   3077	if (err)
   3078		goto err_delete_thread;
   3079
   3080	if (thread__init_maps(etm->unknown_thread, etm->machine)) {
   3081		err = -ENOMEM;
   3082		goto err_delete_thread;
   3083	}
   3084
   3085	if (dump_trace) {
   3086		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
   3087	}
   3088
   3089	err = cs_etm__synth_events(etm, session);
   3090	if (err)
   3091		goto err_delete_thread;
   3092
   3093	err = cs_etm__queue_aux_records(session);
   3094	if (err)
   3095		goto err_delete_thread;
   3096
   3097	etm->data_queued = etm->queues.populated;
   3098	/*
   3099	 * Print warning in pipe mode, see cs_etm__process_auxtrace_event() and
   3100	 * cs_etm__queue_aux_fragment() for details relating to limitations.
   3101	 */
   3102	if (!etm->data_queued)
   3103		pr_warning("CS ETM warning: Coresight decode and TRBE support requires random file access.\n"
   3104			   "Continuing with best effort decoding in piped mode.\n\n");
   3105
   3106	return 0;
   3107
   3108err_delete_thread:
   3109	thread__zput(etm->unknown_thread);
   3110err_free_queues:
   3111	auxtrace_queues__free(&etm->queues);
   3112	session->auxtrace = NULL;
   3113err_free_etm:
   3114	zfree(&etm);
   3115err_free_metadata:
   3116	/* No need to check @metadata[j], free(NULL) is supported */
   3117	for (j = 0; j < num_cpu; j++)
   3118		zfree(&metadata[j]);
   3119	zfree(&metadata);
   3120err_free_traceid_list:
   3121	intlist__delete(traceid_list);
   3122err_free_hdr:
   3123	zfree(&hdr);
    3124	/*
    3125	 * At this point we have, at a minimum, a valid header. Dump the rest
    3126	 * of the info section - the print routines will error out on
    3127	 * structural issues.
    3128	 */
   3129	if (dump_trace)
   3130		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
   3131	return err;
   3132}