cs-etm-decoder.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <asm/bug.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <opencsd/c_api/opencsd_c_api.h>

#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "debug.h"
#include "intlist.h"

/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
			    OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
#endif

struct cs_etm_decoder {
	void *data;
	void (*packet_printer)(const char *msg);
	bool suppress_printing;
	dcd_tree_handle_t dcd_tree;
	cs_etm_mem_cb_type mem_access;
	ocsd_datapath_resp_t prev_return;
	const char *decoder_name;
};

static u32
cs_etm_decoder__mem_access(const void *context,
			   const ocsd_vaddr_t address,
			   const ocsd_mem_space_acc_t mem_space __maybe_unused,
			   const u8 trace_chan_id,
			   const u32 req_size,
			   u8 *buffer)
{
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;

	return decoder->mem_access(decoder->data, trace_chan_id,
				   address, req_size, buffer);
}

int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
				      u64 start, u64 end,
				      cs_etm_mem_cb_type cb_func)
{
	decoder->mem_access = cb_func;

	if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
					       OCSD_MEM_SPACE_ANY,
					       cs_etm_decoder__mem_access,
					       decoder))
		return -1;

	return 0;
}

int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
{
	ocsd_datapath_resp_t dp_ret;

	decoder->prev_return = OCSD_RESP_CONT;
	decoder->suppress_printing = true;
	dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
				      0, 0, NULL, NULL);
	decoder->suppress_printing = false;
	if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
		return -1;

	return 0;
}

int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
			       struct cs_etm_packet *packet)
{
	if (!packet_queue || !packet)
		return -EINVAL;

	/* Nothing to do, might as well just return */
	if (packet_queue->packet_count == 0)
		return 0;
	/*
	 * The queueing process in function cs_etm_decoder__buffer_packet()
	 * increments the tail *before* using it. This is somewhat
	 * counter-intuitive but it has the advantage of centralizing tail
	 * management at a single location. Because of that we need to follow
	 * the same heuristic with the head, i.e. we increment it before using
	 * its value. Otherwise the first element of the packet queue is not
	 * used.
	 */
	packet_queue->head = (packet_queue->head + 1) &
			     (CS_ETM_PACKET_MAX_BUFFER - 1);

	*packet = packet_queue->packet_buffer[packet_queue->head];

	packet_queue->packet_count--;

	return 1;
}

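/*
 * Illustration only, assuming CS_ETM_PACKET_MAX_BUFFER is a power of two
 * (say 1024): the "increment before use" indexing above wraps with a mask
 * rather than a modulo, e.g.
 *
 *	head = (1023 + 1) & (1024 - 1);	// wraps back to 0
 *	head = (5 + 1) & (1024 - 1);	// advances to 6
 *
 * Both producer (tail) and consumer (head) advance the index first and then
 * touch packet_buffer[index], which is why the fill level is tracked with
 * packet_count instead of comparing head against tail.
 */
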
static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
					    ocsd_etmv3_cfg *config)
{
	config->reg_idr = params->etmv3.reg_idr;
	config->reg_ctrl = params->etmv3.reg_ctrl;
	config->reg_ccer = params->etmv3.reg_ccer;
	config->reg_trc_id = params->etmv3.reg_trc_id;
	config->arch_ver = ARCH_V7;
	config->core_prof = profile_CortexA;

	return 0;
}

#define TRCIDR1_TRCARCHMIN_SHIFT 4
#define TRCIDR1_TRCARCHMIN_MASK  GENMASK(7, 4)
#define TRCIDR1_TRCARCHMIN(x)    (((x) & TRCIDR1_TRCARCHMIN_MASK) >> TRCIDR1_TRCARCHMIN_SHIFT)

static enum _ocsd_arch_version cs_etm_decoder__get_etmv4_arch_ver(u32 reg_idr1)
{
	/*
	 * For ETMv4 if the trace minor version is 4 or more then we can assume
	 * the architecture is ARCH_AA64 rather than just V8.
	 * ARCH_V8 = V8 architecture
	 * ARCH_AA64 = Min v8r3 plus additional AA64 PE features
	 */
	return TRCIDR1_TRCARCHMIN(reg_idr1) >= 4 ? ARCH_AA64 : ARCH_V8;
}

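/*
 * Worked example with made-up TRCIDR1 values, to show how the field is
 * extracted by the macros above:
 *
 *	TRCIDR1_TRCARCHMIN(0x4100f402) == (0x00 >> 4) == 0  ->  ARCH_V8
 *	TRCIDR1_TRCARCHMIN(0x4100f442) == (0x40 >> 4) == 4  ->  ARCH_AA64
 *
 * Only bits [7:4] of TRCIDR1 (the trace architecture minor version) matter
 * for the choice.
 */
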
static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
					     ocsd_etmv4_cfg *config)
{
	config->reg_configr = params->etmv4.reg_configr;
	config->reg_traceidr = params->etmv4.reg_traceidr;
	config->reg_idr0 = params->etmv4.reg_idr0;
	config->reg_idr1 = params->etmv4.reg_idr1;
	config->reg_idr2 = params->etmv4.reg_idr2;
	config->reg_idr8 = params->etmv4.reg_idr8;
	config->reg_idr9 = 0;
	config->reg_idr10 = 0;
	config->reg_idr11 = 0;
	config->reg_idr12 = 0;
	config->reg_idr13 = 0;
	config->arch_ver = cs_etm_decoder__get_etmv4_arch_ver(params->etmv4.reg_idr1);
	config->core_prof = profile_CortexA;
}

static void cs_etm_decoder__gen_ete_config(struct cs_etm_trace_params *params,
					   ocsd_ete_cfg *config)
{
	config->reg_configr = params->ete.reg_configr;
	config->reg_traceidr = params->ete.reg_traceidr;
	config->reg_idr0 = params->ete.reg_idr0;
	config->reg_idr1 = params->ete.reg_idr1;
	config->reg_idr2 = params->ete.reg_idr2;
	config->reg_idr8 = params->ete.reg_idr8;
	config->reg_devarch = params->ete.reg_devarch;
	config->arch_ver = ARCH_AA64;
	config->core_prof = profile_CortexA;
}

static void cs_etm_decoder__print_str_cb(const void *p_context,
					 const char *msg,
					 const int str_len)
{
	const struct cs_etm_decoder *decoder = p_context;

	if (p_context && str_len && !decoder->suppress_printing)
		decoder->packet_printer(msg);
}

static int
cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
					 struct cs_etm_decoder *decoder)
{
	int ret = 0;

	if (d_params->packet_printer == NULL)
		return -1;

	decoder->packet_printer = d_params->packet_printer;

	/*
	 * Set up a library default logger to process any printers
	 * (packet/raw frame) we add later.
	 */
	ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
	if (ret != 0)
		return -1;

	/* no stdout / err / file output */
	ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
	if (ret != 0)
		return -1;

	/*
	 * Set the string CB for the default logger, passes strings to
	 * perf print logger.
	 */
	ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
					      (void *)decoder,
					      cs_etm_decoder__print_str_cb);
	if (ret != 0)
		return -1;

	return 0;
}

#ifdef CS_LOG_RAW_FRAMES
static void
cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_decoder *decoder)
{
	/* Only log these during a --dump operation */
	if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		/*
		 * Set up a library default logger to process the
		 * raw frame printer we add later.
		 */
		ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);

		/* no stdout / err / file output */
		ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);

		/*
		 * Set the string CB for the default logger,
		 * passes strings to perf print logger.
		 */
		ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
						(void *)decoder,
						cs_etm_decoder__print_str_cb);

		/* use the built in library printer for the raw frames */
		ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
					      CS_RAW_DEBUG_FLAGS);
	}
}
#else
static void
cs_etm_decoder__init_raw_frame_logging(
	struct cs_etm_decoder_params *d_params __maybe_unused,
	struct cs_etm_decoder *decoder __maybe_unused)
{
}
#endif

static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
				  struct cs_etm_packet_queue *packet_queue,
				  const uint8_t trace_chan_id)
{
	/* No timestamp packet has been received, nothing to do */
	if (!packet_queue->cs_timestamp)
		return OCSD_RESP_CONT;

	packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;

	/* Estimate the timestamp for the next range packet */
	packet_queue->next_cs_timestamp += packet_queue->instr_count;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	return OCSD_RESP_WAIT;
}

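/*
 * Illustrative numbers only: between two real TIMESTAMP elements the decoder
 * approximates time as one unit per executed instruction.  If the previous
 * estimate was 1000 and 250 instructions have been queued since, this range
 * is reported at 1000 and the next estimate becomes 1000 + 250 = 1250; the
 * estimate is overwritten as soon as the next hard timestamp arrives (see
 * cs_etm_decoder__do_hard_timestamp() below).
 */
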
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
				  const ocsd_generic_trace_elem *elem,
				  const uint8_t trace_chan_id,
				  const ocsd_trc_index_t indx)
{
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * We've seen a timestamp packet before - simply record the new value.
	 * Function do_soft_timestamp() will report the value to the front end,
	 * hence asking the decoder to keep decoding rather than stopping.
	 */
	if (packet_queue->cs_timestamp) {
		packet_queue->next_cs_timestamp = elem->timestamp;
		return OCSD_RESP_CONT;
	}

	if (!elem->timestamp) {
		/*
		 * Zero timestamps can be seen due to misconfiguration or
		 * hardware bugs.  Warn once, and don't try to subtract
		 * instr_count as it would result in an underflow.
		 */
		packet_queue->cs_timestamp = 0;
		if (!cs_etm__etmq_is_timeless(etmq))
			pr_warning_once("Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
					". Decoding may be improved by prepending 'Z' to your current --itrace arguments.\n",
					indx);

	} else if (packet_queue->instr_count > elem->timestamp) {
		/*
		 * Sanity check that elem->timestamp - packet_queue->instr_count
		 * would not result in an underflow.  Warn and clamp at 0 if it
		 * would.
		 */
		packet_queue->cs_timestamp = 0;
		pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
	} else {
		/*
		 * This is the first timestamp we've seen since the beginning of
		 * traces or a discontinuity.  Since timestamp packets are
		 * generated *after* range packets have been generated, we need
		 * to estimate the time at which instructions started by
		 * subtracting the number of instructions executed from the
		 * timestamp.
		 */
		packet_queue->cs_timestamp = elem->timestamp - packet_queue->instr_count;
	}
	packet_queue->next_cs_timestamp = elem->timestamp;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	/* Halt processing until we are being told to proceed */
	return OCSD_RESP_WAIT;
}

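/*
 * Worked example with invented values: if the first TIMESTAMP element after
 * a discontinuity carries elem->timestamp == 10000 and 300 instructions
 * worth of ranges are already buffered (instr_count == 300), the queue is
 * stamped with 10000 - 300 = 9700, i.e. the estimated time at which those
 * instructions started, while next_cs_timestamp keeps the real value 10000.
 * Had instr_count exceeded the timestamp, the subtraction would wrap, which
 * is exactly what the underflow branch above clamps to 0.
 */
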
static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
	packet_queue->cs_timestamp = 0;
	packet_queue->next_cs_timestamp = 0;
	packet_queue->instr_count = 0;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
			      const u8 trace_chan_id,
			      enum cs_etm_sample_type sample_type)
{
	u32 et = 0;
	int cpu;

	if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_FATAL_SYS_ERR;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return OCSD_RESP_FATAL_SYS_ERR;

	et = packet_queue->tail;
	et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
	packet_queue->tail = et;
	packet_queue->packet_count++;

	packet_queue->packet_buffer[et].sample_type = sample_type;
	packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
	packet_queue->packet_buffer[et].cpu = cpu;
	packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].instr_count = 0;
	packet_queue->packet_buffer[et].last_instr_taken_branch = false;
	packet_queue->packet_buffer[et].last_instr_size = 0;
	packet_queue->packet_buffer[et].last_instr_type = 0;
	packet_queue->packet_buffer[et].last_instr_subtype = 0;
	packet_queue->packet_buffer[et].last_instr_cond = 0;
	packet_queue->packet_buffer[et].flags = 0;
	packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
	packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;

	if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_WAIT;

	return OCSD_RESP_CONT;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
			     struct cs_etm_packet_queue *packet_queue,
			     const ocsd_generic_trace_elem *elem,
			     const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
					    CS_ETM_RANGE);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &packet_queue->packet_buffer[packet_queue->tail];

	switch (elem->isa) {
	case ocsd_isa_aarch64:
		packet->isa = CS_ETM_ISA_A64;
		break;
	case ocsd_isa_arm:
		packet->isa = CS_ETM_ISA_A32;
		break;
	case ocsd_isa_thumb2:
		packet->isa = CS_ETM_ISA_T32;
		break;
	case ocsd_isa_tee:
	case ocsd_isa_jazelle:
	case ocsd_isa_custom:
	case ocsd_isa_unknown:
	default:
		packet->isa = CS_ETM_ISA_UNKNOWN;
	}

	packet->start_addr = elem->st_addr;
	packet->end_addr = elem->en_addr;
	packet->instr_count = elem->num_instr_range;
	packet->last_instr_type = elem->last_i_type;
	packet->last_instr_subtype = elem->last_i_subtype;
	packet->last_instr_cond = elem->last_instr_cond;

	if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
		packet->last_instr_taken_branch = elem->last_instr_exec;
	else
		packet->last_instr_taken_branch = false;

	packet->last_instr_size = elem->last_instr_sz;

	/* per-thread scenario, no need to generate a timestamp */
	if (cs_etm__etmq_is_timeless(etmq))
		goto out;

	/*
	 * The packet queue is full and we haven't seen a timestamp (had we
	 * seen one the packet queue wouldn't be full).  Let the front end
	 * deal with it.
	 */
	if (ret == OCSD_RESP_WAIT)
		goto out;

	packet_queue->instr_count += elem->num_instr_range;
	/* Tell the front end we have a new timestamp to process */
	ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
						trace_chan_id);
out:
	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	/*
	 * Something happened and who knows when we'll get new traces so
	 * reset time statistics.
	 */
	cs_etm_decoder__reset_timestamp(queue);
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_DISCONTINUITY);
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
				 const ocsd_generic_trace_elem *elem,
				 const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					    CS_ETM_EXCEPTION);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &queue->packet_buffer[queue->tail];
	packet->exception_number = elem->exception_number;

	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_EXCEPTION_RET);
}

static ocsd_datapath_resp_t
cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
			struct cs_etm_packet_queue *packet_queue,
			const ocsd_generic_trace_elem *elem,
			const uint8_t trace_chan_id)
{
	pid_t tid = -1;
	static u64 pid_fmt;
	int ret;

	/*
	 * As all the ETMs run at the same exception level, the system should
	 * have the same PID format across CPUs.  So cache the PID format and
	 * reuse it for sequential decoding.
	 */
	if (!pid_fmt) {
		ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
		if (ret)
			return OCSD_RESP_FATAL_SYS_ERR;
	}

	/*
	 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
	 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
	 * as VMID; bit ETM_OPT_CTXTID2 is set in this case.
	 */
	switch (pid_fmt) {
	case BIT(ETM_OPT_CTXTID):
		if (elem->context.ctxt_id_valid)
			tid = elem->context.context_id;
		break;
	case BIT(ETM_OPT_CTXTID2):
		if (elem->context.vmid_valid)
			tid = elem->context.vmid;
		break;
	default:
		break;
	}

	if (tid == -1)
		return OCSD_RESP_CONT;

	if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * A timestamp is generated after a PE_CONTEXT element so make sure
	 * to rely on the one that follows.
	 */
	cs_etm_decoder__reset_timestamp(packet_queue);

	return OCSD_RESP_CONT;
}

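/*
 * Background sketch for the switch above (paraphrasing the driver contract,
 * not upstream documentation):
 *
 *	pid_fmt == BIT(ETM_OPT_CTXTID)	- PID traced via the context ID
 *					  register, delivered here as
 *					  elem->context.context_id
 *	pid_fmt == BIT(ETM_OPT_CTXTID2)	- kernel running at EL2, PID traced
 *					  in CONTEXTIDR_EL2 and delivered as
 *					  elem->context.vmid
 *
 * Any other format leaves tid at -1 and the PE_CONTEXT element is ignored.
 */
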
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
				const void *context,
				const ocsd_trc_index_t indx,
				const u8 trace_chan_id __maybe_unused,
				const ocsd_generic_trace_elem *elem)
{
	ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
	struct cs_etm_queue *etmq = decoder->data;
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	switch (elem->elem_type) {
	case OCSD_GEN_TRC_ELEM_UNKNOWN:
		break;
	case OCSD_GEN_TRC_ELEM_EO_TRACE:
	case OCSD_GEN_TRC_ELEM_NO_SYNC:
	case OCSD_GEN_TRC_ELEM_TRACE_ON:
		resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
		resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
						    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION:
		resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
							trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
		resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_TIMESTAMP:
		resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
							 trace_chan_id,
							 indx);
		break;
	case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
		resp = cs_etm_decoder__set_tid(etmq, packet_queue,
					       elem, trace_chan_id);
		break;
	/* Unused packet types */
	case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
	case OCSD_GEN_TRC_ELEM_ADDR_NACC:
	case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
	case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
	case OCSD_GEN_TRC_ELEM_EVENT:
	case OCSD_GEN_TRC_ELEM_SWTRACE:
	case OCSD_GEN_TRC_ELEM_CUSTOM:
	case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
	case OCSD_GEN_TRC_ELEM_MEMTRANS:
	default:
		break;
	}

	return resp;
}

static int
cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
				   struct cs_etm_trace_params *t_params,
				   struct cs_etm_decoder *decoder)
{
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	ocsd_ete_cfg trace_config_ete;
	void *trace_config;
	u8 csid;

	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder->decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
					OCSD_BUILTIN_DCD_ETMV3 :
					OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	case CS_ETM_PROTO_ETE:
		cs_etm_decoder__gen_ete_config(t_params, &trace_config_ete);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETE;
		trace_config = &trace_config_ete;
		break;
	default:
		return -1;
	}

	if (d_params->operation == CS_ETM_OPERATION_DECODE) {
		if (ocsd_dt_create_decoder(decoder->dcd_tree,
					   decoder->decoder_name,
					   OCSD_CREATE_FLG_FULL_DECODER,
					   trace_config, &csid))
			return -1;

		if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
					       cs_etm_decoder__gen_trace_elem_printer,
					       decoder))
			return -1;

		return 0;
	} else if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder->decoder_name,
					   OCSD_CREATE_FLG_PACKET_PROC,
					   trace_config, &csid))
			return -1;

		if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
			return -1;

		return 0;
	}

	return -1;
}

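/*
 * Summary of the two modes above, paraphrased: CS_ETM_OPERATION_DECODE
 * creates a full decoder (OCSD_CREATE_FLG_FULL_DECODER) and routes generic
 * trace elements to cs_etm_decoder__gen_trace_elem_printer() so the front
 * end can synthesize samples, whereas CS_ETM_OPERATION_PRINT only creates a
 * packet processor (OCSD_CREATE_FLG_PACKET_PROC) plus a protocol printer,
 * which is what the --dump style raw output relies on.
 */
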
struct cs_etm_decoder *
cs_etm_decoder__new(int decoders, struct cs_etm_decoder_params *d_params,
		    struct cs_etm_trace_params t_params[])
{
	struct cs_etm_decoder *decoder;
	ocsd_dcd_tree_src_t format;
	u32 flags;
	int i, ret;

	if ((!t_params) || (!d_params))
		return NULL;

	decoder = zalloc(sizeof(*decoder));

	if (!decoder)
		return NULL;

	decoder->data = d_params->data;
	decoder->prev_return = OCSD_RESP_CONT;
	format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
					OCSD_TRC_SRC_SINGLE);
	flags = 0;
	flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
	flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
	flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);

	/*
	 * Drivers may add barrier frames when used with perf, set up to
	 * handle this.  Barriers consist of an FSYNC packet repeated 4 times.
	 */
	flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;

	/* Create decode tree for the data source */
	decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);

	if (decoder->dcd_tree == 0)
		goto err_free_decoder;

	/* init library print logging support */
	ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
	if (ret != 0)
		goto err_free_decoder;

	/* init raw frame logging if required */
	cs_etm_decoder__init_raw_frame_logging(d_params, decoder);

	for (i = 0; i < decoders; i++) {
		ret = cs_etm_decoder__create_etm_decoder(d_params,
							 &t_params[i],
							 decoder);
		if (ret != 0)
			goto err_free_decoder;
	}

	return decoder;

err_free_decoder:
	cs_etm_decoder__free(decoder);
	return NULL;
}

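/*
 * How the datapath responses are consumed below, paraphrased: raw AUX data
 * is pushed with OCSD_OP_DATA.  When one of the callbacks above answers
 * OCSD_RESP_WAIT (packet queue full, or a new timestamp for the front end),
 * the loop stops and the response is remembered in decoder->prev_return;
 * the next call then drains the decoder with OCSD_OP_FLUSH before pushing
 * more data.  Any response that is neither CONT nor WAIT ends up as -EINVAL.
 */
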
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
				       u64 indx, const u8 *buf,
				       size_t len, size_t *consumed)
{
	int ret = 0;
	ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
	ocsd_datapath_resp_t prev_return = decoder->prev_return;
	size_t processed = 0;
	u32 count;

	while (processed < len) {
		if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_FLUSH,
						   0,
						   0,
						   NULL,
						   NULL);
		} else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_DATA,
						   indx + processed,
						   len - processed,
						   &buf[processed],
						   &count);
			processed += count;
		} else {
			ret = -EINVAL;
			break;
		}

		/*
		 * Return to the input code if the packet buffer is full.
		 * Flushing will get done once the packet buffer has been
		 * processed.
		 */
		if (OCSD_DATA_RESP_IS_WAIT(cur))
			break;

		prev_return = cur;
	}

	decoder->prev_return = cur;
	*consumed = processed;

	return ret;
}

void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
	if (!decoder)
		return;

	ocsd_destroy_dcd_tree(decoder->dcd_tree);
	decoder->dcd_tree = NULL;
	free(decoder);
}

const char *cs_etm_decoder__get_name(struct cs_etm_decoder *decoder)
{
	return decoder->decoder_name;
}