hist.c (21107B)
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../perf.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill trailing group members which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size, as that is where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

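/*
 * Illustration (editorial, not part of the original source): for an
 * event group {cycles,instructions,branches} where a given entry has
 * samples only for the leader and the third member, the zero-fill
 * loops above keep the row aligned by printing an explicit zero for
 * the gap:
 *
 *	12.34%   0.00%   5.67%
 *
 * This relies on the pairs list being ordered by group index, which is
 * why a single prev_idx pass is enough to detect the missing columns.
 */
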
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

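/*
 * Example (editorial, illustrative): with "perf report --group-sort-idx 2"
 * on a three-event group, __hpp__group_sort_idx() orders entries by the
 * third member's field first and only then walks the remaining
 * non-leader members to break ties; __hpp__sort() below is the default
 * path that starts from the group leader instead.
 */
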
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

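/*
 * Note (editorial): both helpers clamp the vsnprintf()-style return
 * value, which is the would-be length, to ssize - 1, the number of
 * characters actually stored, so that advance_hpp() inside
 * hpp__call_print_fn() never advances past the end of the buffer.
 * hpp_color_scnprintf() pulls (int len, double percent) out of the
 * va_list by hand, so its callers must pass exactly those arguments
 * after the format string, as the " %*.2f%%" users below do.
 */
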
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

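/*
 * Illustration (editorial): HPP_PERCENT_FNS(overhead, period) above
 * expands into he_get_period(), hpp__color_overhead(),
 * hpp__entry_overhead() and hpp__sort_overhead(), i.e. everything
 * needed to print and sort the "Overhead" column from he->stat.period
 * as a percentage of the total period.
 */
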
static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely unhooked;
	 * if not, it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to set up
	 * default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

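/*
 * Example (editorial, illustrative): with the default setup only
 * "Overhead" is added as an output field; when callchain accumulation
 * is enabled (children mode), the "Children" column is added first and
 * the plain overhead column is renamed "Self".
 */
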
void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

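/*
 * Note (editorial): a perf_hpp_list tracks the same formats on two
 * lists -- ->fields (what gets printed, linked via fmt->list) and
 * ->sorts (how entries are ordered, linked via fmt->sort_list). The
 * two helpers above copy entries across so that every sort key is also
 * printed and every printed field takes part in sorting, skipping
 * formats already present on the other list.
 */
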
/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

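/*
 * Illustration (editorial): in hierarchy mode ("perf report
 * --hierarchy"), each sort level gets its own perf_hpp_list_node, so
 * e.g. "-s comm,dso" yields one node for the comm level and one for
 * the dso level. add_hierarchy_fmt() above creates the per-level node
 * on first use and links a duplicate of the format into it.
 */
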
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}