stat.c (14638B)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "hashmap.h"
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
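/*
 * Worked example of the online (Welford) update above, with made-up
 * values: feeding 10, 20 and 30 through update_stats() leaves
 * n = 3, mean = 20 and M2 = 200, so stddev_stats() returns
 * sqrt((200 / 2) / 3) ~= 5.77, the std dev of the mean; that is the
 * +/- noise repeated runs (perf stat -r N) print next to the
 * averaged counts, made relative via rel_stddev_stats().
 */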
bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE, x),
	ID(CYCLES_IN_TX, cpu/cycles-t/),
	ID(TRANSACTION_START, cpu/tx-start/),
	ID(ELISION_START, cpu/el-start/),
	ID(CYCLES_IN_TX_CP, cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i]) ||
		    (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
		     && strstr(evsel__name(evsel), evsel->pmu_name))) {
			ps->id = i;
			break;
		}
	}
}

static void evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}

int evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}

void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}

	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
}

void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}
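/*
 * Rough sketch of how builtin-stat is expected to drive the helpers
 * above (illustrative only; the exact sequence depends on the
 * --interval/--repeat options):
 *
 *	evlist__alloc_stats(evlist, true);  // true: also prev_raw_counts
 *	... open counters, read them, perf_stat_process_counter() ...
 *	evlist__reset_stats(evlist);        // between repeated runs
 *	evlist__free_stats(evlist);         // at exit
 */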
void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	/*
	 * To collect the overall statistics for interval mode,
	 * we copy the counts from evsel->prev_raw_counts to
	 * evsel->counts. The perf_stat_process_counter creates
	 * aggr values from per cpu values, but the per cpu values
	 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
	 * previous aggr value to the first member of perf_counts,
	 * then aggr calculation in process_counter_values can work
	 * correctly.
	 */
	evlist__for_each_entry(evlist, evsel) {
		*perf_counts(evsel->prev_raw_counts, 0, 0) =
			evsel->prev_raw_counts->aggr;
	}
}

static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(const void *__key1, const void *__key2,
			 void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}

static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On multi-die system, die_id > 0. On no-die system, die_id = 0.
	 * We use hashmap(socket, die) to check the used socket+die pair.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, (void *)key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, (void *)key, (void *)1);

	return ret;
}
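/*
 * Example of the key layout used above in check_per_pkg(): die_id in
 * the upper 32 bits, socket_id in the lower 32. A CPU on socket 0,
 * die 1 yields key 0x100000000; socket 1, die 0 yields 0x1. Note that
 * pkg_id_hash() buckets only by the low (socket) bits, so two dies on
 * the same socket share a bucket and are told apart by pkg_id_equal().
 */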
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NODE:
	case AGGR_NONE:
		if (!evsel->snapshot)
			evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu_map_idx, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
		/* fall through: nothing more to do for the global case */
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}
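/*
 * Scaling example for the aggregated counts above (illustrative
 * numbers): a multiplexed counter that was scheduled for run = 50ms
 * out of ena = 100ms enabled time has its value extrapolated by
 * perf_counts_values__scale() to val * ena / run, i.e. doubled, with
 * counts->scaled flagging the estimate. res_stats[] then accumulates
 * {val, ena, run} so repeated runs can be averaged.
 */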
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
		       st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}
	*ptr = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		      st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}

int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of non trivial group, let's enable
	 * the group read (for leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
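	/*
	 * For illustration: with PERF_FORMAT_GROUP a single read() on
	 * the leader returns all members in one buffer, roughly
	 *
	 *	{ u64 nr; u64 time_enabled; u64 time_running;
	 *	  struct { u64 value; u64 id; } cnt[nr]; }
	 *
	 * and PERF_FORMAT_ID lets each slot be mapped back to its
	 * evsel (see evlist__id2evsel() above).
	 */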
	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user = 1;
	}

	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
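/*
 * For example, a system-wide 'perf stat -a' has a CPU target and takes
 * the evsel__open_per_cpu() path above, while 'perf stat -p <pid>' or
 * a forked workload only has a thread map and takes
 * evsel__open_per_thread().
 */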