cputopo.c
// SPDX-License-Identifier: GPL-2.0
#include <sys/param.h>
#include <sys/utsname.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <api/fs/fs.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>

#include "cputopo.h"
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "pmu-hybrid.h"

#define PACKAGE_CPUS_FMT \
	"%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
#define PACKAGE_CPUS_FMT_OLD \
	"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define DIE_CPUS_FMT \
	"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
#define CORE_CPUS_FMT \
	"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
#define CORE_CPUS_FMT_OLD \
	"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
#define NODE_ONLINE_FMT \
	"%s/devices/system/node/online"
#define NODE_MEMINFO_FMT \
	"%s/devices/system/node/node%d/meminfo"
#define NODE_CPULIST_FMT \
	"%s/devices/system/node/node%d/cpulist"

static int build_cpu_topology(struct cpu_topology *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	if (access(filename, F_OK) == -1) {
		scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT_OLD,
			  sysfs__mountpoint(), cpu);
	}
	fp = fopen(filename, "r");
	if (!fp)
		goto try_dies;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_dies;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->package_cpus_lists; i++) {
		if (!strcmp(buf, tp->package_cpus_list[i]))
			break;
	}
	if (i == tp->package_cpus_lists) {
		tp->package_cpus_list[i] = buf;
		tp->package_cpus_lists++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_dies:
	if (!tp->die_cpus_list)
		goto try_threads;

	scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->die_cpus_lists; i++) {
		if (!strcmp(buf, tp->die_cpus_list[i]))
			break;
	}
	if (i == tp->die_cpus_lists) {
		tp->die_cpus_list[i] = buf;
		tp->die_cpus_lists++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	if (access(filename, F_OK) == -1) {
		scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT_OLD,
			  sysfs__mountpoint(), cpu);
	}
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_cpus_lists; i++) {
		if (!strcmp(buf, tp->core_cpus_list[i]))
			break;
	}
	if (i == tp->core_cpus_lists) {
		tp->core_cpus_list[i] = buf;
		tp->core_cpus_lists++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

void cpu_topology__delete(struct cpu_topology *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->package_cpus_lists; i++)
		zfree(&tp->package_cpus_list[i]);

	for (i = 0 ; i < tp->die_cpus_lists; i++)
		zfree(&tp->die_cpus_list[i]);

	for (i = 0 ; i < tp->core_cpus_lists; i++)
		zfree(&tp->core_cpus_list[i]);

	free(tp);
}

static bool has_die_topology(void)
{
	char filename[MAXPATHLEN];
	struct utsname uts;

	if (uname(&uts) < 0)
		return false;

	if (strncmp(uts.machine, "x86_64", 6) &&
	    strncmp(uts.machine, "s390x", 5))
		return false;

	scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
		  sysfs__mountpoint(), 0);
	if (access(filename, F_OK) == -1)
		return false;

	return true;
}

struct cpu_topology *cpu_topology__new(void)
{
	struct cpu_topology *tp = NULL;
	void *addr;
	u32 nr, i, nr_addr;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct perf_cpu_map *map;
	bool has_die = has_die_topology();

	ncpus = cpu__max_present_cpu().cpu;

	/* build online CPU map */
	map = perf_cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	if (has_die)
		nr_addr = 3;
	else
		nr_addr = 2;
	addr = calloc(1, sizeof(*tp) + nr_addr * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	addr += sizeof(*tp);
	tp->package_cpus_list = addr;
	addr += sz;
	if (has_die) {
		tp->die_cpus_list = addr;
		addr += sz;
	}
	tp->core_cpus_list = addr;

	for (i = 0; i < nr; i++) {
		if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i }))
			continue;

		ret = build_cpu_topology(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	perf_cpu_map__put(map);
	if (ret) {
		cpu_topology__delete(tp);
		tp = NULL;
	}
	return tp;
}
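/*
 * A minimal usage sketch for the CPU topology API above: a caller might build
 * the topology once, walk the deduplicated sibling-list strings collected by
 * build_cpu_topology(), and release everything with cpu_topology__delete().
 * The function name below is made up purely for illustration and is not part
 * of this file's API.
 */
#if 0
static void cpu_topology_usage_example(void)
{
	/* hypothetical caller, for illustration only */
	struct cpu_topology *tp = cpu_topology__new();
	u32 i;

	if (!tp)
		return;

	for (i = 0; i < tp->package_cpus_lists; i++)
		pr_debug("package cpus: %s\n", tp->package_cpus_list[i]);
	for (i = 0; i < tp->die_cpus_lists; i++)
		pr_debug("die cpus:     %s\n", tp->die_cpus_list[i]);
	for (i = 0; i < tp->core_cpus_lists; i++)
		pr_debug("core cpus:    %s\n", tp->core_cpus_list[i]);

	cpu_topology__delete(tp);
}
#endif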
static int load_numa_node(struct numa_topology_node *node, int nr)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	int ret = -1;
	FILE *fp;
	u64 mem;

	node->node = (u32) nr;

	scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
		  sysfs__mountpoint(), nr);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto err;
		if (!strcmp(field, "MemTotal:"))
			node->mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			node->mem_free = mem;
		if (node->mem_total && node->mem_free)
			break;
	}

	fclose(fp);
	fp = NULL;

	scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
		  sysfs__mountpoint(), nr);

	fp = fopen(str, "r");
	if (!fp)
		goto err;

	if (getline(&buf, &len, fp) <= 0)
		goto err;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	node->cpus = buf;
	fclose(fp);
	return 0;

err:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

struct numa_topology *numa_topology__new(void)
{
	struct perf_cpu_map *node_map = NULL;
	struct numa_topology *tp = NULL;
	char path[MAXPATHLEN];
	char *buf = NULL;
	size_t len = 0;
	u32 nr, i;
	FILE *fp;
	char *c;

	scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
		  sysfs__mountpoint());

	fp = fopen(path, "r");
	if (!fp)
		return NULL;

	if (getline(&buf, &len, fp) <= 0)
		goto out;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = perf_cpu_map__new(buf);
	if (!node_map)
		goto out;

	nr = (u32) perf_cpu_map__nr(node_map);

	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
	if (!tp)
		goto out;

	tp->nr = nr;

	for (i = 0; i < nr; i++) {
		if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
			numa_topology__delete(tp);
			tp = NULL;
			break;
		}
	}

out:
	free(buf);
	fclose(fp);
	perf_cpu_map__put(node_map);
	return tp;
}

void numa_topology__delete(struct numa_topology *tp)
{
	u32 i;

	for (i = 0; i < tp->nr; i++)
		zfree(&tp->nodes[i].cpus);

	free(tp);
}
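/*
 * A minimal usage sketch for the NUMA topology API above, printing the
 * per-node memory figures (reported by the node's meminfo in kB) and the
 * cpulist string filled in by load_numa_node(). The function name below is
 * hypothetical and only illustrates how numa_topology__new() and
 * numa_topology__delete() pair up.
 */
#if 0
static void numa_topology_usage_example(void)
{
	/* hypothetical caller, for illustration only */
	struct numa_topology *tp = numa_topology__new();
	u32 i;

	if (!tp)
		return;

	for (i = 0; i < tp->nr; i++)
		pr_debug("node%u: MemTotal %" PRIu64 " kB, MemFree %" PRIu64 " kB, cpus %s\n",
			 tp->nodes[i].node, tp->nodes[i].mem_total,
			 tp->nodes[i].mem_free, tp->nodes[i].cpus);

	numa_topology__delete(tp);
}
#endif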
static int load_hybrid_node(struct hybrid_topology_node *node,
			    struct perf_pmu *pmu)
{
	const char *sysfs;
	char path[PATH_MAX];
	char *buf = NULL, *p;
	FILE *fp;
	size_t len = 0;

	node->pmu_name = strdup(pmu->name);
	if (!node->pmu_name)
		return -1;

	sysfs = sysfs__mountpoint();
	if (!sysfs)
		goto err;

	snprintf(path, PATH_MAX, CPUS_TEMPLATE_CPU, sysfs, pmu->name);
	fp = fopen(path, "r");
	if (!fp)
		goto err;

	if (getline(&buf, &len, fp) <= 0) {
		fclose(fp);
		goto err;
	}

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	fclose(fp);
	node->cpus = buf;
	return 0;

err:
	zfree(&node->pmu_name);
	free(buf);
	return -1;
}

struct hybrid_topology *hybrid_topology__new(void)
{
	struct perf_pmu *pmu;
	struct hybrid_topology *tp = NULL;
	u32 nr, i = 0;

	nr = perf_pmu__hybrid_pmu_num();
	if (nr == 0)
		return NULL;

	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
	if (!tp)
		return NULL;

	tp->nr = nr;
	perf_pmu__for_each_hybrid_pmu(pmu) {
		if (load_hybrid_node(&tp->nodes[i], pmu)) {
			hybrid_topology__delete(tp);
			return NULL;
		}
		i++;
	}

	return tp;
}

void hybrid_topology__delete(struct hybrid_topology *tp)
{
	u32 i;

	for (i = 0; i < tp->nr; i++) {
		zfree(&tp->nodes[i].pmu_name);
		zfree(&tp->nodes[i].cpus);
	}

	free(tp);
}
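/*
 * A minimal usage sketch for the hybrid topology API above, assuming a hybrid
 * system where perf_pmu__hybrid_pmu_num() is non-zero. It shows how the PMU
 * name/cpus pairs collected by load_hybrid_node() might be consumed and then
 * released; the function name below is hypothetical and not part of this
 * file's API.
 */
#if 0
static void hybrid_topology_usage_example(void)
{
	/* hypothetical caller, for illustration only */
	struct hybrid_topology *tp = hybrid_topology__new();
	u32 i;

	if (!tp)
		return;

	for (i = 0; i < tp->nr; i++)
		pr_debug("%s: %s\n", tp->nodes[i].pmu_name, tp->nodes[i].cpus);

	hybrid_topology__delete(tp);
}
#endif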