cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

synthetic-events.c (56238B)


      1// SPDX-License-Identifier: GPL-2.0-only 
      2
      3#include "util/cgroup.h"
      4#include "util/data.h"
      5#include "util/debug.h"
      6#include "util/dso.h"
      7#include "util/event.h"
      8#include "util/evlist.h"
      9#include "util/machine.h"
     10#include "util/map.h"
     11#include "util/map_symbol.h"
     12#include "util/branch.h"
     13#include "util/memswap.h"
     14#include "util/namespaces.h"
     15#include "util/session.h"
     16#include "util/stat.h"
     17#include "util/symbol.h"
     18#include "util/synthetic-events.h"
     19#include "util/target.h"
     20#include "util/time-utils.h"
     21#include <linux/bitops.h>
     22#include <linux/kernel.h>
     23#include <linux/string.h>
     24#include <linux/zalloc.h>
     25#include <linux/perf_event.h>
     26#include <asm/bug.h>
     27#include <perf/evsel.h>
     28#include <perf/cpumap.h>
     29#include <internal/lib.h> // page_size
     30#include <internal/threadmap.h>
     31#include <perf/threadmap.h>
     32#include <symbol/kallsyms.h>
     33#include <dirent.h>
     34#include <errno.h>
     35#include <inttypes.h>
     36#include <stdio.h>
     37#include <string.h>
     38#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
     39#include <api/fs/fs.h>
     40#include <api/io.h>
     41#include <sys/types.h>
     42#include <sys/stat.h>
     43#include <fcntl.h>
     44#include <unistd.h>
     45
     46#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
     47
     48unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
     49
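/*
 * Wrap a synthesized event in a mostly-empty sample (pid/tid/time/stream_id/
 * cpu all -1, period 1) and hand it to the tool's process callback.
 */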
     50int perf_tool__process_synth_event(struct perf_tool *tool,
     51				   union perf_event *event,
     52				   struct machine *machine,
     53				   perf_event__handler_t process)
     54{
     55	struct perf_sample synth_sample = {
     56		.pid	   = -1,
     57		.tid	   = -1,
     58		.time	   = -1,
     59		.stream_id = -1,
     60		.cpu	   = -1,
     61		.period	   = 1,
     62		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
     63	};
     64
     65	return process(tool, event, &synth_sample, machine);
      66}
     67
     68/*
      69 * Assumes that the first 4095 bytes of /proc/pid/status contain
      70 * the comm, tgid and ppid.
     71 */
     72static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
     73				    pid_t *tgid, pid_t *ppid, bool *kernel)
     74{
     75	char bf[4096];
     76	int fd;
     77	size_t size = 0;
     78	ssize_t n;
     79	char *name, *tgids, *ppids, *vmpeak, *threads;
     80
     81	*tgid = -1;
     82	*ppid = -1;
     83
     84	if (pid)
     85		snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
     86	else
     87		snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
     88
     89	fd = open(bf, O_RDONLY);
     90	if (fd < 0) {
     91		pr_debug("couldn't open %s\n", bf);
     92		return -1;
     93	}
     94
     95	n = read(fd, bf, sizeof(bf) - 1);
     96	close(fd);
     97	if (n <= 0) {
      98		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
     99			   tid);
    100		return -1;
    101	}
    102	bf[n] = '\0';
    103
    104	name = strstr(bf, "Name:");
    105	tgids = strstr(name ?: bf, "Tgid:");
    106	ppids = strstr(tgids ?: bf, "PPid:");
    107	vmpeak = strstr(ppids ?: bf, "VmPeak:");
    108
    109	if (vmpeak)
    110		threads = NULL;
    111	else
    112		threads = strstr(ppids ?: bf, "Threads:");
    113
    114	if (name) {
    115		char *nl;
    116
    117		name = skip_spaces(name + 5);  /* strlen("Name:") */
    118		nl = strchr(name, '\n');
    119		if (nl)
    120			*nl = '\0';
    121
    122		size = strlen(name);
    123		if (size >= len)
    124			size = len - 1;
    125		memcpy(comm, name, size);
    126		comm[size] = '\0';
    127	} else {
    128		pr_debug("Name: string not found for pid %d\n", tid);
    129	}
    130
    131	if (tgids) {
    132		tgids += 5;  /* strlen("Tgid:") */
    133		*tgid = atoi(tgids);
    134	} else {
    135		pr_debug("Tgid: string not found for pid %d\n", tid);
    136	}
    137
    138	if (ppids) {
    139		ppids += 5;  /* strlen("PPid:") */
    140		*ppid = atoi(ppids);
    141	} else {
    142		pr_debug("PPid: string not found for pid %d\n", tid);
    143	}
    144
    145	if (!vmpeak && threads)
    146		*kernel = true;
    147	else
    148		*kernel = false;
    149
    150	return 0;
    151}
    152
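/*
 * Fill in a PERF_RECORD_COMM event for @tid. On the host machine the comm,
 * tgid, ppid and kernel-thread flag are read from /proc; for guest machines
 * the machine's pid is used as the tgid.
 */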
    153static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
    154				    struct machine *machine,
    155				    pid_t *tgid, pid_t *ppid, bool *kernel)
    156{
    157	size_t size;
    158
    159	*ppid = -1;
    160
    161	memset(&event->comm, 0, sizeof(event->comm));
    162
    163	if (machine__is_host(machine)) {
    164		if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
    165					     sizeof(event->comm.comm),
    166					     tgid, ppid, kernel) != 0) {
    167			return -1;
    168		}
    169	} else {
    170		*tgid = machine->pid;
    171	}
    172
    173	if (*tgid < 0)
    174		return -1;
    175
    176	event->comm.pid = *tgid;
    177	event->comm.header.type = PERF_RECORD_COMM;
    178
    179	size = strlen(event->comm.comm) + 1;
    180	size = PERF_ALIGN(size, sizeof(u64));
    181	memset(event->comm.comm + size, 0, machine->id_hdr_size);
    182	event->comm.header.size = (sizeof(event->comm) -
    183				(sizeof(event->comm.comm) - size) +
    184				machine->id_hdr_size);
    185	event->comm.tid = tid;
    186
    187	return 0;
    188}
    189
    190pid_t perf_event__synthesize_comm(struct perf_tool *tool,
    191					 union perf_event *event, pid_t pid,
    192					 perf_event__handler_t process,
    193					 struct machine *machine)
    194{
    195	pid_t tgid, ppid;
    196	bool kernel_thread;
    197
    198	if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
    199				     &kernel_thread) != 0)
    200		return -1;
    201
    202	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
    203		return -1;
    204
    205	return tgid;
    206}
    207
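/*
 * Record the device/inode pair of /proc/<pid>/ns/<ns> as the namespace
 * link info; left untouched if the stat fails.
 */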
    208static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
    209					 struct perf_ns_link_info *ns_link_info)
    210{
    211	struct stat64 st;
    212	char proc_ns[128];
    213
    214	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
    215	if (stat64(proc_ns, &st) == 0) {
    216		ns_link_info->dev = st.st_dev;
    217		ns_link_info->ino = st.st_ino;
    218	}
    219}
    220
    221int perf_event__synthesize_namespaces(struct perf_tool *tool,
    222				      union perf_event *event,
    223				      pid_t pid, pid_t tgid,
    224				      perf_event__handler_t process,
    225				      struct machine *machine)
    226{
    227	u32 idx;
    228	struct perf_ns_link_info *ns_link_info;
    229
    230	if (!tool || !tool->namespace_events)
    231		return 0;
    232
    233	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
    234	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
    235	       machine->id_hdr_size));
    236
    237	event->namespaces.pid = tgid;
    238	event->namespaces.tid = pid;
    239
    240	event->namespaces.nr_namespaces = NR_NAMESPACES;
    241
    242	ns_link_info = event->namespaces.link_info;
    243
    244	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
    245		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
    246					     &ns_link_info[idx]);
    247
    248	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
    249
    250	event->namespaces.header.size = (sizeof(event->namespaces) +
    251			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
    252			machine->id_hdr_size);
    253
    254	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
    255		return -1;
    256
    257	return 0;
    258}
    259
    260static int perf_event__synthesize_fork(struct perf_tool *tool,
    261				       union perf_event *event,
    262				       pid_t pid, pid_t tgid, pid_t ppid,
    263				       perf_event__handler_t process,
    264				       struct machine *machine)
    265{
    266	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
    267
    268	/*
     269	 * For the main thread, set the parent to the ppid from the status
     270	 * file. For other threads, set the parent pid to the main thread,
     271	 * i.e. assume the main thread spawns all threads in a process.
     272	 */
    273	if (tgid == pid) {
    274		event->fork.ppid = ppid;
    275		event->fork.ptid = ppid;
    276	} else {
    277		event->fork.ppid = tgid;
    278		event->fork.ptid = tgid;
    279	}
    280	event->fork.pid  = tgid;
    281	event->fork.tid  = pid;
    282	event->fork.header.type = PERF_RECORD_FORK;
    283	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
    284
    285	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
    286
    287	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
    288		return -1;
    289
    290	return 0;
    291}
    292
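/*
 * Parse one /proc/<pid>/maps line of the form
 *   start-end perms offset maj:min inode [pathname]
 * into the out parameters. Returns false on malformed input.
 */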
    293static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
    294				u32 *prot, u32 *flags, __u64 *offset,
    295				u32 *maj, u32 *min,
    296				__u64 *inode,
    297				ssize_t pathname_size, char *pathname)
    298{
    299	__u64 temp;
    300	int ch;
    301	char *start_pathname = pathname;
    302
    303	if (io__get_hex(io, start) != '-')
    304		return false;
    305	if (io__get_hex(io, end) != ' ')
    306		return false;
    307
    308	/* map protection and flags bits */
    309	*prot = 0;
    310	ch = io__get_char(io);
    311	if (ch == 'r')
    312		*prot |= PROT_READ;
    313	else if (ch != '-')
    314		return false;
    315	ch = io__get_char(io);
    316	if (ch == 'w')
    317		*prot |= PROT_WRITE;
    318	else if (ch != '-')
    319		return false;
    320	ch = io__get_char(io);
    321	if (ch == 'x')
    322		*prot |= PROT_EXEC;
    323	else if (ch != '-')
    324		return false;
    325	ch = io__get_char(io);
    326	if (ch == 's')
    327		*flags = MAP_SHARED;
    328	else if (ch == 'p')
    329		*flags = MAP_PRIVATE;
    330	else
    331		return false;
    332	if (io__get_char(io) != ' ')
    333		return false;
    334
    335	if (io__get_hex(io, offset) != ' ')
    336		return false;
    337
    338	if (io__get_hex(io, &temp) != ':')
    339		return false;
    340	*maj = temp;
    341	if (io__get_hex(io, &temp) != ' ')
    342		return false;
    343	*min = temp;
    344
    345	ch = io__get_dec(io, inode);
    346	if (ch != ' ') {
    347		*pathname = '\0';
    348		return ch == '\n';
    349	}
    350	do {
    351		ch = io__get_char(io);
    352	} while (ch == ' ');
    353	while (true) {
    354		if (ch < 0)
    355			return false;
    356		if (ch == '\0' || ch == '\n' ||
    357		    (pathname + 1 - start_pathname) >= pathname_size) {
    358			*pathname = '\0';
    359			return true;
    360		}
    361		*pathname++ = ch;
    362		ch = io__get_char(io);
    363	}
    364}
    365
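/*
 * Attach a build ID to an MMAP2 event: read it from /sys/kernel/notes for
 * the kernel, otherwise from the mapped file. On success the
 * PERF_RECORD_MISC_MMAP_BUILD_ID misc bit is set.
 */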
    366static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
    367					     bool is_kernel)
    368{
    369	struct build_id bid;
    370	int rc;
    371
    372	if (is_kernel)
    373		rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
    374	else
    375		rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
    376
    377	if (rc == 0) {
    378		memcpy(event->build_id, bid.data, sizeof(bid.data));
    379		event->build_id_size = (u8) bid.size;
    380		event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
    381		event->__reserved_1 = 0;
    382		event->__reserved_2 = 0;
    383	} else {
    384		if (event->filename[0] == '/') {
    385			pr_debug2("Failed to read build ID for %s\n",
    386				  event->filename);
    387		}
    388	}
    389}
    390
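/*
 * Synthesize one PERF_RECORD_MMAP2 per mapping in
 * <root_dir>/proc/<pid>/task/<pid>/maps. Parsing is bounded by
 * proc_map_timeout (milliseconds); when the limit is hit the last event is
 * flagged PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT and synthesis stops.
 */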
    391int perf_event__synthesize_mmap_events(struct perf_tool *tool,
    392				       union perf_event *event,
    393				       pid_t pid, pid_t tgid,
    394				       perf_event__handler_t process,
    395				       struct machine *machine,
    396				       bool mmap_data)
    397{
    398	unsigned long long t;
    399	char bf[BUFSIZ];
    400	struct io io;
    401	bool truncation = false;
    402	unsigned long long timeout = proc_map_timeout * 1000000ULL;
    403	int rc = 0;
    404	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
    405	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
    406
    407	if (machine__is_default_guest(machine))
    408		return 0;
    409
    410	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
    411		machine->root_dir, pid, pid);
    412
    413	io.fd = open(bf, O_RDONLY, 0);
    414	if (io.fd < 0) {
    415		/*
    416		 * We raced with a task exiting - just return:
    417		 */
    418		pr_debug("couldn't open %s\n", bf);
    419		return -1;
    420	}
    421	io__init(&io, io.fd, bf, sizeof(bf));
    422
    423	event->header.type = PERF_RECORD_MMAP2;
    424	t = rdclock();
    425
    426	while (!io.eof) {
    427		static const char anonstr[] = "//anon";
    428		size_t size, aligned_size;
    429
    430		/* ensure null termination since stack will be reused. */
    431		event->mmap2.filename[0] = '\0';
    432
    433		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
    434		if (!read_proc_maps_line(&io,
    435					&event->mmap2.start,
    436					&event->mmap2.len,
    437					&event->mmap2.prot,
    438					&event->mmap2.flags,
    439					&event->mmap2.pgoff,
    440					&event->mmap2.maj,
    441					&event->mmap2.min,
    442					&event->mmap2.ino,
    443					sizeof(event->mmap2.filename),
    444					event->mmap2.filename))
    445			continue;
    446
    447		if ((rdclock() - t) > timeout) {
     448			pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
    449				   "You may want to increase "
    450				   "the time limit by --proc-map-timeout\n",
    451				   machine->root_dir, pid, pid);
    452			truncation = true;
    453			goto out;
    454		}
    455
    456		event->mmap2.ino_generation = 0;
    457
    458		/*
    459		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
    460		 */
    461		if (machine__is_host(machine))
    462			event->header.misc = PERF_RECORD_MISC_USER;
    463		else
    464			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
    465
    466		if ((event->mmap2.prot & PROT_EXEC) == 0) {
    467			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
    468				continue;
    469
    470			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
    471		}
    472
    473out:
    474		if (truncation)
    475			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
    476
    477		if (!strcmp(event->mmap2.filename, ""))
    478			strcpy(event->mmap2.filename, anonstr);
    479
    480		if (hugetlbfs_mnt_len &&
    481		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
    482			     hugetlbfs_mnt_len)) {
    483			strcpy(event->mmap2.filename, anonstr);
    484			event->mmap2.flags |= MAP_HUGETLB;
    485		}
    486
    487		size = strlen(event->mmap2.filename) + 1;
    488		aligned_size = PERF_ALIGN(size, sizeof(u64));
     489		event->mmap2.len -= event->mmap2.start;
    490		event->mmap2.header.size = (sizeof(event->mmap2) -
    491					(sizeof(event->mmap2.filename) - aligned_size));
    492		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
    493			(aligned_size - size));
    494		event->mmap2.header.size += machine->id_hdr_size;
    495		event->mmap2.pid = tgid;
    496		event->mmap2.tid = pid;
    497
    498		if (symbol_conf.buildid_mmap2)
    499			perf_record_mmap2__read_build_id(&event->mmap2, false);
    500
    501		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
    502			rc = -1;
    503			break;
    504		}
    505
    506		if (truncation)
    507			break;
    508	}
    509
    510	close(io.fd);
    511	return rc;
    512}
    513
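/*
 * Cgroup synthesis relies on name_to_handle_at() to obtain the 64-bit cgroup
 * id, hence the HAVE_FILE_HANDLE guard; without it only a stub returning -1
 * is built (see the #else branch at the end of this block).
 */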
    514#ifdef HAVE_FILE_HANDLE
    515static int perf_event__synthesize_cgroup(struct perf_tool *tool,
    516					 union perf_event *event,
    517					 char *path, size_t mount_len,
    518					 perf_event__handler_t process,
    519					 struct machine *machine)
    520{
    521	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
    522	size_t path_len = strlen(path) - mount_len + 1;
    523	struct {
    524		struct file_handle fh;
    525		uint64_t cgroup_id;
    526	} handle;
    527	int mount_id;
    528
    529	while (path_len % sizeof(u64))
    530		path[mount_len + path_len++] = '\0';
    531
    532	memset(&event->cgroup, 0, event_size);
    533
    534	event->cgroup.header.type = PERF_RECORD_CGROUP;
    535	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
    536
    537	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
    538	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
     539		pr_debug("name_to_handle_at failed: %s\n", path);
    540		return -1;
    541	}
    542
    543	event->cgroup.id = handle.cgroup_id;
    544	strncpy(event->cgroup.path, path + mount_len, path_len);
    545	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
    546
    547	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
    548		pr_debug("process synth event failed\n");
    549		return -1;
    550	}
    551
    552	return 0;
    553}
    554
    555static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
    556					union perf_event *event,
    557					char *path, size_t mount_len,
    558					perf_event__handler_t process,
    559					struct machine *machine)
    560{
    561	size_t pos = strlen(path);
    562	DIR *d;
    563	struct dirent *dent;
    564	int ret = 0;
    565
    566	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
    567					  process, machine) < 0)
    568		return -1;
    569
    570	d = opendir(path);
    571	if (d == NULL) {
    572		pr_debug("failed to open directory: %s\n", path);
    573		return -1;
    574	}
    575
    576	while ((dent = readdir(d)) != NULL) {
    577		if (dent->d_type != DT_DIR)
    578			continue;
    579		if (!strcmp(dent->d_name, ".") ||
    580		    !strcmp(dent->d_name, ".."))
    581			continue;
    582
    583		/* any sane path should be less than PATH_MAX */
    584		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
    585			continue;
    586
    587		if (path[pos - 1] != '/')
    588			strcat(path, "/");
    589		strcat(path, dent->d_name);
    590
    591		ret = perf_event__walk_cgroup_tree(tool, event, path,
    592						   mount_len, process, machine);
    593		if (ret < 0)
    594			break;
    595
    596		path[pos] = '\0';
    597	}
    598
    599	closedir(d);
    600	return ret;
    601}
    602
    603int perf_event__synthesize_cgroups(struct perf_tool *tool,
    604				   perf_event__handler_t process,
    605				   struct machine *machine)
    606{
    607	union perf_event event;
    608	char cgrp_root[PATH_MAX];
    609	size_t mount_len;  /* length of mount point in the path */
    610
    611	if (!tool || !tool->cgroup_events)
    612		return 0;
    613
    614	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
    615		pr_debug("cannot find cgroup mount point\n");
    616		return -1;
    617	}
    618
    619	mount_len = strlen(cgrp_root);
    620	/* make sure the path starts with a slash (after mount point) */
    621	strcat(cgrp_root, "/");
    622
    623	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
    624					 process, machine) < 0)
    625		return -1;
    626
    627	return 0;
    628}
    629#else
    630int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
    631				   perf_event__handler_t process __maybe_unused,
    632				   struct machine *machine __maybe_unused)
    633{
    634	return -1;
    635}
    636#endif
    637
    638int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
    639				   struct machine *machine)
    640{
    641	int rc = 0;
    642	struct map *pos;
    643	struct maps *maps = machine__kernel_maps(machine);
    644	union perf_event *event;
    645	size_t size = symbol_conf.buildid_mmap2 ?
    646			sizeof(event->mmap2) : sizeof(event->mmap);
    647
    648	event = zalloc(size + machine->id_hdr_size);
    649	if (event == NULL) {
    650		pr_debug("Not enough memory synthesizing mmap event "
    651			 "for kernel modules\n");
    652		return -1;
    653	}
    654
    655	/*
    656	 * kernel uses 0 for user space maps, see kernel/perf_event.c
    657	 * __perf_event_mmap
    658	 */
    659	if (machine__is_host(machine))
    660		event->header.misc = PERF_RECORD_MISC_KERNEL;
    661	else
    662		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
    663
    664	maps__for_each_entry(maps, pos) {
    665		if (!__map__is_kmodule(pos))
    666			continue;
    667
    668		if (symbol_conf.buildid_mmap2) {
    669			size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
    670			event->mmap2.header.type = PERF_RECORD_MMAP2;
    671			event->mmap2.header.size = (sizeof(event->mmap2) -
    672						(sizeof(event->mmap2.filename) - size));
    673			memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
    674			event->mmap2.header.size += machine->id_hdr_size;
    675			event->mmap2.start = pos->start;
    676			event->mmap2.len   = pos->end - pos->start;
    677			event->mmap2.pid   = machine->pid;
    678
    679			memcpy(event->mmap2.filename, pos->dso->long_name,
    680			       pos->dso->long_name_len + 1);
    681
    682			perf_record_mmap2__read_build_id(&event->mmap2, false);
    683		} else {
    684			size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
    685			event->mmap.header.type = PERF_RECORD_MMAP;
    686			event->mmap.header.size = (sizeof(event->mmap) -
    687						(sizeof(event->mmap.filename) - size));
    688			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
    689			event->mmap.header.size += machine->id_hdr_size;
    690			event->mmap.start = pos->start;
    691			event->mmap.len   = pos->end - pos->start;
    692			event->mmap.pid   = machine->pid;
    693
    694			memcpy(event->mmap.filename, pos->dso->long_name,
    695			       pos->dso->long_name_len + 1);
    696		}
    697
    698		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
    699			rc = -1;
    700			break;
    701		}
    702	}
    703
    704	free(event);
    705	return rc;
    706}
    707
    708static int filter_task(const struct dirent *dirent)
    709{
    710	return isdigit(dirent->d_name[0]);
    711}
    712
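/*
 * Synthesize COMM/FORK/NAMESPACES (and, when requested, MMAP2) events for one
 * task. With !full only the given pid is described; with full every entry
 * under /proc/<pid>/task is walked, skipping threads that exit mid-scan.
 */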
    713static int __event__synthesize_thread(union perf_event *comm_event,
    714				      union perf_event *mmap_event,
    715				      union perf_event *fork_event,
    716				      union perf_event *namespaces_event,
    717				      pid_t pid, int full, perf_event__handler_t process,
    718				      struct perf_tool *tool, struct machine *machine,
    719				      bool needs_mmap, bool mmap_data)
    720{
    721	char filename[PATH_MAX];
    722	struct dirent **dirent;
    723	pid_t tgid, ppid;
    724	int rc = 0;
    725	int i, n;
    726
    727	/* special case: only send one comm event using passed in pid */
    728	if (!full) {
    729		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
    730						   process, machine);
    731
    732		if (tgid == -1)
    733			return -1;
    734
    735		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
    736						      tgid, process, machine) < 0)
    737			return -1;
    738
    739		/*
    740		 * send mmap only for thread group leader
    741		 * see thread__init_maps()
    742		 */
    743		if (pid == tgid && needs_mmap &&
    744		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
    745						       process, machine, mmap_data))
    746			return -1;
    747
    748		return 0;
    749	}
    750
    751	if (machine__is_default_guest(machine))
    752		return 0;
    753
    754	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
    755		 machine->root_dir, pid);
    756
    757	n = scandir(filename, &dirent, filter_task, NULL);
    758	if (n < 0)
    759		return n;
    760
    761	for (i = 0; i < n; i++) {
    762		char *end;
    763		pid_t _pid;
    764		bool kernel_thread = false;
    765
    766		_pid = strtol(dirent[i]->d_name, &end, 10);
    767		if (*end)
    768			continue;
    769
    770		/* some threads may exit just after scan, ignore it */
    771		if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
    772					     &tgid, &ppid, &kernel_thread) != 0)
    773			continue;
    774
    775		rc = -1;
    776		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
    777						ppid, process, machine) < 0)
    778			break;
    779
    780		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
    781						      tgid, process, machine) < 0)
    782			break;
    783
    784		/*
    785		 * Send the prepared comm event
    786		 */
    787		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
    788			break;
    789
    790		rc = 0;
    791		if (_pid == pid && !kernel_thread && needs_mmap) {
    792			/* process the parent's maps too */
    793			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
    794						process, machine, mmap_data);
    795			if (rc)
    796				break;
    797		}
    798	}
    799
    800	for (i = 0; i < n; i++)
    801		zfree(&dirent[i]);
    802	free(dirent);
    803
    804	return rc;
    805}
    806
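/*
 * Synthesize events for an explicit thread map. If a thread's group leader is
 * not part of the map, events are generated for the leader as well.
 */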
    807int perf_event__synthesize_thread_map(struct perf_tool *tool,
    808				      struct perf_thread_map *threads,
    809				      perf_event__handler_t process,
    810				      struct machine *machine,
    811				      bool needs_mmap, bool mmap_data)
    812{
    813	union perf_event *comm_event, *mmap_event, *fork_event;
    814	union perf_event *namespaces_event;
    815	int err = -1, thread, j;
    816
    817	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
    818	if (comm_event == NULL)
    819		goto out;
    820
    821	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
    822	if (mmap_event == NULL)
    823		goto out_free_comm;
    824
    825	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
    826	if (fork_event == NULL)
    827		goto out_free_mmap;
    828
    829	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
    830				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
    831				  machine->id_hdr_size);
    832	if (namespaces_event == NULL)
    833		goto out_free_fork;
    834
    835	err = 0;
    836	for (thread = 0; thread < threads->nr; ++thread) {
    837		if (__event__synthesize_thread(comm_event, mmap_event,
    838					       fork_event, namespaces_event,
    839					       perf_thread_map__pid(threads, thread), 0,
    840					       process, tool, machine,
    841					       needs_mmap, mmap_data)) {
    842			err = -1;
    843			break;
    844		}
    845
    846		/*
    847		 * comm.pid is set to thread group id by
    848		 * perf_event__synthesize_comm
    849		 */
    850		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
    851			bool need_leader = true;
    852
    853			/* is thread group leader in thread_map? */
    854			for (j = 0; j < threads->nr; ++j) {
    855				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
    856					need_leader = false;
    857					break;
    858				}
    859			}
    860
    861			/* if not, generate events for it */
    862			if (need_leader &&
    863			    __event__synthesize_thread(comm_event, mmap_event,
    864						       fork_event, namespaces_event,
    865						       comm_event->comm.pid, 0,
    866						       process, tool, machine,
    867						       needs_mmap, mmap_data)) {
    868				err = -1;
    869				break;
    870			}
    871		}
    872	}
    873	free(namespaces_event);
    874out_free_fork:
    875	free(fork_event);
    876out_free_mmap:
    877	free(mmap_event);
    878out_free_comm:
    879	free(comm_event);
    880out:
    881	return err;
    882}
    883
    884static int __perf_event__synthesize_threads(struct perf_tool *tool,
    885					    perf_event__handler_t process,
    886					    struct machine *machine,
    887					    bool needs_mmap,
    888					    bool mmap_data,
    889					    struct dirent **dirent,
    890					    int start,
    891					    int num)
    892{
    893	union perf_event *comm_event, *mmap_event, *fork_event;
    894	union perf_event *namespaces_event;
    895	int err = -1;
    896	char *end;
    897	pid_t pid;
    898	int i;
    899
    900	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
    901	if (comm_event == NULL)
    902		goto out;
    903
    904	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
    905	if (mmap_event == NULL)
    906		goto out_free_comm;
    907
    908	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
    909	if (fork_event == NULL)
    910		goto out_free_mmap;
    911
    912	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
    913				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
    914				  machine->id_hdr_size);
    915	if (namespaces_event == NULL)
    916		goto out_free_fork;
    917
    918	for (i = start; i < start + num; i++) {
    919		if (!isdigit(dirent[i]->d_name[0]))
    920			continue;
    921
    922		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
    923		/* only interested in proper numerical dirents */
    924		if (*end)
    925			continue;
    926		/*
    927		 * We may race with exiting thread, so don't stop just because
    928		 * one thread couldn't be synthesized.
    929		 */
    930		__event__synthesize_thread(comm_event, mmap_event, fork_event,
    931					   namespaces_event, pid, 1, process,
    932					   tool, machine, needs_mmap, mmap_data);
    933	}
    934	err = 0;
    935
    936	free(namespaces_event);
    937out_free_fork:
    938	free(fork_event);
    939out_free_mmap:
    940	free(mmap_event);
    941out_free_comm:
    942	free(comm_event);
    943out:
    944	return err;
    945}
    946
    947struct synthesize_threads_arg {
    948	struct perf_tool *tool;
    949	perf_event__handler_t process;
    950	struct machine *machine;
    951	bool needs_mmap;
    952	bool mmap_data;
    953	struct dirent **dirent;
    954	int num;
    955	int start;
    956};
    957
    958static void *synthesize_threads_worker(void *arg)
    959{
    960	struct synthesize_threads_arg *args = arg;
    961
    962	__perf_event__synthesize_threads(args->tool, args->process,
    963					 args->machine,
    964					 args->needs_mmap, args->mmap_data,
    965					 args->dirent,
    966					 args->start, args->num);
    967	return NULL;
    968}
    969
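/*
 * Synthesize events for every task under <root_dir>/proc. The dirent list is
 * split across up to nr_threads_synthesize worker threads (the online CPU
 * count when UINT_MAX is passed), each running
 * __perf_event__synthesize_threads() on its slice.
 */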
    970int perf_event__synthesize_threads(struct perf_tool *tool,
    971				   perf_event__handler_t process,
    972				   struct machine *machine,
    973				   bool needs_mmap, bool mmap_data,
    974				   unsigned int nr_threads_synthesize)
    975{
    976	struct synthesize_threads_arg *args = NULL;
    977	pthread_t *synthesize_threads = NULL;
    978	char proc_path[PATH_MAX];
    979	struct dirent **dirent;
    980	int num_per_thread;
    981	int m, n, i, j;
    982	int thread_nr;
    983	int base = 0;
    984	int err = -1;
    985
    986
    987	if (machine__is_default_guest(machine))
    988		return 0;
    989
    990	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
    991	n = scandir(proc_path, &dirent, filter_task, NULL);
    992	if (n < 0)
    993		return err;
    994
    995	if (nr_threads_synthesize == UINT_MAX)
    996		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
    997	else
    998		thread_nr = nr_threads_synthesize;
    999
   1000	if (thread_nr <= 1) {
   1001		err = __perf_event__synthesize_threads(tool, process,
   1002						       machine,
   1003						       needs_mmap, mmap_data,
   1004						       dirent, base, n);
   1005		goto free_dirent;
   1006	}
   1007	if (thread_nr > n)
   1008		thread_nr = n;
   1009
    1010	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
   1011	if (synthesize_threads == NULL)
   1012		goto free_dirent;
   1013
    1014	args = calloc(thread_nr, sizeof(*args));
   1015	if (args == NULL)
   1016		goto free_threads;
   1017
   1018	num_per_thread = n / thread_nr;
   1019	m = n % thread_nr;
   1020	for (i = 0; i < thread_nr; i++) {
   1021		args[i].tool = tool;
   1022		args[i].process = process;
   1023		args[i].machine = machine;
   1024		args[i].needs_mmap = needs_mmap;
   1025		args[i].mmap_data = mmap_data;
   1026		args[i].dirent = dirent;
   1027	}
   1028	for (i = 0; i < m; i++) {
   1029		args[i].num = num_per_thread + 1;
   1030		args[i].start = i * args[i].num;
   1031	}
   1032	if (i != 0)
   1033		base = args[i-1].start + args[i-1].num;
   1034	for (j = i; j < thread_nr; j++) {
   1035		args[j].num = num_per_thread;
   1036		args[j].start = base + (j - i) * args[i].num;
   1037	}
   1038
   1039	for (i = 0; i < thread_nr; i++) {
   1040		if (pthread_create(&synthesize_threads[i], NULL,
   1041				   synthesize_threads_worker, &args[i]))
   1042			goto out_join;
   1043	}
   1044	err = 0;
   1045out_join:
   1046	for (i = 0; i < thread_nr; i++)
   1047		pthread_join(synthesize_threads[i], NULL);
   1048	free(args);
   1049free_threads:
   1050	free(synthesize_threads);
   1051free_dirent:
   1052	for (i = 0; i < n; i++)
   1053		zfree(&dirent[i]);
   1054	free(dirent);
   1055
   1056	return err;
   1057}
   1058
   1059int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
   1060					      perf_event__handler_t process __maybe_unused,
   1061					      struct machine *machine __maybe_unused)
   1062{
   1063	return 0;
   1064}
   1065
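/*
 * Synthesize the MMAP/MMAP2 event describing the kernel text map itself,
 * using the ref_reloc_sym address as the event's pgoff.
 */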
   1066static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
   1067						perf_event__handler_t process,
   1068						struct machine *machine)
   1069{
   1070	union perf_event *event;
   1071	size_t size = symbol_conf.buildid_mmap2 ?
   1072			sizeof(event->mmap2) : sizeof(event->mmap);
   1073	struct map *map = machine__kernel_map(machine);
   1074	struct kmap *kmap;
   1075	int err;
   1076
   1077	if (map == NULL)
   1078		return -1;
   1079
   1080	kmap = map__kmap(map);
   1081	if (!kmap->ref_reloc_sym)
   1082		return -1;
   1083
   1084	/*
    1085	 * We should get this from /sys/kernel/sections/.text, but until that is
    1086	 * available use this, and once it is available keep this as a fallback
    1087	 * for older kernels.
   1088	 */
   1089	event = zalloc(size + machine->id_hdr_size);
   1090	if (event == NULL) {
   1091		pr_debug("Not enough memory synthesizing mmap event "
    1092			 "for the kernel map\n");
   1093		return -1;
   1094	}
   1095
   1096	if (machine__is_host(machine)) {
   1097		/*
   1098		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
   1099		 * see kernel/perf_event.c __perf_event_mmap
   1100		 */
   1101		event->header.misc = PERF_RECORD_MISC_KERNEL;
   1102	} else {
   1103		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
   1104	}
   1105
   1106	if (symbol_conf.buildid_mmap2) {
   1107		size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
   1108				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
   1109		size = PERF_ALIGN(size, sizeof(u64));
   1110		event->mmap2.header.type = PERF_RECORD_MMAP2;
   1111		event->mmap2.header.size = (sizeof(event->mmap2) -
   1112				(sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
   1113		event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
   1114		event->mmap2.start = map->start;
    1115		event->mmap2.len   = map->end - event->mmap2.start;
   1116		event->mmap2.pid   = machine->pid;
   1117
   1118		perf_record_mmap2__read_build_id(&event->mmap2, true);
   1119	} else {
   1120		size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
   1121				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
   1122		size = PERF_ALIGN(size, sizeof(u64));
   1123		event->mmap.header.type = PERF_RECORD_MMAP;
   1124		event->mmap.header.size = (sizeof(event->mmap) -
   1125				(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
   1126		event->mmap.pgoff = kmap->ref_reloc_sym->addr;
   1127		event->mmap.start = map->start;
   1128		event->mmap.len   = map->end - event->mmap.start;
   1129		event->mmap.pid   = machine->pid;
   1130	}
   1131
   1132	err = perf_tool__process_synth_event(tool, event, machine, process);
   1133	free(event);
   1134
   1135	return err;
   1136}
   1137
   1138int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
   1139				       perf_event__handler_t process,
   1140				       struct machine *machine)
   1141{
   1142	int err;
   1143
   1144	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
   1145	if (err < 0)
   1146		return err;
   1147
   1148	return perf_event__synthesize_extra_kmaps(tool, process, machine);
   1149}
   1150
   1151int perf_event__synthesize_thread_map2(struct perf_tool *tool,
   1152				      struct perf_thread_map *threads,
   1153				      perf_event__handler_t process,
   1154				      struct machine *machine)
   1155{
   1156	union perf_event *event;
   1157	int i, err, size;
   1158
   1159	size  = sizeof(event->thread_map);
   1160	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
   1161
   1162	event = zalloc(size);
   1163	if (!event)
   1164		return -ENOMEM;
   1165
   1166	event->header.type = PERF_RECORD_THREAD_MAP;
   1167	event->header.size = size;
   1168	event->thread_map.nr = threads->nr;
   1169
   1170	for (i = 0; i < threads->nr; i++) {
   1171		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
   1172		char *comm = perf_thread_map__comm(threads, i);
   1173
   1174		if (!comm)
   1175			comm = (char *) "";
   1176
   1177		entry->pid = perf_thread_map__pid(threads, i);
   1178		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
   1179	}
   1180
   1181	err = process(tool, event, NULL, machine);
   1182
   1183	free(event);
   1184	return err;
   1185}
   1186
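/*
 * CPU maps are synthesized either as a plain list of CPU numbers or as a
 * bitmask, whichever encoding is smaller; see cpu_map_data__alloc() below.
 */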
   1187static void synthesize_cpus(struct cpu_map_entries *cpus,
   1188			    struct perf_cpu_map *map)
   1189{
   1190	int i, map_nr = perf_cpu_map__nr(map);
   1191
   1192	cpus->nr = map_nr;
   1193
   1194	for (i = 0; i < map_nr; i++)
   1195		cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
   1196}
   1197
   1198static void synthesize_mask(struct perf_record_record_cpu_map *mask,
   1199			    struct perf_cpu_map *map, int max)
   1200{
   1201	int i;
   1202
   1203	mask->nr = BITS_TO_LONGS(max);
   1204	mask->long_size = sizeof(long);
   1205
   1206	for (i = 0; i < perf_cpu_map__nr(map); i++)
   1207		set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
   1208}
   1209
   1210static size_t cpus_size(struct perf_cpu_map *map)
   1211{
   1212	return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
   1213}
   1214
   1215static size_t mask_size(struct perf_cpu_map *map, int *max)
   1216{
   1217	int i;
   1218
   1219	*max = 0;
   1220
   1221	for (i = 0; i < perf_cpu_map__nr(map); i++) {
    1222		/* bit position of the cpu is the cpu number + 1 */
   1223		int bit = perf_cpu_map__cpu(map, i).cpu + 1;
   1224
   1225		if (bit > *max)
   1226			*max = bit;
   1227	}
   1228
   1229	return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
   1230}
   1231
   1232void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
   1233{
   1234	size_t size_cpus, size_mask;
   1235	bool is_dummy = perf_cpu_map__empty(map);
   1236
   1237	/*
   1238	 * Both array and mask data have variable size based
   1239	 * on the number of cpus and their actual values.
   1240	 * The size of the 'struct perf_record_cpu_map_data' is:
   1241	 *
   1242	 *   array = size of 'struct cpu_map_entries' +
   1243	 *           number of cpus * sizeof(u64)
   1244	 *
   1245	 *   mask  = size of 'struct perf_record_record_cpu_map' +
   1246	 *           maximum cpu bit converted to size of longs
   1247	 *
   1248	 * and finally + the size of 'struct perf_record_cpu_map_data'.
   1249	 */
   1250	size_cpus = cpus_size(map);
   1251	size_mask = mask_size(map, max);
   1252
   1253	if (is_dummy || (size_cpus < size_mask)) {
   1254		*size += size_cpus;
   1255		*type  = PERF_CPU_MAP__CPUS;
   1256	} else {
   1257		*size += size_mask;
   1258		*type  = PERF_CPU_MAP__MASK;
   1259	}
   1260
   1261	*size += sizeof(struct perf_record_cpu_map_data);
   1262	*size = PERF_ALIGN(*size, sizeof(u64));
   1263	return zalloc(*size);
   1264}
   1265
   1266void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
   1267			      u16 type, int max)
   1268{
   1269	data->type = type;
   1270
   1271	switch (type) {
   1272	case PERF_CPU_MAP__CPUS:
   1273		synthesize_cpus((struct cpu_map_entries *) data->data, map);
   1274		break;
   1275	case PERF_CPU_MAP__MASK:
    1276		synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
		break;
    1277	default:
   1278		break;
   1279	}
   1280}
   1281
   1282static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
   1283{
   1284	size_t size = sizeof(struct perf_record_cpu_map);
   1285	struct perf_record_cpu_map *event;
   1286	int max;
   1287	u16 type;
   1288
   1289	event = cpu_map_data__alloc(map, &size, &type, &max);
   1290	if (!event)
   1291		return NULL;
   1292
   1293	event->header.type = PERF_RECORD_CPU_MAP;
   1294	event->header.size = size;
   1295	event->data.type   = type;
   1296
   1297	cpu_map_data__synthesize(&event->data, map, type, max);
   1298	return event;
   1299}
   1300
   1301int perf_event__synthesize_cpu_map(struct perf_tool *tool,
   1302				   struct perf_cpu_map *map,
   1303				   perf_event__handler_t process,
   1304				   struct machine *machine)
   1305{
   1306	struct perf_record_cpu_map *event;
   1307	int err;
   1308
   1309	event = cpu_map_event__new(map);
   1310	if (!event)
   1311		return -ENOMEM;
   1312
   1313	err = process(tool, (union perf_event *) event, NULL, machine);
   1314
   1315	free(event);
   1316	return err;
   1317}
   1318
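/*
 * Synthesize a PERF_RECORD_STAT_CONFIG event carrying the aggregation mode,
 * interval and scale settings of a 'perf stat' session.
 */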
   1319int perf_event__synthesize_stat_config(struct perf_tool *tool,
   1320				       struct perf_stat_config *config,
   1321				       perf_event__handler_t process,
   1322				       struct machine *machine)
   1323{
   1324	struct perf_record_stat_config *event;
   1325	int size, i = 0, err;
   1326
   1327	size  = sizeof(*event);
   1328	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
   1329
   1330	event = zalloc(size);
   1331	if (!event)
   1332		return -ENOMEM;
   1333
   1334	event->header.type = PERF_RECORD_STAT_CONFIG;
   1335	event->header.size = size;
   1336	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
   1337
   1338#define ADD(__term, __val)					\
   1339	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
   1340	event->data[i].val = __val;				\
   1341	i++;
   1342
   1343	ADD(AGGR_MODE,	config->aggr_mode)
   1344	ADD(INTERVAL,	config->interval)
   1345	ADD(SCALE,	config->scale)
   1346
   1347	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
   1348		  "stat config terms unbalanced\n");
   1349#undef ADD
   1350
   1351	err = process(tool, (union perf_event *) event, NULL, machine);
   1352
   1353	free(event);
   1354	return err;
   1355}
   1356
   1357int perf_event__synthesize_stat(struct perf_tool *tool,
   1358				struct perf_cpu cpu, u32 thread, u64 id,
   1359				struct perf_counts_values *count,
   1360				perf_event__handler_t process,
   1361				struct machine *machine)
   1362{
   1363	struct perf_record_stat event;
   1364
   1365	event.header.type = PERF_RECORD_STAT;
   1366	event.header.size = sizeof(event);
   1367	event.header.misc = 0;
   1368
   1369	event.id        = id;
   1370	event.cpu       = cpu.cpu;
   1371	event.thread    = thread;
   1372	event.val       = count->val;
   1373	event.ena       = count->ena;
   1374	event.run       = count->run;
   1375
   1376	return process(tool, (union perf_event *) &event, NULL, machine);
   1377}
   1378
   1379int perf_event__synthesize_stat_round(struct perf_tool *tool,
   1380				      u64 evtime, u64 type,
   1381				      perf_event__handler_t process,
   1382				      struct machine *machine)
   1383{
   1384	struct perf_record_stat_round event;
   1385
   1386	event.header.type = PERF_RECORD_STAT_ROUND;
   1387	event.header.size = sizeof(event);
   1388	event.header.misc = 0;
   1389
   1390	event.time = evtime;
   1391	event.type = type;
   1392
   1393	return process(tool, (union perf_event *) &event, NULL, machine);
   1394}
   1395
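/*
 * Return the number of bytes a synthesized PERF_RECORD_SAMPLE occupies for
 * the given sample_type and read_format; kept in sync with
 * perf_event__synthesize_sample() below.
 */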
   1396size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
   1397{
   1398	size_t sz, result = sizeof(struct perf_record_sample);
   1399
   1400	if (type & PERF_SAMPLE_IDENTIFIER)
   1401		result += sizeof(u64);
   1402
   1403	if (type & PERF_SAMPLE_IP)
   1404		result += sizeof(u64);
   1405
   1406	if (type & PERF_SAMPLE_TID)
   1407		result += sizeof(u64);
   1408
   1409	if (type & PERF_SAMPLE_TIME)
   1410		result += sizeof(u64);
   1411
   1412	if (type & PERF_SAMPLE_ADDR)
   1413		result += sizeof(u64);
   1414
   1415	if (type & PERF_SAMPLE_ID)
   1416		result += sizeof(u64);
   1417
   1418	if (type & PERF_SAMPLE_STREAM_ID)
   1419		result += sizeof(u64);
   1420
   1421	if (type & PERF_SAMPLE_CPU)
   1422		result += sizeof(u64);
   1423
   1424	if (type & PERF_SAMPLE_PERIOD)
   1425		result += sizeof(u64);
   1426
   1427	if (type & PERF_SAMPLE_READ) {
   1428		result += sizeof(u64);
   1429		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
   1430			result += sizeof(u64);
   1431		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
   1432			result += sizeof(u64);
   1433		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
   1434		if (read_format & PERF_FORMAT_GROUP) {
   1435			sz = sample->read.group.nr *
   1436			     sizeof(struct sample_read_value);
   1437			result += sz;
   1438		} else {
   1439			result += sizeof(u64);
   1440		}
   1441	}
   1442
   1443	if (type & PERF_SAMPLE_CALLCHAIN) {
   1444		sz = (sample->callchain->nr + 1) * sizeof(u64);
   1445		result += sz;
   1446	}
   1447
   1448	if (type & PERF_SAMPLE_RAW) {
   1449		result += sizeof(u32);
   1450		result += sample->raw_size;
   1451	}
   1452
   1453	if (type & PERF_SAMPLE_BRANCH_STACK) {
   1454		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
   1455		/* nr, hw_idx */
   1456		sz += 2 * sizeof(u64);
   1457		result += sz;
   1458	}
   1459
   1460	if (type & PERF_SAMPLE_REGS_USER) {
   1461		if (sample->user_regs.abi) {
   1462			result += sizeof(u64);
   1463			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
   1464			result += sz;
   1465		} else {
   1466			result += sizeof(u64);
   1467		}
   1468	}
   1469
   1470	if (type & PERF_SAMPLE_STACK_USER) {
   1471		sz = sample->user_stack.size;
   1472		result += sizeof(u64);
   1473		if (sz) {
   1474			result += sz;
   1475			result += sizeof(u64);
   1476		}
   1477	}
   1478
   1479	if (type & PERF_SAMPLE_WEIGHT_TYPE)
   1480		result += sizeof(u64);
   1481
   1482	if (type & PERF_SAMPLE_DATA_SRC)
   1483		result += sizeof(u64);
   1484
   1485	if (type & PERF_SAMPLE_TRANSACTION)
   1486		result += sizeof(u64);
   1487
   1488	if (type & PERF_SAMPLE_REGS_INTR) {
   1489		if (sample->intr_regs.abi) {
   1490			result += sizeof(u64);
   1491			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
   1492			result += sz;
   1493		} else {
   1494			result += sizeof(u64);
   1495		}
   1496	}
   1497
   1498	if (type & PERF_SAMPLE_PHYS_ADDR)
   1499		result += sizeof(u64);
   1500
   1501	if (type & PERF_SAMPLE_CGROUP)
   1502		result += sizeof(u64);
   1503
   1504	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
   1505		result += sizeof(u64);
   1506
   1507	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
   1508		result += sizeof(u64);
   1509
   1510	if (type & PERF_SAMPLE_AUX) {
   1511		result += sizeof(u64);
   1512		result += sample->aux_sample.size;
   1513	}
   1514
   1515	return result;
   1516}
   1517
   1518void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
   1519					       __u64 *array, u64 type __maybe_unused)
   1520{
   1521	*array = data->weight;
   1522}
   1523
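/*
 * Pack @sample back into @event->sample.array, writing the fields in the
 * same order the kernel emits them for the given sample_type/read_format.
 */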
   1524int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
   1525				  const struct perf_sample *sample)
   1526{
   1527	__u64 *array;
   1528	size_t sz;
   1529	/*
   1530	 * used for cross-endian analysis. See git commit 65014ab3
   1531	 * for why this goofiness is needed.
   1532	 */
   1533	union u64_swap u;
   1534
   1535	array = event->sample.array;
   1536
   1537	if (type & PERF_SAMPLE_IDENTIFIER) {
   1538		*array = sample->id;
   1539		array++;
   1540	}
   1541
   1542	if (type & PERF_SAMPLE_IP) {
   1543		*array = sample->ip;
   1544		array++;
   1545	}
   1546
   1547	if (type & PERF_SAMPLE_TID) {
   1548		u.val32[0] = sample->pid;
   1549		u.val32[1] = sample->tid;
   1550		*array = u.val64;
   1551		array++;
   1552	}
   1553
   1554	if (type & PERF_SAMPLE_TIME) {
   1555		*array = sample->time;
   1556		array++;
   1557	}
   1558
   1559	if (type & PERF_SAMPLE_ADDR) {
   1560		*array = sample->addr;
   1561		array++;
   1562	}
   1563
   1564	if (type & PERF_SAMPLE_ID) {
   1565		*array = sample->id;
   1566		array++;
   1567	}
   1568
   1569	if (type & PERF_SAMPLE_STREAM_ID) {
   1570		*array = sample->stream_id;
   1571		array++;
   1572	}
   1573
   1574	if (type & PERF_SAMPLE_CPU) {
   1575		u.val32[0] = sample->cpu;
   1576		u.val32[1] = 0;
   1577		*array = u.val64;
   1578		array++;
   1579	}
   1580
   1581	if (type & PERF_SAMPLE_PERIOD) {
   1582		*array = sample->period;
   1583		array++;
   1584	}
   1585
   1586	if (type & PERF_SAMPLE_READ) {
   1587		if (read_format & PERF_FORMAT_GROUP)
   1588			*array = sample->read.group.nr;
   1589		else
   1590			*array = sample->read.one.value;
   1591		array++;
   1592
   1593		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
   1594			*array = sample->read.time_enabled;
   1595			array++;
   1596		}
   1597
   1598		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
   1599			*array = sample->read.time_running;
   1600			array++;
   1601		}
   1602
   1603		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
   1604		if (read_format & PERF_FORMAT_GROUP) {
   1605			sz = sample->read.group.nr *
   1606			     sizeof(struct sample_read_value);
   1607			memcpy(array, sample->read.group.values, sz);
   1608			array = (void *)array + sz;
   1609		} else {
   1610			*array = sample->read.one.id;
   1611			array++;
   1612		}
   1613	}
   1614
   1615	if (type & PERF_SAMPLE_CALLCHAIN) {
   1616		sz = (sample->callchain->nr + 1) * sizeof(u64);
   1617		memcpy(array, sample->callchain, sz);
   1618		array = (void *)array + sz;
   1619	}
   1620
   1621	if (type & PERF_SAMPLE_RAW) {
   1622		u.val32[0] = sample->raw_size;
   1623		*array = u.val64;
   1624		array = (void *)array + sizeof(u32);
   1625
   1626		memcpy(array, sample->raw_data, sample->raw_size);
   1627		array = (void *)array + sample->raw_size;
   1628	}
   1629
   1630	if (type & PERF_SAMPLE_BRANCH_STACK) {
   1631		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
   1632		/* nr, hw_idx */
   1633		sz += 2 * sizeof(u64);
   1634		memcpy(array, sample->branch_stack, sz);
   1635		array = (void *)array + sz;
   1636	}
   1637
   1638	if (type & PERF_SAMPLE_REGS_USER) {
   1639		if (sample->user_regs.abi) {
   1640			*array++ = sample->user_regs.abi;
   1641			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
   1642			memcpy(array, sample->user_regs.regs, sz);
   1643			array = (void *)array + sz;
   1644		} else {
   1645			*array++ = 0;
   1646		}
   1647	}
   1648
   1649	if (type & PERF_SAMPLE_STACK_USER) {
   1650		sz = sample->user_stack.size;
   1651		*array++ = sz;
   1652		if (sz) {
   1653			memcpy(array, sample->user_stack.data, sz);
   1654			array = (void *)array + sz;
   1655			*array++ = sz;
   1656		}
   1657	}
   1658
   1659	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
   1660		arch_perf_synthesize_sample_weight(sample, array, type);
   1661		array++;
   1662	}
   1663
   1664	if (type & PERF_SAMPLE_DATA_SRC) {
   1665		*array = sample->data_src;
   1666		array++;
   1667	}
   1668
   1669	if (type & PERF_SAMPLE_TRANSACTION) {
   1670		*array = sample->transaction;
   1671		array++;
   1672	}
   1673
   1674	if (type & PERF_SAMPLE_REGS_INTR) {
   1675		if (sample->intr_regs.abi) {
   1676			*array++ = sample->intr_regs.abi;
   1677			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
   1678			memcpy(array, sample->intr_regs.regs, sz);
   1679			array = (void *)array + sz;
   1680		} else {
   1681			*array++ = 0;
   1682		}
   1683	}
   1684
   1685	if (type & PERF_SAMPLE_PHYS_ADDR) {
   1686		*array = sample->phys_addr;
   1687		array++;
   1688	}
   1689
   1690	if (type & PERF_SAMPLE_CGROUP) {
   1691		*array = sample->cgroup;
   1692		array++;
   1693	}
   1694
   1695	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
   1696		*array = sample->data_page_size;
   1697		array++;
   1698	}
   1699
   1700	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
   1701		*array = sample->code_page_size;
   1702		array++;
   1703	}
   1704
   1705	if (type & PERF_SAMPLE_AUX) {
   1706		sz = sample->aux_sample.size;
   1707		*array++ = sz;
   1708		memcpy(array, sample->aux_sample.data, sz);
   1709		array = (void *)array + sz;
   1710	}
   1711
   1712	return 0;
   1713}
   1714
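/*
 * Emit PERF_RECORD_ID_INDEX events mapping each sample id to its evsel index,
 * cpu and tid. Large evlists are split across several events so the 16-bit
 * header size is never exceeded.
 */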
   1715int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
   1716				    struct evlist *evlist, struct machine *machine)
   1717{
   1718	union perf_event *ev;
   1719	struct evsel *evsel;
   1720	size_t nr = 0, i = 0, sz, max_nr, n;
   1721	int err;
   1722
   1723	pr_debug2("Synthesizing id index\n");
   1724
   1725	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
   1726		 sizeof(struct id_index_entry);
   1727
   1728	evlist__for_each_entry(evlist, evsel)
   1729		nr += evsel->core.ids;
   1730
   1731	n = nr > max_nr ? max_nr : nr;
   1732	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
   1733	ev = zalloc(sz);
   1734	if (!ev)
   1735		return -ENOMEM;
   1736
   1737	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
   1738	ev->id_index.header.size = sz;
   1739	ev->id_index.nr = n;
   1740
   1741	evlist__for_each_entry(evlist, evsel) {
   1742		u32 j;
   1743
   1744		for (j = 0; j < evsel->core.ids; j++) {
   1745			struct id_index_entry *e;
   1746			struct perf_sample_id *sid;
   1747
   1748			if (i >= n) {
   1749				err = process(tool, ev, NULL, machine);
   1750				if (err)
   1751					goto out_err;
   1752				nr -= n;
   1753				i = 0;
   1754			}
   1755
   1756			e = &ev->id_index.entries[i++];
   1757
   1758			e->id = evsel->core.id[j];
   1759
   1760			sid = evlist__id2sid(evlist, e->id);
   1761			if (!sid) {
   1762				free(ev);
   1763				return -ENOENT;
   1764			}
   1765
   1766			e->idx = sid->idx;
   1767			e->cpu = sid->cpu.cpu;
   1768			e->tid = sid->tid;
   1769		}
   1770	}
   1771
   1772	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
   1773	ev->id_index.header.size = sz;
   1774	ev->id_index.nr = nr;
   1775
   1776	err = process(tool, ev, NULL, machine);
   1777out_err:
   1778	free(ev);
   1779
   1780	return err;
   1781}
   1782
   1783int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
   1784				  struct target *target, struct perf_thread_map *threads,
   1785				  perf_event__handler_t process, bool needs_mmap,
   1786				  bool data_mmap, unsigned int nr_threads_synthesize)
   1787{
   1788	/*
    1789	 * When perf runs in a non-root PID namespace and the namespace's proc FS
    1790	 * is not mounted, nsinfo__is_in_root_namespace() returns false.
    1791	 * In this case the proc FS comes from the parent namespace, so the
    1792	 * perf tool would wrongly gather process info from its parent PID
    1793	 * namespace.
    1794	 *
    1795	 * To avoid the confusion of running in a child PID namespace while
    1796	 * synthesizing thread info from the parent PID namespace, return
    1797	 * failure with a warning.
   1798	 */
   1799	if (!nsinfo__is_in_root_namespace()) {
    1800		pr_err("Perf runs in a non-root PID namespace but it tries to ");
    1801		pr_err("gather process info from its parent PID namespace.\n");
    1802		pr_err("Please mount the proc file system properly, e.g. ");
    1803		pr_err("add the option '--mount-proc' to the unshare command.\n");
   1804		return -EPERM;
   1805	}
   1806
   1807	if (target__has_task(target))
   1808		return perf_event__synthesize_thread_map(tool, threads, process, machine,
   1809							 needs_mmap, data_mmap);
   1810	else if (target__has_cpu(target))
   1811		return perf_event__synthesize_threads(tool, process, machine,
   1812						      needs_mmap, data_mmap,
   1813						      nr_threads_synthesize);
   1814	/* command specified */
   1815	return 0;
   1816}
   1817
   1818int machine__synthesize_threads(struct machine *machine, struct target *target,
   1819				struct perf_thread_map *threads, bool needs_mmap,
   1820				bool data_mmap, unsigned int nr_threads_synthesize)
   1821{
   1822	return __machine__synthesize_threads(machine, NULL, target, threads,
   1823					     perf_event__process, needs_mmap,
   1824					     data_mmap, nr_threads_synthesize);
   1825}
   1826
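/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE event with room for @size bytes
 * of payload, rounded up to u64 alignment, and fill in the common header,
 * update type and event id fields.
 */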
   1827static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
   1828{
   1829	struct perf_record_event_update *ev;
   1830
   1831	size += sizeof(*ev);
   1832	size  = PERF_ALIGN(size, sizeof(u64));
   1833
   1834	ev = zalloc(size);
   1835	if (ev) {
   1836		ev->header.type = PERF_RECORD_EVENT_UPDATE;
   1837		ev->header.size = (u16)size;
   1838		ev->type	= type;
   1839		ev->id		= id;
   1840	}
   1841	return ev;
   1842}
   1843
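/*
 * Emit an event update record carrying the evsel's unit string, keyed by its
 * first sample id.
 */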
   1844int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
   1845					     perf_event__handler_t process)
   1846{
   1847	size_t size = strlen(evsel->unit);
   1848	struct perf_record_event_update *ev;
   1849	int err;
   1850
   1851	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
   1852	if (ev == NULL)
   1853		return -ENOMEM;
   1854
   1855	strlcpy(ev->data, evsel->unit, size + 1);
   1856	err = process(tool, (union perf_event *)ev, NULL, NULL);
   1857	free(ev);
   1858	return err;
   1859}
   1860
   1861int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
   1862					      perf_event__handler_t process)
   1863{
   1864	struct perf_record_event_update *ev;
   1865	struct perf_record_event_update_scale *ev_data;
   1866	int err;
   1867
   1868	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
   1869	if (ev == NULL)
   1870		return -ENOMEM;
   1871
   1872	ev_data = (struct perf_record_event_update_scale *)ev->data;
   1873	ev_data->scale = evsel->scale;
   1874	err = process(tool, (union perf_event *)ev, NULL, NULL);
   1875	free(ev);
   1876	return err;
   1877}
   1878
   1879int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
   1880					     perf_event__handler_t process)
   1881{
   1882	struct perf_record_event_update *ev;
   1883	size_t len = strlen(evsel->name);
   1884	int err;
   1885
   1886	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
   1887	if (ev == NULL)
   1888		return -ENOMEM;
   1889
   1890	strlcpy(ev->data, evsel->name, len + 1);
   1891	err = process(tool, (union perf_event *)ev, NULL, NULL);
   1892	free(ev);
   1893	return err;
   1894}
   1895
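/*
 * Emit an event update record carrying the evsel's own cpu map.  Nothing is
 * synthesized when the evsel has no private cpu map.
 */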
   1896int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
   1897					     perf_event__handler_t process)
   1898{
   1899	size_t size = sizeof(struct perf_record_event_update);
   1900	struct perf_record_event_update *ev;
   1901	int max, err;
   1902	u16 type;
   1903
   1904	if (!evsel->core.own_cpus)
   1905		return 0;
   1906
   1907	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
   1908	if (!ev)
   1909		return -ENOMEM;
   1910
   1911	ev->header.type = PERF_RECORD_EVENT_UPDATE;
   1912	ev->header.size = (u16)size;
   1913	ev->type	= PERF_EVENT_UPDATE__CPUS;
   1914	ev->id		= evsel->core.id[0];
   1915
   1916	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
   1917				 evsel->core.own_cpus, type, max);
   1918
   1919	err = process(tool, (union perf_event *)ev, NULL, NULL);
   1920	free(ev);
   1921	return err;
   1922}
   1923
   1924int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
   1925				 perf_event__handler_t process)
   1926{
   1927	struct evsel *evsel;
   1928	int err = 0;
   1929
   1930	evlist__for_each_entry(evlist, evsel) {
   1931		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
   1932						  evsel->core.id, process);
   1933		if (err) {
   1934			pr_debug("failed to create perf header attribute\n");
   1935			return err;
   1936		}
   1937	}
   1938
   1939	return err;
   1940}
   1941
   1942static bool has_unit(struct evsel *evsel)
   1943{
   1944	return evsel->unit && *evsel->unit;
   1945}
   1946
   1947static bool has_scale(struct evsel *evsel)
   1948{
   1949	return evsel->scale != 1;
   1950}
   1951
   1952int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
   1953				      perf_event__handler_t process, bool is_pipe)
   1954{
   1955	struct evsel *evsel;
   1956	int err;
   1957
   1958	/*
	 * Synthesize additional event attributes not carried in the
	 * attr event itself: unit, scale and name.
   1961	 */
   1962	evlist__for_each_entry(evsel_list, evsel) {
   1963		if (!evsel->supported)
   1964			continue;
   1965
   1966		/*
		 * Synthesize unit and scale only if they are defined.
   1968		 */
   1969		if (has_unit(evsel)) {
   1970			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
   1971			if (err < 0) {
   1972				pr_err("Couldn't synthesize evsel unit.\n");
   1973				return err;
   1974			}
   1975		}
   1976
   1977		if (has_scale(evsel)) {
   1978			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
   1979			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
   1981				return err;
   1982			}
   1983		}
   1984
   1985		if (evsel->core.own_cpus) {
   1986			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
   1987			if (err < 0) {
   1988				pr_err("Couldn't synthesize evsel cpus.\n");
   1989				return err;
   1990			}
   1991		}
   1992
   1993		/*
   1994		 * Name is needed only for pipe output,
   1995		 * perf.data carries event names.
   1996		 */
   1997		if (is_pipe) {
   1998			err = perf_event__synthesize_event_update_name(tool, evsel, process);
   1999			if (err < 0) {
   2000				pr_err("Couldn't synthesize evsel name.\n");
   2001				return err;
   2002			}
   2003		}
   2004	}
   2005	return 0;
   2006}
   2007
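/*
 * Synthesize a PERF_RECORD_HEADER_ATTR event: the perf_event_attr padded to
 * u64 alignment followed by the array of sample ids.  Fails with -E2BIG when
 * the total size does not fit in the 16-bit event header size field.
 */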
   2008int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
   2009				u32 ids, u64 *id, perf_event__handler_t process)
   2010{
   2011	union perf_event *ev;
   2012	size_t size;
   2013	int err;
   2014
   2015	size = sizeof(struct perf_event_attr);
   2016	size = PERF_ALIGN(size, sizeof(u64));
   2017	size += sizeof(struct perf_event_header);
   2018	size += ids * sizeof(u64);
   2019
   2020	ev = zalloc(size);
   2021
   2022	if (ev == NULL)
   2023		return -ENOMEM;
   2024
   2025	ev->attr.attr = *attr;
   2026	memcpy(ev->attr.id, id, ids * sizeof(u64));
   2027
   2028	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
   2029	ev->attr.header.size = (u16)size;
   2030
   2031	if (ev->attr.header.size == size)
   2032		err = process(tool, ev, NULL, NULL);
   2033	else
   2034		err = -E2BIG;
   2035
   2036	free(ev);
   2037
   2038	return err;
   2039}
   2040
   2041int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
   2042					perf_event__handler_t process)
   2043{
   2044	union perf_event ev;
   2045	struct tracing_data *tdata;
   2046	ssize_t size = 0, aligned_size = 0, padding;
   2047	struct feat_fd ff;
   2048
   2049	/*
   2050	 * We are going to store the size of the data followed
	 * by the data contents. Since fd refers to a pipe,
   2052	 * we cannot seek back to store the size of the data once
   2053	 * we know it. Instead we:
   2054	 *
   2055	 * - write the tracing data to the temp file
   2056	 * - get/write the data size to pipe
   2057	 * - write the tracing data from the temp file
   2058	 *   to the pipe
   2059	 */
   2060	tdata = tracing_data_get(&evlist->core.entries, fd, true);
   2061	if (!tdata)
   2062		return -1;
   2063
   2064	memset(&ev, 0, sizeof(ev));
   2065
   2066	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
   2067	size = tdata->size;
   2068	aligned_size = PERF_ALIGN(size, sizeof(u64));
   2069	padding = aligned_size - size;
   2070	ev.tracing_data.header.size = sizeof(ev.tracing_data);
   2071	ev.tracing_data.size = aligned_size;
   2072
   2073	process(tool, &ev, NULL, NULL);
   2074
   2075	/*
   2076	 * The put function will copy all the tracing data
   2077	 * stored in temp file to the pipe.
   2078	 */
   2079	tracing_data_put(tdata);
   2080
   2081	ff = (struct feat_fd){ .fd = fd };
   2082	if (write_padded(&ff, NULL, 0, padding))
   2083		return -1;
   2084
   2085	return aligned_size;
   2086}
   2087
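/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for a DSO that was hit,
 * carrying its build id and long name (padded to NAME_ALIGN).
 */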
   2088int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
   2089				    perf_event__handler_t process, struct machine *machine)
   2090{
   2091	union perf_event ev;
   2092	size_t len;
   2093
   2094	if (!pos->hit)
   2095		return 0;
   2096
   2097	memset(&ev, 0, sizeof(ev));
   2098
   2099	len = pos->long_name_len + 1;
   2100	len = PERF_ALIGN(len, NAME_ALIGN);
   2101	memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
   2102	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
   2103	ev.build_id.header.misc = misc;
   2104	ev.build_id.pid = machine->pid;
   2105	ev.build_id.header.size = sizeof(ev.build_id) + len;
   2106	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
   2107
   2108	return process(tool, &ev, NULL, machine);
   2109}
   2110
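/*
 * Synthesize the metadata records needed for stat recording: the event
 * attributes (when @attrs is set), extra attributes, thread map, cpu map and
 * stat config.
 */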
   2111int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
   2112				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
   2113{
   2114	int err;
   2115
   2116	if (attrs) {
   2117		err = perf_event__synthesize_attrs(tool, evlist, process);
   2118		if (err < 0) {
   2119			pr_err("Couldn't synthesize attrs.\n");
   2120			return err;
   2121		}
   2122	}
   2123
	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	if (err < 0) {
		pr_err("Couldn't synthesize extra attributes.\n");
		return err;
	}

	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}
   2130
   2131	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
   2132	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
   2134		return err;
   2135	}
   2136
   2137	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
   2138	if (err < 0) {
   2139		pr_err("Couldn't synthesize config.\n");
   2140		return err;
   2141	}
   2142
   2143	return 0;
   2144}
   2145
   2146extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
   2147
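/*
 * Walk the session's header feature bitmap and emit one
 * PERF_RECORD_HEADER_FEATURE event per feature that provides a synthesize
 * callback, terminated by a HEADER_LAST_FEATURE marker record.
 */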
   2148int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
   2149				    struct evlist *evlist, perf_event__handler_t process)
   2150{
   2151	struct perf_header *header = &session->header;
   2152	struct perf_record_header_feature *fe;
   2153	struct feat_fd ff;
   2154	size_t sz, sz_hdr;
   2155	int feat, ret;
   2156
   2157	sz_hdr = sizeof(fe->header);
   2158	sz = sizeof(union perf_event);
   2159	/* get a nice alignment */
   2160	sz = PERF_ALIGN(sz, page_size);
   2161
   2162	memset(&ff, 0, sizeof(ff));
   2163
   2164	ff.buf = malloc(sz);
   2165	if (!ff.buf)
   2166		return -ENOMEM;
   2167
   2168	ff.size = sz - sz_hdr;
   2169	ff.ph = &session->header;
   2170
   2171	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
   2172		if (!feat_ops[feat].synthesize) {
			pr_debug("No synthesize callback for header feature %d\n", feat);
   2174			continue;
   2175		}
   2176
   2177		ff.offset = sizeof(*fe);
   2178
   2179		ret = feat_ops[feat].write(&ff, evlist);
   2180		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
   2181			pr_debug("Error writing feature\n");
   2182			continue;
   2183		}
   2184		/* ff.buf may have changed due to realloc in do_write() */
   2185		fe = ff.buf;
   2186		memset(fe, 0, sizeof(*fe));
   2187
   2188		fe->feat_id = feat;
   2189		fe->header.type = PERF_RECORD_HEADER_FEATURE;
   2190		fe->header.size = ff.offset;
   2191
   2192		ret = process(tool, ff.buf, NULL, NULL);
   2193		if (ret) {
   2194			free(ff.buf);
   2195			return ret;
   2196		}
   2197	}
   2198
   2199	/* Send HEADER_LAST_FEATURE mark. */
   2200	fe = ff.buf;
   2201	fe->feat_id     = HEADER_LAST_FEATURE;
   2202	fe->header.type = PERF_RECORD_HEADER_FEATURE;
   2203	fe->header.size = sizeof(*fe);
   2204
   2205	ret = process(tool, ff.buf, NULL, NULL);
   2206
   2207	free(ff.buf);
   2208	return ret;
   2209}
   2210
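/*
 * Synthesize the metadata that pipe mode cannot carry in a file header:
 * event attributes, header features and, when tracepoints are used, the
 * tracing data.  Returns the accumulated size reported by the individual
 * synthesizers, or a negative error.
 */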
   2211int perf_event__synthesize_for_pipe(struct perf_tool *tool,
   2212				    struct perf_session *session,
   2213				    struct perf_data *data,
   2214				    perf_event__handler_t process)
   2215{
   2216	int err;
   2217	int ret = 0;
   2218	struct evlist *evlist = session->evlist;
   2219
   2220	/*
	 * We need to synthesize events first, because some
	 * features work on top of them (on the report side).
   2223	 */
   2224	err = perf_event__synthesize_attrs(tool, evlist, process);
   2225	if (err < 0) {
   2226		pr_err("Couldn't synthesize attrs.\n");
   2227		return err;
   2228	}
   2229	ret += err;
   2230
   2231	err = perf_event__synthesize_features(tool, session, evlist, process);
   2232	if (err < 0) {
   2233		pr_err("Couldn't synthesize features.\n");
   2234		return err;
   2235	}
   2236	ret += err;
   2237
   2238	if (have_tracepoints(&evlist->core.entries)) {
   2239		int fd = perf_data__fd(data);
   2240
   2241		/*
		 * FIXME: err <= 0 here actually means that
		 * there were no tracepoints, so it's not really
		 * an error, just that we don't need to
		 * synthesize anything.  We really have to
		 * return this more properly and also
		 * propagate errors that currently call die().
   2248		 */
		err = perf_event__synthesize_tracing_data(tool, fd, evlist,
   2250							  process);
   2251		if (err <= 0) {
   2252			pr_err("Couldn't record tracing data.\n");
   2253			return err;
   2254		}
   2255		ret += err;
   2256	}
   2257
   2258	return ret;
   2259}
   2260
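/*
 * Parse a comma-separated list of synthesis options ("no"/"none", "all",
 * "task", "mmap", "cgroup") into a PERF_SYNTH_* bitmask.  "mmap" implies
 * "task"; an unrecognized token makes the whole string invalid (-1).
 */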
   2261int parse_synth_opt(char *synth)
   2262{
   2263	char *p, *q;
   2264	int ret = 0;
   2265
   2266	if (synth == NULL)
   2267		return -1;
   2268
   2269	for (q = synth; (p = strsep(&q, ",")); p = q) {
   2270		if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
   2271			return 0;
   2272
   2273		if (!strcasecmp(p, "all"))
   2274			return PERF_SYNTH_ALL;
   2275
   2276		if (!strcasecmp(p, "task"))
   2277			ret |= PERF_SYNTH_TASK;
   2278		else if (!strcasecmp(p, "mmap"))
   2279			ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
   2280		else if (!strcasecmp(p, "cgroup"))
   2281			ret |= PERF_SYNTH_CGROUP;
   2282		else
   2283			return -1;
   2284	}
   2285
   2286	return ret;
   2287}