1// SPDX-License-Identifier: GPL-2.0-only
2
3#include "util/cgroup.h"
4#include "util/data.h"
5#include "util/debug.h"
6#include "util/dso.h"
7#include "util/event.h"
8#include "util/evlist.h"
9#include "util/machine.h"
10#include "util/map.h"
11#include "util/map_symbol.h"
12#include "util/branch.h"
13#include "util/memswap.h"
14#include "util/namespaces.h"
15#include "util/session.h"
16#include "util/stat.h"
17#include "util/symbol.h"
18#include "util/synthetic-events.h"
19#include "util/target.h"
20#include "util/time-utils.h"
21#include <linux/bitops.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/zalloc.h>
25#include <linux/perf_event.h>
26#include <asm/bug.h>
27#include <perf/evsel.h>
28#include <perf/cpumap.h>
29#include <internal/lib.h> // page_size
30#include <internal/threadmap.h>
31#include <perf/threadmap.h>
32#include <symbol/kallsyms.h>
33#include <dirent.h>
34#include <errno.h>
35#include <inttypes.h>
36#include <stdio.h>
37#include <string.h>
38#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39#include <api/fs/fs.h>
40#include <api/io.h>
41#include <sys/types.h>
42#include <sys/stat.h>
43#include <fcntl.h>
44#include <unistd.h>
45
46#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
47
48unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
49
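/*
 * Deliver one synthesized event to the tool's @process callback, attaching a
 * minimal dummy sample (ids and timestamps of -1, period of 1) whose cpumode
 * is taken from the event header.
 */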
50int perf_tool__process_synth_event(struct perf_tool *tool,
51				   union perf_event *event,
52				   struct machine *machine,
53				   perf_event__handler_t process)
54{
55	struct perf_sample synth_sample = {
56		.pid	   = -1,
57		.tid	   = -1,
58		.time	   = -1,
59		.stream_id = -1,
60		.cpu	   = -1,
61		.period	   = 1,
62		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
63	};
64
65	return process(tool, event, &synth_sample, machine);
}
67
/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid, ppid and, for kernel thread detection, the
 * VmPeak/Threads fields.
 */
72static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
73				    pid_t *tgid, pid_t *ppid, bool *kernel)
74{
75	char bf[4096];
76	int fd;
77	size_t size = 0;
78	ssize_t n;
79	char *name, *tgids, *ppids, *vmpeak, *threads;
80
81	*tgid = -1;
82	*ppid = -1;
83
84	if (pid)
85		snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
86	else
87		snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
88
89	fd = open(bf, O_RDONLY);
90	if (fd < 0) {
91		pr_debug("couldn't open %s\n", bf);
92		return -1;
93	}
94
95	n = read(fd, bf, sizeof(bf) - 1);
96	close(fd);
97	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   tid);
100		return -1;
101	}
102	bf[n] = '\0';
103
104	name = strstr(bf, "Name:");
105	tgids = strstr(name ?: bf, "Tgid:");
106	ppids = strstr(tgids ?: bf, "PPid:");
107	vmpeak = strstr(ppids ?: bf, "VmPeak:");
108
109	if (vmpeak)
110		threads = NULL;
111	else
112		threads = strstr(ppids ?: bf, "Threads:");
113
114	if (name) {
115		char *nl;
116
117		name = skip_spaces(name + 5);  /* strlen("Name:") */
118		nl = strchr(name, '\n');
119		if (nl)
120			*nl = '\0';
121
122		size = strlen(name);
123		if (size >= len)
124			size = len - 1;
125		memcpy(comm, name, size);
126		comm[size] = '\0';
127	} else {
128		pr_debug("Name: string not found for pid %d\n", tid);
129	}
130
131	if (tgids) {
132		tgids += 5;  /* strlen("Tgid:") */
133		*tgid = atoi(tgids);
134	} else {
135		pr_debug("Tgid: string not found for pid %d\n", tid);
136	}
137
138	if (ppids) {
139		ppids += 5;  /* strlen("PPid:") */
140		*ppid = atoi(ppids);
141	} else {
142		pr_debug("PPid: string not found for pid %d\n", tid);
143	}
144
145	if (!vmpeak && threads)
146		*kernel = true;
147	else
148		*kernel = false;
149
150	return 0;
151}
152
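/*
 * Fill in a PERF_RECORD_COMM event for @tid. On the host this parses
 * /proc/<pid>/status to get the comm, tgid, ppid and whether the task is a
 * kernel thread; for guest machines only the machine's pid is used as tgid.
 */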
153static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
154				    struct machine *machine,
155				    pid_t *tgid, pid_t *ppid, bool *kernel)
156{
157	size_t size;
158
159	*ppid = -1;
160
161	memset(&event->comm, 0, sizeof(event->comm));
162
163	if (machine__is_host(machine)) {
164		if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
165					     sizeof(event->comm.comm),
166					     tgid, ppid, kernel) != 0) {
167			return -1;
168		}
169	} else {
170		*tgid = machine->pid;
171	}
172
173	if (*tgid < 0)
174		return -1;
175
176	event->comm.pid = *tgid;
177	event->comm.header.type = PERF_RECORD_COMM;
178
179	size = strlen(event->comm.comm) + 1;
180	size = PERF_ALIGN(size, sizeof(u64));
181	memset(event->comm.comm + size, 0, machine->id_hdr_size);
182	event->comm.header.size = (sizeof(event->comm) -
183				(sizeof(event->comm.comm) - size) +
184				machine->id_hdr_size);
185	event->comm.tid = tid;
186
187	return 0;
188}
189
190pid_t perf_event__synthesize_comm(struct perf_tool *tool,
191					 union perf_event *event, pid_t pid,
192					 perf_event__handler_t process,
193					 struct machine *machine)
194{
195	pid_t tgid, ppid;
196	bool kernel_thread;
197
198	if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
199				     &kernel_thread) != 0)
200		return -1;
201
202	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
203		return -1;
204
205	return tgid;
206}
207
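/* Resolve the device/inode pair identifying the /proc/<pid>/ns/<ns> link. */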
208static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
209					 struct perf_ns_link_info *ns_link_info)
210{
211	struct stat64 st;
212	char proc_ns[128];
213
214	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
215	if (stat64(proc_ns, &st) == 0) {
216		ns_link_info->dev = st.st_dev;
217		ns_link_info->ino = st.st_ino;
218	}
219}
220
221int perf_event__synthesize_namespaces(struct perf_tool *tool,
222				      union perf_event *event,
223				      pid_t pid, pid_t tgid,
224				      perf_event__handler_t process,
225				      struct machine *machine)
226{
227	u32 idx;
228	struct perf_ns_link_info *ns_link_info;
229
230	if (!tool || !tool->namespace_events)
231		return 0;
232
233	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
234	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
235	       machine->id_hdr_size));
236
237	event->namespaces.pid = tgid;
238	event->namespaces.tid = pid;
239
240	event->namespaces.nr_namespaces = NR_NAMESPACES;
241
242	ns_link_info = event->namespaces.link_info;
243
244	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
245		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
246					     &ns_link_info[idx]);
247
248	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
249
250	event->namespaces.header.size = (sizeof(event->namespaces) +
251			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
252			machine->id_hdr_size);
253
254	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
255		return -1;
256
257	return 0;
258}
259
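/*
 * Synthesize a PERF_RECORD_FORK for a task that already existed when the
 * session started, so its place in the process tree is known.
 */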
260static int perf_event__synthesize_fork(struct perf_tool *tool,
261				       union perf_event *event,
262				       pid_t pid, pid_t tgid, pid_t ppid,
263				       perf_event__handler_t process,
264				       struct machine *machine)
265{
266	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
267
	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file. For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
273	if (tgid == pid) {
274		event->fork.ppid = ppid;
275		event->fork.ptid = ppid;
276	} else {
277		event->fork.ppid = tgid;
278		event->fork.ptid = tgid;
279	}
280	event->fork.pid  = tgid;
281	event->fork.tid  = pid;
282	event->fork.header.type = PERF_RECORD_FORK;
283	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
284
285	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
286
287	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
288		return -1;
289
290	return 0;
291}
292
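/*
 * Parse one line of /proc/<pid>/maps into its address range, protection,
 * flags, offset, device, inode and pathname fields. Returns false on
 * malformed input or read errors.
 */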
293static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
294				u32 *prot, u32 *flags, __u64 *offset,
295				u32 *maj, u32 *min,
296				__u64 *inode,
297				ssize_t pathname_size, char *pathname)
298{
299	__u64 temp;
300	int ch;
301	char *start_pathname = pathname;
302
303	if (io__get_hex(io, start) != '-')
304		return false;
305	if (io__get_hex(io, end) != ' ')
306		return false;
307
308	/* map protection and flags bits */
309	*prot = 0;
310	ch = io__get_char(io);
311	if (ch == 'r')
312		*prot |= PROT_READ;
313	else if (ch != '-')
314		return false;
315	ch = io__get_char(io);
316	if (ch == 'w')
317		*prot |= PROT_WRITE;
318	else if (ch != '-')
319		return false;
320	ch = io__get_char(io);
321	if (ch == 'x')
322		*prot |= PROT_EXEC;
323	else if (ch != '-')
324		return false;
325	ch = io__get_char(io);
326	if (ch == 's')
327		*flags = MAP_SHARED;
328	else if (ch == 'p')
329		*flags = MAP_PRIVATE;
330	else
331		return false;
332	if (io__get_char(io) != ' ')
333		return false;
334
335	if (io__get_hex(io, offset) != ' ')
336		return false;
337
338	if (io__get_hex(io, &temp) != ':')
339		return false;
340	*maj = temp;
341	if (io__get_hex(io, &temp) != ' ')
342		return false;
343	*min = temp;
344
345	ch = io__get_dec(io, inode);
346	if (ch != ' ') {
347		*pathname = '\0';
348		return ch == '\n';
349	}
350	do {
351		ch = io__get_char(io);
352	} while (ch == ' ');
353	while (true) {
354		if (ch < 0)
355			return false;
356		if (ch == '\0' || ch == '\n' ||
357		    (pathname + 1 - start_pathname) >= pathname_size) {
358			*pathname = '\0';
359			return true;
360		}
361		*pathname++ = ch;
362		ch = io__get_char(io);
363	}
364}
365
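/*
 * Attach a build ID to an mmap2 event: read /sys/kernel/notes for the kernel,
 * reuse a cached DSO when one is known, otherwise read it from the mapped
 * file within the task's mount namespace.
 */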
366static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
367					     struct machine *machine,
368					     bool is_kernel)
369{
370	struct build_id bid;
371	struct nsinfo *nsi;
372	struct nscookie nc;
373	struct dso *dso = NULL;
374	struct dso_id id;
375	int rc;
376
377	if (is_kernel) {
378		rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
379		goto out;
380	}
381
382	id.maj = event->maj;
383	id.min = event->min;
384	id.ino = event->ino;
385	id.ino_generation = event->ino_generation;
386
387	dso = dsos__findnew_id(&machine->dsos, event->filename, &id);
388	if (dso && dso->has_build_id) {
389		bid = dso->bid;
390		rc = 0;
391		goto out;
392	}
393
394	nsi = nsinfo__new(event->pid);
395	nsinfo__mountns_enter(nsi, &nc);
396
397	rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
398
399	nsinfo__mountns_exit(&nc);
400	nsinfo__put(nsi);
401
402out:
403	if (rc == 0) {
404		memcpy(event->build_id, bid.data, sizeof(bid.data));
405		event->build_id_size = (u8) bid.size;
406		event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
407		event->__reserved_1 = 0;
408		event->__reserved_2 = 0;
409
410		if (dso && !dso->has_build_id)
411			dso__set_build_id(dso, &bid);
412	} else {
413		if (event->filename[0] == '/') {
414			pr_debug2("Failed to read build ID for %s\n",
415				  event->filename);
416		}
417	}
418	dso__put(dso);
419}
420
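/*
 * Synthesize one PERF_RECORD_MMAP2 per mapping found in
 * /proc/<pid>/task/<pid>/maps. Non-executable (data) mappings are only
 * emitted when @mmap_data is set, and parsing is abandoned (flagging the
 * last event as truncated) once it exceeds proc_map_timeout milliseconds.
 */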
421int perf_event__synthesize_mmap_events(struct perf_tool *tool,
422				       union perf_event *event,
423				       pid_t pid, pid_t tgid,
424				       perf_event__handler_t process,
425				       struct machine *machine,
426				       bool mmap_data)
427{
428	unsigned long long t;
429	char bf[BUFSIZ];
430	struct io io;
431	bool truncation = false;
432	unsigned long long timeout = proc_map_timeout * 1000000ULL;
433	int rc = 0;
434	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
435	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
436
437	if (machine__is_default_guest(machine))
438		return 0;
439
440	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
441		machine->root_dir, pid, pid);
442
443	io.fd = open(bf, O_RDONLY, 0);
444	if (io.fd < 0) {
445		/*
446		 * We raced with a task exiting - just return:
447		 */
448		pr_debug("couldn't open %s\n", bf);
449		return -1;
450	}
451	io__init(&io, io.fd, bf, sizeof(bf));
452
453	event->header.type = PERF_RECORD_MMAP2;
454	t = rdclock();
455
456	while (!io.eof) {
457		static const char anonstr[] = "//anon";
458		size_t size, aligned_size;
459
460		/* ensure null termination since stack will be reused. */
461		event->mmap2.filename[0] = '\0';
462
463		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
464		if (!read_proc_maps_line(&io,
465					&event->mmap2.start,
466					&event->mmap2.len,
467					&event->mmap2.prot,
468					&event->mmap2.flags,
469					&event->mmap2.pgoff,
470					&event->mmap2.maj,
471					&event->mmap2.min,
472					&event->mmap2.ino,
473					sizeof(event->mmap2.filename),
474					event->mmap2.filename))
475			continue;
476
477		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   machine->root_dir, pid, pid);
482			truncation = true;
483			goto out;
484		}
485
486		event->mmap2.ino_generation = 0;
487
488		/*
489		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
490		 */
491		if (machine__is_host(machine))
492			event->header.misc = PERF_RECORD_MISC_USER;
493		else
494			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
495
496		if ((event->mmap2.prot & PROT_EXEC) == 0) {
497			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
498				continue;
499
500			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
501		}
502
503out:
504		if (truncation)
505			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
506
507		if (!strcmp(event->mmap2.filename, ""))
508			strcpy(event->mmap2.filename, anonstr);
509
510		if (hugetlbfs_mnt_len &&
511		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
512			     hugetlbfs_mnt_len)) {
513			strcpy(event->mmap2.filename, anonstr);
514			event->mmap2.flags |= MAP_HUGETLB;
515		}
516
517		size = strlen(event->mmap2.filename) + 1;
518		aligned_size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
520		event->mmap2.header.size = (sizeof(event->mmap2) -
521					(sizeof(event->mmap2.filename) - aligned_size));
522		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
523			(aligned_size - size));
524		event->mmap2.header.size += machine->id_hdr_size;
525		event->mmap2.pid = tgid;
526		event->mmap2.tid = pid;
527
528		if (symbol_conf.buildid_mmap2)
529			perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
530
531		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
532			rc = -1;
533			break;
534		}
535
536		if (truncation)
537			break;
538	}
539
540	close(io.fd);
541	return rc;
542}
543
544#ifdef HAVE_FILE_HANDLE
545static int perf_event__synthesize_cgroup(struct perf_tool *tool,
546					 union perf_event *event,
547					 char *path, size_t mount_len,
548					 perf_event__handler_t process,
549					 struct machine *machine)
550{
551	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
552	size_t path_len = strlen(path) - mount_len + 1;
553	struct {
554		struct file_handle fh;
555		uint64_t cgroup_id;
556	} handle;
557	int mount_id;
558
559	while (path_len % sizeof(u64))
560		path[mount_len + path_len++] = '\0';
561
562	memset(&event->cgroup, 0, event_size);
563
564	event->cgroup.header.type = PERF_RECORD_CGROUP;
565	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
566
567	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
568	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
		pr_debug("name_to_handle_at failed: %s\n", path);
570		return -1;
571	}
572
573	event->cgroup.id = handle.cgroup_id;
574	strncpy(event->cgroup.path, path + mount_len, path_len);
575	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
576
577	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
578		pr_debug("process synth event failed\n");
579		return -1;
580	}
581
582	return 0;
583}
584
585static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
586					union perf_event *event,
587					char *path, size_t mount_len,
588					perf_event__handler_t process,
589					struct machine *machine)
590{
591	size_t pos = strlen(path);
592	DIR *d;
593	struct dirent *dent;
594	int ret = 0;
595
596	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
597					  process, machine) < 0)
598		return -1;
599
600	d = opendir(path);
601	if (d == NULL) {
602		pr_debug("failed to open directory: %s\n", path);
603		return -1;
604	}
605
606	while ((dent = readdir(d)) != NULL) {
607		if (dent->d_type != DT_DIR)
608			continue;
609		if (!strcmp(dent->d_name, ".") ||
610		    !strcmp(dent->d_name, ".."))
611			continue;
612
613		/* any sane path should be less than PATH_MAX */
614		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
615			continue;
616
617		if (path[pos - 1] != '/')
618			strcat(path, "/");
619		strcat(path, dent->d_name);
620
621		ret = perf_event__walk_cgroup_tree(tool, event, path,
622						   mount_len, process, machine);
623		if (ret < 0)
624			break;
625
626		path[pos] = '\0';
627	}
628
629	closedir(d);
630	return ret;
631}
632
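/*
 * Synthesize PERF_RECORD_CGROUP events for the perf_event cgroup mount point
 * and every directory below it, so that cgroup IDs seen later can be mapped
 * back to paths.
 */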
633int perf_event__synthesize_cgroups(struct perf_tool *tool,
634				   perf_event__handler_t process,
635				   struct machine *machine)
636{
637	union perf_event event;
638	char cgrp_root[PATH_MAX];
639	size_t mount_len;  /* length of mount point in the path */
640
641	if (!tool || !tool->cgroup_events)
642		return 0;
643
644	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
645		pr_debug("cannot find cgroup mount point\n");
646		return -1;
647	}
648
649	mount_len = strlen(cgrp_root);
650	/* make sure the path starts with a slash (after mount point) */
651	strcat(cgrp_root, "/");
652
653	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
654					 process, machine) < 0)
655		return -1;
656
657	return 0;
658}
659#else
660int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
661				   perf_event__handler_t process __maybe_unused,
662				   struct machine *machine __maybe_unused)
663{
664	return -1;
665}
666#endif
667
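/*
 * Synthesize one mmap event (mmap2 when symbol_conf.buildid_mmap2 is set) per
 * kernel module present in the machine's kernel maps.
 */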
668int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
669				   struct machine *machine)
670{
671	int rc = 0;
672	struct map_rb_node *pos;
673	struct maps *maps = machine__kernel_maps(machine);
674	union perf_event *event;
675	size_t size = symbol_conf.buildid_mmap2 ?
676			sizeof(event->mmap2) : sizeof(event->mmap);
677
678	event = zalloc(size + machine->id_hdr_size);
679	if (event == NULL) {
680		pr_debug("Not enough memory synthesizing mmap event "
681			 "for kernel modules\n");
682		return -1;
683	}
684
685	/*
686	 * kernel uses 0 for user space maps, see kernel/perf_event.c
687	 * __perf_event_mmap
688	 */
689	if (machine__is_host(machine))
690		event->header.misc = PERF_RECORD_MISC_KERNEL;
691	else
692		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
693
694	maps__for_each_entry(maps, pos) {
695		struct map *map = pos->map;
696		struct dso *dso;
697
698		if (!__map__is_kmodule(map))
699			continue;
700
701		dso = map__dso(map);
702		if (symbol_conf.buildid_mmap2) {
703			size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
704			event->mmap2.header.type = PERF_RECORD_MMAP2;
705			event->mmap2.header.size = (sizeof(event->mmap2) -
706						(sizeof(event->mmap2.filename) - size));
707			memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
708			event->mmap2.header.size += machine->id_hdr_size;
709			event->mmap2.start = map__start(map);
710			event->mmap2.len   = map__size(map);
711			event->mmap2.pid   = machine->pid;
712
713			memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);
714
715			perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
716		} else {
717			size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
718			event->mmap.header.type = PERF_RECORD_MMAP;
719			event->mmap.header.size = (sizeof(event->mmap) -
720						(sizeof(event->mmap.filename) - size));
721			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
722			event->mmap.header.size += machine->id_hdr_size;
723			event->mmap.start = map__start(map);
724			event->mmap.len   = map__size(map);
725			event->mmap.pid   = machine->pid;
726
727			memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
728		}
729
730		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
731			rc = -1;
732			break;
733		}
734	}
735
736	free(event);
737	return rc;
738}
739
740static int filter_task(const struct dirent *dirent)
741{
742	return isdigit(dirent->d_name[0]);
743}
744
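/*
 * Synthesize events for one task. With @full set, walk /proc/<pid>/task and
 * emit fork, namespace and comm events for every thread; otherwise emit only
 * the comm and namespace events (plus mmap for the group leader) of @pid.
 */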
745static int __event__synthesize_thread(union perf_event *comm_event,
746				      union perf_event *mmap_event,
747				      union perf_event *fork_event,
748				      union perf_event *namespaces_event,
749				      pid_t pid, int full, perf_event__handler_t process,
750				      struct perf_tool *tool, struct machine *machine,
751				      bool needs_mmap, bool mmap_data)
752{
753	char filename[PATH_MAX];
754	struct dirent **dirent;
755	pid_t tgid, ppid;
756	int rc = 0;
757	int i, n;
758
759	/* special case: only send one comm event using passed in pid */
760	if (!full) {
761		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
762						   process, machine);
763
764		if (tgid == -1)
765			return -1;
766
767		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
768						      tgid, process, machine) < 0)
769			return -1;
770
771		/*
772		 * send mmap only for thread group leader
773		 * see thread__init_maps()
774		 */
775		if (pid == tgid && needs_mmap &&
776		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
777						       process, machine, mmap_data))
778			return -1;
779
780		return 0;
781	}
782
783	if (machine__is_default_guest(machine))
784		return 0;
785
786	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
787		 machine->root_dir, pid);
788
789	n = scandir(filename, &dirent, filter_task, NULL);
790	if (n < 0)
791		return n;
792
793	for (i = 0; i < n; i++) {
794		char *end;
795		pid_t _pid;
796		bool kernel_thread = false;
797
798		_pid = strtol(dirent[i]->d_name, &end, 10);
799		if (*end)
800			continue;
801
802		/* some threads may exit just after scan, ignore it */
803		if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
804					     &tgid, &ppid, &kernel_thread) != 0)
805			continue;
806
807		rc = -1;
808		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
809						ppid, process, machine) < 0)
810			break;
811
812		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
813						      tgid, process, machine) < 0)
814			break;
815
816		/*
817		 * Send the prepared comm event
818		 */
819		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
820			break;
821
822		rc = 0;
823		if (_pid == pid && !kernel_thread && needs_mmap) {
824			/* process the parent's maps too */
825			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
826						process, machine, mmap_data);
827			if (rc)
828				break;
829		}
830	}
831
832	for (i = 0; i < n; i++)
833		zfree(&dirent[i]);
834	free(dirent);
835
836	return rc;
837}
838
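/*
 * Synthesize comm/namespace/mmap events for every thread in @threads, also
 * covering thread group leaders that are not themselves part of the map.
 */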
839int perf_event__synthesize_thread_map(struct perf_tool *tool,
840				      struct perf_thread_map *threads,
841				      perf_event__handler_t process,
842				      struct machine *machine,
843				      bool needs_mmap, bool mmap_data)
844{
845	union perf_event *comm_event, *mmap_event, *fork_event;
846	union perf_event *namespaces_event;
847	int err = -1, thread, j;
848
849	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
850	if (comm_event == NULL)
851		goto out;
852
853	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
854	if (mmap_event == NULL)
855		goto out_free_comm;
856
857	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
858	if (fork_event == NULL)
859		goto out_free_mmap;
860
861	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
862				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
863				  machine->id_hdr_size);
864	if (namespaces_event == NULL)
865		goto out_free_fork;
866
867	err = 0;
868	for (thread = 0; thread < threads->nr; ++thread) {
869		if (__event__synthesize_thread(comm_event, mmap_event,
870					       fork_event, namespaces_event,
871					       perf_thread_map__pid(threads, thread), 0,
872					       process, tool, machine,
873					       needs_mmap, mmap_data)) {
874			err = -1;
875			break;
876		}
877
878		/*
879		 * comm.pid is set to thread group id by
880		 * perf_event__synthesize_comm
881		 */
882		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
883			bool need_leader = true;
884
885			/* is thread group leader in thread_map? */
886			for (j = 0; j < threads->nr; ++j) {
887				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
888					need_leader = false;
889					break;
890				}
891			}
892
893			/* if not, generate events for it */
894			if (need_leader &&
895			    __event__synthesize_thread(comm_event, mmap_event,
896						       fork_event, namespaces_event,
897						       comm_event->comm.pid, 0,
898						       process, tool, machine,
899						       needs_mmap, mmap_data)) {
900				err = -1;
901				break;
902			}
903		}
904	}
905	free(namespaces_event);
906out_free_fork:
907	free(fork_event);
908out_free_mmap:
909	free(mmap_event);
910out_free_comm:
911	free(comm_event);
912out:
913	return err;
914}
915
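/*
 * Synthesize events for the /proc entries dirent[start]..dirent[start + num - 1].
 * Failures for individual tasks are ignored since they may simply have exited
 * between the scandir() and the parse.
 */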
916static int __perf_event__synthesize_threads(struct perf_tool *tool,
917					    perf_event__handler_t process,
918					    struct machine *machine,
919					    bool needs_mmap,
920					    bool mmap_data,
921					    struct dirent **dirent,
922					    int start,
923					    int num)
924{
925	union perf_event *comm_event, *mmap_event, *fork_event;
926	union perf_event *namespaces_event;
927	int err = -1;
928	char *end;
929	pid_t pid;
930	int i;
931
932	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
933	if (comm_event == NULL)
934		goto out;
935
936	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
937	if (mmap_event == NULL)
938		goto out_free_comm;
939
940	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
941	if (fork_event == NULL)
942		goto out_free_mmap;
943
944	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
945				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
946				  machine->id_hdr_size);
947	if (namespaces_event == NULL)
948		goto out_free_fork;
949
950	for (i = start; i < start + num; i++) {
951		if (!isdigit(dirent[i]->d_name[0]))
952			continue;
953
954		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
955		/* only interested in proper numerical dirents */
956		if (*end)
957			continue;
		/*
		 * We may race with an exiting thread, so don't stop just
		 * because one thread couldn't be synthesized.
		 */
962		__event__synthesize_thread(comm_event, mmap_event, fork_event,
963					   namespaces_event, pid, 1, process,
964					   tool, machine, needs_mmap, mmap_data);
965	}
966	err = 0;
967
968	free(namespaces_event);
969out_free_fork:
970	free(fork_event);
971out_free_mmap:
972	free(mmap_event);
973out_free_comm:
974	free(comm_event);
975out:
976	return err;
977}
978
979struct synthesize_threads_arg {
980	struct perf_tool *tool;
981	perf_event__handler_t process;
982	struct machine *machine;
983	bool needs_mmap;
984	bool mmap_data;
985	struct dirent **dirent;
986	int num;
987	int start;
988};
989
990static void *synthesize_threads_worker(void *arg)
991{
992	struct synthesize_threads_arg *args = arg;
993
994	__perf_event__synthesize_threads(args->tool, args->process,
995					 args->machine,
996					 args->needs_mmap, args->mmap_data,
997					 args->dirent,
998					 args->start, args->num);
999	return NULL;
1000}
1001
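/*
 * Synthesize events for every task listed under /proc, optionally spreading
 * the work over up to @nr_threads_synthesize worker threads (UINT_MAX means
 * one worker per online CPU).
 */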
1002int perf_event__synthesize_threads(struct perf_tool *tool,
1003				   perf_event__handler_t process,
1004				   struct machine *machine,
1005				   bool needs_mmap, bool mmap_data,
1006				   unsigned int nr_threads_synthesize)
1007{
1008	struct synthesize_threads_arg *args = NULL;
1009	pthread_t *synthesize_threads = NULL;
1010	char proc_path[PATH_MAX];
1011	struct dirent **dirent;
1012	int num_per_thread;
1013	int m, n, i, j;
1014	int thread_nr;
1015	int base = 0;
1016	int err = -1;
1017
1018
1019	if (machine__is_default_guest(machine))
1020		return 0;
1021
1022	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
1023	n = scandir(proc_path, &dirent, filter_task, NULL);
1024	if (n < 0)
1025		return err;
1026
1027	if (nr_threads_synthesize == UINT_MAX)
1028		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
1029	else
1030		thread_nr = nr_threads_synthesize;
1031
1032	if (thread_nr <= 1) {
1033		err = __perf_event__synthesize_threads(tool, process,
1034						       machine,
1035						       needs_mmap, mmap_data,
1036						       dirent, base, n);
1037		goto free_dirent;
1038	}
1039	if (thread_nr > n)
1040		thread_nr = n;
1041
	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
1043	if (synthesize_threads == NULL)
1044		goto free_dirent;
1045
	args = calloc(thread_nr, sizeof(*args));
1047	if (args == NULL)
1048		goto free_threads;
1049
1050	num_per_thread = n / thread_nr;
1051	m = n % thread_nr;
1052	for (i = 0; i < thread_nr; i++) {
1053		args[i].tool = tool;
1054		args[i].process = process;
1055		args[i].machine = machine;
1056		args[i].needs_mmap = needs_mmap;
1057		args[i].mmap_data = mmap_data;
1058		args[i].dirent = dirent;
1059	}
1060	for (i = 0; i < m; i++) {
1061		args[i].num = num_per_thread + 1;
1062		args[i].start = i * args[i].num;
1063	}
1064	if (i != 0)
1065		base = args[i-1].start + args[i-1].num;
1066	for (j = i; j < thread_nr; j++) {
1067		args[j].num = num_per_thread;
1068		args[j].start = base + (j - i) * args[i].num;
1069	}
1070
1071	for (i = 0; i < thread_nr; i++) {
1072		if (pthread_create(&synthesize_threads[i], NULL,
1073				   synthesize_threads_worker, &args[i]))
1074			goto out_join;
1075	}
1076	err = 0;
1077out_join:
1078	for (i = 0; i < thread_nr; i++)
1079		pthread_join(synthesize_threads[i], NULL);
1080	free(args);
1081free_threads:
1082	free(synthesize_threads);
1083free_dirent:
1084	for (i = 0; i < n; i++)
1085		zfree(&dirent[i]);
1086	free(dirent);
1087
1088	return err;
1089}
1090
1091int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
1092					      perf_event__handler_t process __maybe_unused,
1093					      struct machine *machine __maybe_unused)
1094{
1095	return 0;
1096}
1097
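/*
 * Synthesize a single mmap/mmap2 event for the kernel text map, using the
 * ref_reloc_sym address as pgoff so that a relocated kernel can be detected
 * later.
 */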
1098static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1099						perf_event__handler_t process,
1100						struct machine *machine)
1101{
1102	union perf_event *event;
1103	size_t size = symbol_conf.buildid_mmap2 ?
1104			sizeof(event->mmap2) : sizeof(event->mmap);
1105	struct map *map = machine__kernel_map(machine);
1106	struct kmap *kmap;
1107	int err;
1108
1109	if (map == NULL)
1110		return -1;
1111
1112	kmap = map__kmap(map);
1113	if (!kmap->ref_reloc_sym)
1114		return -1;
1115
	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use the ref_reloc_sym, and keep it as a fallback for
	 * older kernels afterwards.
	 */
1121	event = zalloc(size + machine->id_hdr_size);
1122	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
1125		return -1;
1126	}
1127
1128	if (machine__is_host(machine)) {
1129		/*
1130		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
1131		 * see kernel/perf_event.c __perf_event_mmap
1132		 */
1133		event->header.misc = PERF_RECORD_MISC_KERNEL;
1134	} else {
1135		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
1136	}
1137
1138	if (symbol_conf.buildid_mmap2) {
1139		size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
1140				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1141		size = PERF_ALIGN(size, sizeof(u64));
1142		event->mmap2.header.type = PERF_RECORD_MMAP2;
1143		event->mmap2.header.size = (sizeof(event->mmap2) -
1144				(sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
1145		event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
1146		event->mmap2.start = map__start(map);
		event->mmap2.len   = map__end(map) - event->mmap2.start;
1148		event->mmap2.pid   = machine->pid;
1149
1150		perf_record_mmap2__read_build_id(&event->mmap2, machine, true);
1151	} else {
1152		size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
1153				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1154		size = PERF_ALIGN(size, sizeof(u64));
1155		event->mmap.header.type = PERF_RECORD_MMAP;
1156		event->mmap.header.size = (sizeof(event->mmap) -
1157				(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
1158		event->mmap.pgoff = kmap->ref_reloc_sym->addr;
1159		event->mmap.start = map__start(map);
1160		event->mmap.len   = map__end(map) - event->mmap.start;
1161		event->mmap.pid   = machine->pid;
1162	}
1163
1164	err = perf_tool__process_synth_event(tool, event, machine, process);
1165	free(event);
1166
1167	return err;
1168}
1169
1170int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1171				       perf_event__handler_t process,
1172				       struct machine *machine)
1173{
1174	int err;
1175
1176	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
1177	if (err < 0)
1178		return err;
1179
1180	return perf_event__synthesize_extra_kmaps(tool, process, machine);
1181}
1182
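/*
 * Synthesize a PERF_RECORD_THREAD_MAP carrying the pid and comm of every
 * entry in @threads.
 */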
1183int perf_event__synthesize_thread_map2(struct perf_tool *tool,
1184				      struct perf_thread_map *threads,
1185				      perf_event__handler_t process,
1186				      struct machine *machine)
1187{
1188	union perf_event *event;
1189	int i, err, size;
1190
1191	size  = sizeof(event->thread_map);
1192	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
1193
1194	event = zalloc(size);
1195	if (!event)
1196		return -ENOMEM;
1197
1198	event->header.type = PERF_RECORD_THREAD_MAP;
1199	event->header.size = size;
1200	event->thread_map.nr = threads->nr;
1201
1202	for (i = 0; i < threads->nr; i++) {
1203		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1204		char *comm = perf_thread_map__comm(threads, i);
1205
1206		if (!comm)
1207			comm = (char *) "";
1208
1209		entry->pid = perf_thread_map__pid(threads, i);
1210		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1211	}
1212
1213	err = process(tool, event, NULL, machine);
1214
1215	free(event);
1216	return err;
1217}
1218
1219struct synthesize_cpu_map_data {
1220	const struct perf_cpu_map *map;
1221	int nr;
1222	int min_cpu;
1223	int max_cpu;
1224	int has_any_cpu;
1225	int type;
1226	size_t size;
1227	struct perf_record_cpu_map_data *data;
1228};
1229
1230static void synthesize_cpus(struct synthesize_cpu_map_data *data)
1231{
1232	data->data->type = PERF_CPU_MAP__CPUS;
1233	data->data->cpus_data.nr = data->nr;
1234	for (int i = 0; i < data->nr; i++)
1235		data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
1236}
1237
1238static void synthesize_mask(struct synthesize_cpu_map_data *data)
1239{
1240	int idx;
1241	struct perf_cpu cpu;
1242
1243	/* Due to padding, the 4bytes per entry mask variant is always smaller. */
1244	data->data->type = PERF_CPU_MAP__MASK;
1245	data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);
1246	data->data->mask32_data.long_size = 4;
1247
1248	perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
1249		int bit_word = cpu.cpu / 32;
1250		u32 bit_mask = 1U << (cpu.cpu & 31);
1251
1252		data->data->mask32_data.mask[bit_word] |= bit_mask;
1253	}
1254}
1255
1256static void synthesize_range_cpus(struct synthesize_cpu_map_data *data)
1257{
1258	data->data->type = PERF_CPU_MAP__RANGE_CPUS;
1259	data->data->range_cpu_data.any_cpu = data->has_any_cpu;
1260	data->data->range_cpu_data.start_cpu = data->min_cpu;
1261	data->data->range_cpu_data.end_cpu = data->max_cpu;
1262}
1263
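/*
 * Pick the most compact encoding for the CPU map (contiguous range, list of
 * CPUs, or bitmask), record the choice in @syn_data and allocate a zeroed
 * event of the matching size.
 */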
1264static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data,
1265				 size_t header_size)
1266{
1267	size_t size_cpus, size_mask;
1268
1269	syn_data->nr = perf_cpu_map__nr(syn_data->map);
1270	syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;
1271
1272	syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
1273	syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
1274	if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {
1275		/* A consecutive range of CPUs can be encoded using a range. */
1276		assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64));
1277		syn_data->type = PERF_CPU_MAP__RANGE_CPUS;
1278		syn_data->size = header_size + sizeof(u64);
1279		return zalloc(syn_data->size);
1280	}
1281
1282	size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16);
1283	/* Due to padding, the 4bytes per entry mask variant is always smaller. */
1284	size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) +
1285		BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);
1286	if (syn_data->has_any_cpu || size_cpus < size_mask) {
1287		/* Follow the CPU map encoding. */
1288		syn_data->type = PERF_CPU_MAP__CPUS;
1289		syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64));
1290		return zalloc(syn_data->size);
1291	}
1292	/* Encode using a bitmask. */
1293	syn_data->type = PERF_CPU_MAP__MASK;
1294	syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64));
1295	return zalloc(syn_data->size);
1296}
1297
1298static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data)
1299{
1300	switch (data->type) {
1301	case PERF_CPU_MAP__CPUS:
1302		synthesize_cpus(data);
1303		break;
1304	case PERF_CPU_MAP__MASK:
1305		synthesize_mask(data);
1306		break;
1307	case PERF_CPU_MAP__RANGE_CPUS:
1308		synthesize_range_cpus(data);
1309		break;
1310	default:
1311		break;
1312	}
1313}
1314
1315static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
1316{
1317	struct synthesize_cpu_map_data syn_data = { .map = map };
1318	struct perf_record_cpu_map *event;
1319
1320
1321	event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header));
1322	if (!event)
1323		return NULL;
1324
1325	syn_data.data = &event->data;
1326	event->header.type = PERF_RECORD_CPU_MAP;
1327	event->header.size = syn_data.size;
1328	cpu_map_data__synthesize(&syn_data);
1329	return event;
1330}
1331
1332
1333int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1334				   const struct perf_cpu_map *map,
1335				   perf_event__handler_t process,
1336				   struct machine *machine)
1337{
1338	struct perf_record_cpu_map *event;
1339	int err;
1340
1341	event = cpu_map_event__new(map);
1342	if (!event)
1343		return -ENOMEM;
1344
1345	err = process(tool, (union perf_event *) event, NULL, machine);
1346
1347	free(event);
1348	return err;
1349}
1350
1351int perf_event__synthesize_stat_config(struct perf_tool *tool,
1352				       struct perf_stat_config *config,
1353				       perf_event__handler_t process,
1354				       struct machine *machine)
1355{
1356	struct perf_record_stat_config *event;
1357	int size, i = 0, err;
1358
1359	size  = sizeof(*event);
1360	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1361
1362	event = zalloc(size);
1363	if (!event)
1364		return -ENOMEM;
1365
1366	event->header.type = PERF_RECORD_STAT_CONFIG;
1367	event->header.size = size;
1368	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1369
1370#define ADD(__term, __val)					\
1371	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1372	event->data[i].val = __val;				\
1373	i++;
1374
1375	ADD(AGGR_MODE,	config->aggr_mode)
1376	ADD(INTERVAL,	config->interval)
1377	ADD(SCALE,	config->scale)
1378	ADD(AGGR_LEVEL,	config->aggr_level)
1379
1380	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1381		  "stat config terms unbalanced\n");
1382#undef ADD
1383
1384	err = process(tool, (union perf_event *) event, NULL, machine);
1385
1386	free(event);
1387	return err;
1388}
1389
1390int perf_event__synthesize_stat(struct perf_tool *tool,
1391				struct perf_cpu cpu, u32 thread, u64 id,
1392				struct perf_counts_values *count,
1393				perf_event__handler_t process,
1394				struct machine *machine)
1395{
1396	struct perf_record_stat event;
1397
1398	event.header.type = PERF_RECORD_STAT;
1399	event.header.size = sizeof(event);
1400	event.header.misc = 0;
1401
1402	event.id        = id;
1403	event.cpu       = cpu.cpu;
1404	event.thread    = thread;
1405	event.val       = count->val;
1406	event.ena       = count->ena;
1407	event.run       = count->run;
1408
1409	return process(tool, (union perf_event *) &event, NULL, machine);
1410}
1411
1412int perf_event__synthesize_stat_round(struct perf_tool *tool,
1413				      u64 evtime, u64 type,
1414				      perf_event__handler_t process,
1415				      struct machine *machine)
1416{
1417	struct perf_record_stat_round event;
1418
1419	event.header.type = PERF_RECORD_STAT_ROUND;
1420	event.header.size = sizeof(event);
1421	event.header.misc = 0;
1422
1423	event.time = evtime;
1424	event.type = type;
1425
1426	return process(tool, (union perf_event *) &event, NULL, machine);
1427}
1428
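/*
 * Compute the size of a PERF_RECORD_SAMPLE for the given sample_type and
 * read_format bits; this must mirror the layout written by
 * perf_event__synthesize_sample() below.
 */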
1429size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1430{
1431	size_t sz, result = sizeof(struct perf_record_sample);
1432
1433	if (type & PERF_SAMPLE_IDENTIFIER)
1434		result += sizeof(u64);
1435
1436	if (type & PERF_SAMPLE_IP)
1437		result += sizeof(u64);
1438
1439	if (type & PERF_SAMPLE_TID)
1440		result += sizeof(u64);
1441
1442	if (type & PERF_SAMPLE_TIME)
1443		result += sizeof(u64);
1444
1445	if (type & PERF_SAMPLE_ADDR)
1446		result += sizeof(u64);
1447
1448	if (type & PERF_SAMPLE_ID)
1449		result += sizeof(u64);
1450
1451	if (type & PERF_SAMPLE_STREAM_ID)
1452		result += sizeof(u64);
1453
1454	if (type & PERF_SAMPLE_CPU)
1455		result += sizeof(u64);
1456
1457	if (type & PERF_SAMPLE_PERIOD)
1458		result += sizeof(u64);
1459
1460	if (type & PERF_SAMPLE_READ) {
1461		result += sizeof(u64);
1462		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1463			result += sizeof(u64);
1464		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1465			result += sizeof(u64);
1466		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1467		if (read_format & PERF_FORMAT_GROUP) {
1468			sz = sample_read_value_size(read_format);
1469			result += sz * sample->read.group.nr;
1470		} else {
1471			result += sizeof(u64);
1472			if (read_format & PERF_FORMAT_LOST)
1473				result += sizeof(u64);
1474		}
1475	}
1476
1477	if (type & PERF_SAMPLE_CALLCHAIN) {
1478		sz = (sample->callchain->nr + 1) * sizeof(u64);
1479		result += sz;
1480	}
1481
1482	if (type & PERF_SAMPLE_RAW) {
1483		result += sizeof(u32);
1484		result += sample->raw_size;
1485	}
1486
1487	if (type & PERF_SAMPLE_BRANCH_STACK) {
1488		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1489		/* nr, hw_idx */
1490		sz += 2 * sizeof(u64);
1491		result += sz;
1492	}
1493
1494	if (type & PERF_SAMPLE_REGS_USER) {
1495		if (sample->user_regs.abi) {
1496			result += sizeof(u64);
1497			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1498			result += sz;
1499		} else {
1500			result += sizeof(u64);
1501		}
1502	}
1503
1504	if (type & PERF_SAMPLE_STACK_USER) {
1505		sz = sample->user_stack.size;
1506		result += sizeof(u64);
1507		if (sz) {
1508			result += sz;
1509			result += sizeof(u64);
1510		}
1511	}
1512
1513	if (type & PERF_SAMPLE_WEIGHT_TYPE)
1514		result += sizeof(u64);
1515
1516	if (type & PERF_SAMPLE_DATA_SRC)
1517		result += sizeof(u64);
1518
1519	if (type & PERF_SAMPLE_TRANSACTION)
1520		result += sizeof(u64);
1521
1522	if (type & PERF_SAMPLE_REGS_INTR) {
1523		if (sample->intr_regs.abi) {
1524			result += sizeof(u64);
1525			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1526			result += sz;
1527		} else {
1528			result += sizeof(u64);
1529		}
1530	}
1531
1532	if (type & PERF_SAMPLE_PHYS_ADDR)
1533		result += sizeof(u64);
1534
1535	if (type & PERF_SAMPLE_CGROUP)
1536		result += sizeof(u64);
1537
1538	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
1539		result += sizeof(u64);
1540
1541	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
1542		result += sizeof(u64);
1543
1544	if (type & PERF_SAMPLE_AUX) {
1545		result += sizeof(u64);
1546		result += sample->aux_sample.size;
1547	}
1548
1549	return result;
1550}
1551
1552void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
1553					       __u64 *array, u64 type __maybe_unused)
1554{
1555	*array = data->weight;
1556}
1557
1558static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
1559				     const struct perf_sample *sample)
1560{
1561	size_t sz = sample_read_value_size(read_format);
1562	struct sample_read_value *v = sample->read.group.values;
1563
1564	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
1565		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1566		memcpy(array, v, sz);
1567		array = (void *)array + sz;
1568	}
1569	return array;
1570}
1571
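/*
 * Serialize a struct perf_sample back into the raw PERF_RECORD_SAMPLE layout.
 * The caller is expected to have sized the event buffer with
 * perf_event__sample_event_size().
 */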
1572int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1573				  const struct perf_sample *sample)
1574{
1575	__u64 *array;
1576	size_t sz;
1577	/*
1578	 * used for cross-endian analysis. See git commit 65014ab3
1579	 * for why this goofiness is needed.
1580	 */
1581	union u64_swap u;
1582
1583	array = event->sample.array;
1584
1585	if (type & PERF_SAMPLE_IDENTIFIER) {
1586		*array = sample->id;
1587		array++;
1588	}
1589
1590	if (type & PERF_SAMPLE_IP) {
1591		*array = sample->ip;
1592		array++;
1593	}
1594
1595	if (type & PERF_SAMPLE_TID) {
1596		u.val32[0] = sample->pid;
1597		u.val32[1] = sample->tid;
1598		*array = u.val64;
1599		array++;
1600	}
1601
1602	if (type & PERF_SAMPLE_TIME) {
1603		*array = sample->time;
1604		array++;
1605	}
1606
1607	if (type & PERF_SAMPLE_ADDR) {
1608		*array = sample->addr;
1609		array++;
1610	}
1611
1612	if (type & PERF_SAMPLE_ID) {
1613		*array = sample->id;
1614		array++;
1615	}
1616
1617	if (type & PERF_SAMPLE_STREAM_ID) {
1618		*array = sample->stream_id;
1619		array++;
1620	}
1621
1622	if (type & PERF_SAMPLE_CPU) {
1623		u.val32[0] = sample->cpu;
1624		u.val32[1] = 0;
1625		*array = u.val64;
1626		array++;
1627	}
1628
1629	if (type & PERF_SAMPLE_PERIOD) {
1630		*array = sample->period;
1631		array++;
1632	}
1633
1634	if (type & PERF_SAMPLE_READ) {
1635		if (read_format & PERF_FORMAT_GROUP)
1636			*array = sample->read.group.nr;
1637		else
1638			*array = sample->read.one.value;
1639		array++;
1640
1641		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1642			*array = sample->read.time_enabled;
1643			array++;
1644		}
1645
1646		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1647			*array = sample->read.time_running;
1648			array++;
1649		}
1650
1651		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1652		if (read_format & PERF_FORMAT_GROUP) {
1653			array = copy_read_group_values(array, read_format,
1654						       sample);
1655		} else {
1656			*array = sample->read.one.id;
1657			array++;
1658
1659			if (read_format & PERF_FORMAT_LOST) {
1660				*array = sample->read.one.lost;
1661				array++;
1662			}
1663		}
1664	}
1665
1666	if (type & PERF_SAMPLE_CALLCHAIN) {
1667		sz = (sample->callchain->nr + 1) * sizeof(u64);
1668		memcpy(array, sample->callchain, sz);
1669		array = (void *)array + sz;
1670	}
1671
1672	if (type & PERF_SAMPLE_RAW) {
1673		u.val32[0] = sample->raw_size;
1674		*array = u.val64;
1675		array = (void *)array + sizeof(u32);
1676
1677		memcpy(array, sample->raw_data, sample->raw_size);
1678		array = (void *)array + sample->raw_size;
1679	}
1680
1681	if (type & PERF_SAMPLE_BRANCH_STACK) {
1682		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1683		/* nr, hw_idx */
1684		sz += 2 * sizeof(u64);
1685		memcpy(array, sample->branch_stack, sz);
1686		array = (void *)array + sz;
1687	}
1688
1689	if (type & PERF_SAMPLE_REGS_USER) {
1690		if (sample->user_regs.abi) {
1691			*array++ = sample->user_regs.abi;
1692			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1693			memcpy(array, sample->user_regs.regs, sz);
1694			array = (void *)array + sz;
1695		} else {
1696			*array++ = 0;
1697		}
1698	}
1699
1700	if (type & PERF_SAMPLE_STACK_USER) {
1701		sz = sample->user_stack.size;
1702		*array++ = sz;
1703		if (sz) {
1704			memcpy(array, sample->user_stack.data, sz);
1705			array = (void *)array + sz;
1706			*array++ = sz;
1707		}
1708	}
1709
1710	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
1711		arch_perf_synthesize_sample_weight(sample, array, type);
1712		array++;
1713	}
1714
1715	if (type & PERF_SAMPLE_DATA_SRC) {
1716		*array = sample->data_src;
1717		array++;
1718	}
1719
1720	if (type & PERF_SAMPLE_TRANSACTION) {
1721		*array = sample->transaction;
1722		array++;
1723	}
1724
1725	if (type & PERF_SAMPLE_REGS_INTR) {
1726		if (sample->intr_regs.abi) {
1727			*array++ = sample->intr_regs.abi;
1728			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1729			memcpy(array, sample->intr_regs.regs, sz);
1730			array = (void *)array + sz;
1731		} else {
1732			*array++ = 0;
1733		}
1734	}
1735
1736	if (type & PERF_SAMPLE_PHYS_ADDR) {
1737		*array = sample->phys_addr;
1738		array++;
1739	}
1740
1741	if (type & PERF_SAMPLE_CGROUP) {
1742		*array = sample->cgroup;
1743		array++;
1744	}
1745
1746	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
1747		*array = sample->data_page_size;
1748		array++;
1749	}
1750
1751	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
1752		*array = sample->code_page_size;
1753		array++;
1754	}
1755
1756	if (type & PERF_SAMPLE_AUX) {
1757		sz = sample->aux_sample.size;
1758		*array++ = sz;
1759		memcpy(array, sample->aux_sample.data, sz);
1760		array = (void *)array + sz;
1761	}
1762
1763	return 0;
1764}
1765
1766int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
1767{
1768	__u64 *start = array;
1769
1770	/*
1771	 * used for cross-endian analysis. See git commit 65014ab3
1772	 * for why this goofiness is needed.
1773	 */
1774	union u64_swap u;
1775
1776	if (type & PERF_SAMPLE_TID) {
1777		u.val32[0] = sample->pid;
1778		u.val32[1] = sample->tid;
1779		*array = u.val64;
1780		array++;
1781	}
1782
1783	if (type & PERF_SAMPLE_TIME) {
1784		*array = sample->time;
1785		array++;
1786	}
1787
1788	if (type & PERF_SAMPLE_ID) {
1789		*array = sample->id;
1790		array++;
1791	}
1792
1793	if (type & PERF_SAMPLE_STREAM_ID) {
1794		*array = sample->stream_id;
1795		array++;
1796	}
1797
1798	if (type & PERF_SAMPLE_CPU) {
1799		u.val32[0] = sample->cpu;
1800		u.val32[1] = 0;
1801		*array = u.val64;
1802		array++;
1803	}
1804
1805	if (type & PERF_SAMPLE_IDENTIFIER) {
1806		*array = sample->id;
1807		array++;
1808	}
1809
1810	return (void *)array - (void *)start;
1811}
1812
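/*
 * Synthesize PERF_RECORD_ID_INDEX event(s) mapping sample IDs to their evsel
 * index, cpu and tid, splitting into multiple records when the entries do not
 * fit within a single 16-bit sized event.
 */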
1813int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1814				      struct evlist *evlist, struct machine *machine, size_t from)
1815{
1816	union perf_event *ev;
1817	struct evsel *evsel;
1818	size_t nr = 0, i = 0, sz, max_nr, n, pos;
1819	size_t e1_sz = sizeof(struct id_index_entry);
1820	size_t e2_sz = sizeof(struct id_index_entry_2);
1821	size_t etot_sz = e1_sz + e2_sz;
1822	bool e2_needed = false;
1823	int err;
1824
1825	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;
1826
1827	pos = 0;
1828	evlist__for_each_entry(evlist, evsel) {
1829		if (pos++ < from)
1830			continue;
1831		nr += evsel->core.ids;
1832	}
1833
1834	if (!nr)
1835		return 0;
1836
1837	pr_debug2("Synthesizing id index\n");
1838
1839	n = nr > max_nr ? max_nr : nr;
1840	sz = sizeof(struct perf_record_id_index) + n * etot_sz;
1841	ev = zalloc(sz);
1842	if (!ev)
1843		return -ENOMEM;
1844
1845	sz = sizeof(struct perf_record_id_index) + n * e1_sz;
1846
1847	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1848	ev->id_index.nr = n;
1849
1850	pos = 0;
1851	evlist__for_each_entry(evlist, evsel) {
1852		u32 j;
1853
1854		if (pos++ < from)
1855			continue;
1856		for (j = 0; j < evsel->core.ids; j++, i++) {
1857			struct id_index_entry *e;
1858			struct id_index_entry_2 *e2;
1859			struct perf_sample_id *sid;
1860
1861			if (i >= n) {
1862				ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
1863				err = process(tool, ev, NULL, machine);
1864				if (err)
1865					goto out_err;
1866				nr -= n;
1867				i = 0;
1868				e2_needed = false;
1869			}
1870
1871			e = &ev->id_index.entries[i];
1872
1873			e->id = evsel->core.id[j];
1874
1875			sid = evlist__id2sid(evlist, e->id);
1876			if (!sid) {
1877				free(ev);
1878				return -ENOENT;
1879			}
1880
1881			e->idx = sid->idx;
1882			e->cpu = sid->cpu.cpu;
1883			e->tid = sid->tid;
1884
1885			if (sid->machine_pid)
1886				e2_needed = true;
1887
1888			e2 = (void *)ev + sz;
1889			e2[i].machine_pid = sid->machine_pid;
1890			e2[i].vcpu        = sid->vcpu.cpu;
1891		}
1892	}
1893
1894	sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
1895	ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
1896	ev->id_index.nr = nr;
1897
1898	err = process(tool, ev, NULL, machine);
1899out_err:
1900	free(ev);
1901
1902	return err;
1903}
1904
1905int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1906				    struct evlist *evlist, struct machine *machine)
1907{
1908	return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
1909}
1910
1911int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1912				  struct target *target, struct perf_thread_map *threads,
1913				  perf_event__handler_t process, bool needs_mmap,
1914				  bool data_mmap, unsigned int nr_threads_synthesize)
1915{
	/*
	 * When perf runs in a non-root PID namespace and the namespace's proc
	 * FS is not mounted, nsinfo__is_in_root_namespace() returns false.
	 * In that case the proc FS comes from the parent namespace, so the
	 * perf tool would wrongly gather process info from its parent PID
	 * namespace.
	 *
	 * To avoid the confusion of running in a child PID namespace while
	 * synthesizing thread info from the parent PID namespace, return
	 * failure with a warning.
	 */
1927	if (!nsinfo__is_in_root_namespace()) {
1928		pr_err("Perf runs in non-root PID namespace but it tries to ");
1929		pr_err("gather process info from its parent PID namespace.\n");
1930		pr_err("Please mount the proc file system properly, e.g. ");
1931		pr_err("add the option '--mount-proc' for unshare command.\n");
1932		return -EPERM;
1933	}
1934
1935	if (target__has_task(target))
1936		return perf_event__synthesize_thread_map(tool, threads, process, machine,
1937							 needs_mmap, data_mmap);
1938	else if (target__has_cpu(target))
1939		return perf_event__synthesize_threads(tool, process, machine,
1940						      needs_mmap, data_mmap,
1941						      nr_threads_synthesize);
1942	/* command specified */
1943	return 0;
1944}
1945
1946int machine__synthesize_threads(struct machine *machine, struct target *target,
1947				struct perf_thread_map *threads, bool needs_mmap,
1948				bool data_mmap, unsigned int nr_threads_synthesize)
1949{
1950	return __machine__synthesize_threads(machine, NULL, target, threads,
1951					     perf_event__process, needs_mmap,
1952					     data_mmap, nr_threads_synthesize);
1953}
1954
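/* Allocate a PERF_RECORD_EVENT_UPDATE of the given payload size, type and id. */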
1955static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1956{
1957	struct perf_record_event_update *ev;
1958
1959	size += sizeof(*ev);
1960	size  = PERF_ALIGN(size, sizeof(u64));
1961
1962	ev = zalloc(size);
1963	if (ev) {
1964		ev->header.type = PERF_RECORD_EVENT_UPDATE;
1965		ev->header.size = (u16)size;
1966		ev->type	= type;
1967		ev->id		= id;
1968	}
1969	return ev;
1970}
1971
1972int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1973					     perf_event__handler_t process)
1974{
1975	size_t size = strlen(evsel->unit);
1976	struct perf_record_event_update *ev;
1977	int err;
1978
1979	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1980	if (ev == NULL)
1981		return -ENOMEM;
1982
1983	strlcpy(ev->unit, evsel->unit, size + 1);
1984	err = process(tool, (union perf_event *)ev, NULL, NULL);
1985	free(ev);
1986	return err;
1987}
1988
1989int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1990					      perf_event__handler_t process)
1991{
1992	struct perf_record_event_update *ev;
1993	struct perf_record_event_update_scale *ev_data;
1994	int err;
1995
1996	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1997	if (ev == NULL)
1998		return -ENOMEM;
1999
2000	ev->scale.scale = evsel->scale;
2001	err = process(tool, (union perf_event *)ev, NULL, NULL);
2002	free(ev);
2003	return err;
2004}
2005
2006int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
2007					     perf_event__handler_t process)
2008{
2009	struct perf_record_event_update *ev;
2010	size_t len = strlen(evsel__name(evsel));
2011	int err;
2012
2013	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
2014	if (ev == NULL)
2015		return -ENOMEM;
2016
2017	strlcpy(ev->name, evsel->name, len + 1);
2018	err = process(tool, (union perf_event *)ev, NULL, NULL);
2019	free(ev);
2020	return err;
2021}
2022
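/*
 * Synthesize a PERF_EVENT_UPDATE__CPUS record describing the cpus the
 * evsel is bound to (evsel->core.own_cpus), encoded like a regular
 * PERF_RECORD_CPU_MAP payload.
 */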
2023int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
2024					     perf_event__handler_t process)
2025{
2026	struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
2027	struct perf_record_event_update *ev;
2028	int err;
2029
2030	ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64));
2031	if (!ev)
2032		return -ENOMEM;
2033
2034	syn_data.data = &ev->cpus.cpus;
2035	ev->header.type = PERF_RECORD_EVENT_UPDATE;
2036	ev->header.size = (u16)syn_data.size;
2037	ev->type	= PERF_EVENT_UPDATE__CPUS;
2038	ev->id		= evsel->core.id[0];
2039	cpu_map_data__synthesize(&syn_data);
2040
2041	err = process(tool, (union perf_event *)ev, NULL, NULL);
2042	free(ev);
2043	return err;
2044}
2045
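/*
 * Emit one PERF_RECORD_HEADER_ATTR event per evsel in the evlist, each
 * carrying the perf_event_attr and the evsel's sample ids.
 */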
2046int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
2047				 perf_event__handler_t process)
2048{
2049	struct evsel *evsel;
2050	int err = 0;
2051
2052	evlist__for_each_entry(evlist, evsel) {
2053		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
2054						  evsel->core.id, process);
2055		if (err) {
2056			pr_debug("failed to create perf header attribute\n");
2057			return err;
2058		}
2059	}
2060
2061	return err;
2062}
2063
2064static bool has_unit(struct evsel *evsel)
2065{
2066	return evsel->unit && *evsel->unit;
2067}
2068
2069static bool has_scale(struct evsel *evsel)
2070{
2071	return evsel->scale != 1;
2072}
2073
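/*
 * Emit the per-evsel details that the attr event does not carry: unit,
 * scale, own cpus and, for pipe output, the event name.  Events that are
 * not supported are skipped.
 */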
2074int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
2075				      perf_event__handler_t process, bool is_pipe)
2076{
2077	struct evsel *evsel;
2078	int err;
2079
2080	/*
2081	 * Synthesize the additional evsel information that is not carried
2082	 * within the attr event: unit, scale, name.
2083	 */
2084	evlist__for_each_entry(evsel_list, evsel) {
2085		if (!evsel->supported)
2086			continue;
2087
2088		/*
2089		 * Synthesize the unit and scale only if they are defined.
2090		 */
2091		if (has_unit(evsel)) {
2092			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
2093			if (err < 0) {
2094				pr_err("Couldn't synthesize evsel unit.\n");
2095				return err;
2096			}
2097		}
2098
2099		if (has_scale(evsel)) {
2100			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
2101			if (err < 0) {
2102				pr_err("Couldn't synthesize evsel scale.\n");
2103				return err;
2104			}
2105		}
2106
2107		if (evsel->core.own_cpus) {
2108			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
2109			if (err < 0) {
2110				pr_err("Couldn't synthesize evsel cpus.\n");
2111				return err;
2112			}
2113		}
2114
2115		/*
2116		 * Name is needed only for pipe output;
2117		 * the perf.data file carries event names.
2118		 */
2119		if (is_pipe) {
2120			err = perf_event__synthesize_event_update_name(tool, evsel, process);
2121			if (err < 0) {
2122				pr_err("Couldn't synthesize evsel name.\n");
2123				return err;
2124			}
2125		}
2126	}
2127	return 0;
2128}
2129
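/*
 * Build and emit a single PERF_RECORD_HEADER_ATTR event.  The record is
 * laid out roughly as:
 *
 *	struct perf_event_header header;
 *	struct perf_event_attr	 attr;		(u64 aligned)
 *	u64			 id[ids];
 *
 * The total size must still fit in the u16 header.size field, otherwise
 * -E2BIG is returned.
 */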
2130int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
2131				u32 ids, u64 *id, perf_event__handler_t process)
2132{
2133	union perf_event *ev;
2134	size_t size;
2135	int err;
2136
2137	size = sizeof(struct perf_event_attr);
2138	size = PERF_ALIGN(size, sizeof(u64));
2139	size += sizeof(struct perf_event_header);
2140	size += ids * sizeof(u64);
2141
2142	ev = zalloc(size);
2143
2144	if (ev == NULL)
2145		return -ENOMEM;
2146
2147	ev->attr.attr = *attr;
2148	memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64));
2149
2150	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2151	ev->attr.header.size = (u16)size;
2152
2153	if (ev->attr.header.size == size)
2154		err = process(tool, ev, NULL, NULL);
2155	else
2156		err = -E2BIG;
2157
2158	free(ev);
2159
2160	return err;
2161}
2162
2163#ifdef HAVE_LIBTRACEEVENT
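/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA event followed by the tracing
 * data itself, written straight to the pipe by tracing_data_put().
 * Returns the u64 aligned size of the tracing data or -1 on error.
 */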
2164int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
2165					perf_event__handler_t process)
2166{
2167	union perf_event ev;
2168	struct tracing_data *tdata;
2169	ssize_t size = 0, aligned_size = 0, padding;
2170	struct feat_fd ff;
2171
2172	/*
2173	 * We are going to store the size of the data followed
2174	 * by the data contents.  Since fd is a pipe, we cannot
2175	 * seek back to store the size of the data once we know
2176	 * it.  Instead we:
2177	 *
2178	 * - write the tracing data to a temp file
2179	 * - get the data size and write it to the pipe
2180	 * - write the tracing data from the temp file
2181	 *   to the pipe
2182	 */
2183	tdata = tracing_data_get(&evlist->core.entries, fd, true);
2184	if (!tdata)
2185		return -1;
2186
2187	memset(&ev, 0, sizeof(ev));
2188
2189	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2190	size = tdata->size;
2191	aligned_size = PERF_ALIGN(size, sizeof(u64));
2192	padding = aligned_size - size;
2193	ev.tracing_data.header.size = sizeof(ev.tracing_data);
2194	ev.tracing_data.size = aligned_size;
2195
2196	process(tool, &ev, NULL, NULL);
2197
2198	/*
2199	 * The put function copies all the tracing data
2200	 * stored in the temp file to the pipe.
2201	 */
2202	tracing_data_put(tdata);
2203
2204	ff = (struct feat_fd){ .fd = fd };
2205	if (write_padded(&ff, NULL, 0, padding))
2206		return -1;
2207
2208	return aligned_size;
2209}
2210#endif
2211
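/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID event for a DSO that was hit during
 * the session, carrying the build id bytes and the DSO long name padded
 * to NAME_ALIGN.
 */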
2212int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
2213				    perf_event__handler_t process, struct machine *machine)
2214{
2215	union perf_event ev;
2216	size_t len;
2217
2218	if (!pos->hit)
2219		return 0;
2220
2221	memset(&ev, 0, sizeof(ev));
2222
2223	len = pos->long_name_len + 1;
2224	len = PERF_ALIGN(len, NAME_ALIGN);
2225	ev.build_id.size = min(pos->bid.size, sizeof(pos->bid.data));
2226	memcpy(&ev.build_id.build_id, pos->bid.data, ev.build_id.size);
2227	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2228	ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
2229	ev.build_id.pid = machine->pid;
2230	ev.build_id.header.size = sizeof(ev.build_id) + len;
2231	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2232
2233	return process(tool, &ev, NULL, machine);
2234}
2235
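/*
 * Synthesize the header style events needed at the start of a 'perf stat'
 * stream: the event attributes (when 'attrs' is set), the extra evsel
 * info, the thread and cpu maps and finally the stat config.  A rough
 * usage sketch, where 'stat_config', 'evlist' and 'process_event' stand
 * in for the caller's own objects and handler:
 *
 *	perf_event__synthesize_stat_events(&stat_config, tool, evlist,
 *					   process_event, true);
 */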
2236int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
2237				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
2238{
2239	int err;
2240
2241	if (attrs) {
2242		err = perf_event__synthesize_attrs(tool, evlist, process);
2243		if (err < 0) {
2244			pr_err("Couldn't synthesize attrs.\n");
2245			return err;
2246		}
2247	}
2248
2249	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	if (err < 0) {
		pr_err("Couldn't synthesize extra attributes.\n");
		return err;
	}

2250	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
2251	if (err < 0) {
2252		pr_err("Couldn't synthesize thread map.\n");
2253		return err;
2254	}
2255
2256	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
2257	if (err < 0) {
2258		pr_err("Couldn't synthesize cpu map.\n");
2259		return err;
2260	}
2261
2262	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2263	if (err < 0) {
2264		pr_err("Couldn't synthesize config.\n");
2265		return err;
2266	}
2267
2268	return 0;
2269}
2270
2271extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2272
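/*
 * Emit one PERF_RECORD_HEADER_FEATURE event per feature bit set in the
 * session header, filling each payload via the corresponding feat_ops[]
 * write callback, and terminate the list with a HEADER_LAST_FEATURE
 * record.
 */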
2273int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2274				    struct evlist *evlist, perf_event__handler_t process)
2275{
2276	struct perf_header *header = &session->header;
2277	struct perf_record_header_feature *fe;
2278	struct feat_fd ff;
2279	size_t sz, sz_hdr;
2280	int feat, ret;
2281
2282	sz_hdr = sizeof(fe->header);
2283	sz = sizeof(union perf_event);
2284	/* get a nice alignment */
2285	sz = PERF_ALIGN(sz, page_size);
2286
2287	memset(&ff, 0, sizeof(ff));
2288
2289	ff.buf = malloc(sz);
2290	if (!ff.buf)
2291		return -ENOMEM;
2292
2293	ff.size = sz - sz_hdr;
2294	ff.ph = &session->header;
2295
2296	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2297		if (!feat_ops[feat].synthesize) {
2298			pr_debug("No synthesize handler for header feature %d\n", feat);
2299			continue;
2300		}
2301
2302		ff.offset = sizeof(*fe);
2303
2304		ret = feat_ops[feat].write(&ff, evlist);
2305		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2306			pr_debug("Error writing feature\n");
2307			continue;
2308		}
2309		/* ff.buf may have changed due to realloc in do_write() */
2310		fe = ff.buf;
2311		memset(fe, 0, sizeof(*fe));
2312
2313		fe->feat_id = feat;
2314		fe->header.type = PERF_RECORD_HEADER_FEATURE;
2315		fe->header.size = ff.offset;
2316
2317		ret = process(tool, ff.buf, NULL, NULL);
2318		if (ret) {
2319			free(ff.buf);
2320			return ret;
2321		}
2322	}
2323
2324	/* Send HEADER_LAST_FEATURE mark. */
2325	fe = ff.buf;
2326	fe->feat_id     = HEADER_LAST_FEATURE;
2327	fe->header.type = PERF_RECORD_HEADER_FEATURE;
2328	fe->header.size = sizeof(*fe);
2329
2330	ret = process(tool, ff.buf, NULL, NULL);
2331
2332	free(ff.buf);
2333	return ret;
2334}
2335
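/*
 * When writing to a pipe there is no perf.data header to consult later,
 * so the attributes, the feature records and, if tracepoints are in use,
 * the tracing data are synthesized as events at the start of the stream.
 */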
2336int perf_event__synthesize_for_pipe(struct perf_tool *tool,
2337				    struct perf_session *session,
2338				    struct perf_data *data,
2339				    perf_event__handler_t process)
2340{
2341	int err;
2342	int ret = 0;
2343	struct evlist *evlist = session->evlist;
2344
2345	/*
2346	 * We need to synthesize events first, because some
2347	 * features work on top of them (on the report side).
2348	 */
2349	err = perf_event__synthesize_attrs(tool, evlist, process);
2350	if (err < 0) {
2351		pr_err("Couldn't synthesize attrs.\n");
2352		return err;
2353	}
2354	ret += err;
2355
2356	err = perf_event__synthesize_features(tool, session, evlist, process);
2357	if (err < 0) {
2358		pr_err("Couldn't synthesize features.\n");
2359		return err;
2360	}
2361	ret += err;
2362
2363#ifdef HAVE_LIBTRACEEVENT
2364	if (have_tracepoints(&evlist->core.entries)) {
2365		int fd = perf_data__fd(data);
2366
2367		/*
2368		 * FIXME: err <= 0 here actually means that
2369		 * there were no tracepoints, so it's not really
2370		 * an error, just that we don't need to
2371		 * synthesize anything.  We really have to
2372		 * return this more properly and also
2373		 * propagate the errors that currently call die().
2374		 */
2375		err = perf_event__synthesize_tracing_data(tool,	fd, evlist,
2376							  process);
2377		if (err <= 0) {
2378			pr_err("Couldn't record tracing data.\n");
2379			return err;
2380		}
2381		ret += err;
2382	}
2383#else
2384	(void)data;
2385#endif
2386
2387	return ret;
2388}
2389
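/*
 * Parse a comma separated event synthesis option string (as used by e.g.
 * 'perf record --synth') into a PERF_SYNTH_* bit mask, for instance:
 *
 *	"no" / "none"	-> 0
 *	"all"		-> PERF_SYNTH_ALL
 *	"task,mmap"	-> PERF_SYNTH_TASK | PERF_SYNTH_MMAP
 *
 * An unrecognized token yields -1.
 */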
2390int parse_synth_opt(char *synth)
2391{
2392	char *p, *q;
2393	int ret = 0;
2394
2395	if (synth == NULL)
2396		return -1;
2397
2398	for (q = synth; (p = strsep(&q, ",")); p = q) {
2399		if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
2400			return 0;
2401
2402		if (!strcasecmp(p, "all"))
2403			return PERF_SYNTH_ALL;
2404
2405		if (!strcasecmp(p, "task"))
2406			ret |= PERF_SYNTH_TASK;
2407		else if (!strcasecmp(p, "mmap"))
2408			ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
2409		else if (!strcasecmp(p, "cgroup"))
2410			ret |= PERF_SYNTH_CGROUP;
2411		else
2412			return -1;
2413	}
2414
2415	return ret;
2416}
2417