// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "config.h"
#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "hisi-ptt.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>
#include "util/sample.h"

/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel__leader(evsel) == leader ||
			     (evsel__leader(evsel) == evsel &&
			      evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!evsel__has_leader(evsel, leader)) {
				evsel__set_leader(evsel, leader);
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}
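
/*
 * Illustrative sketch (not part of the build): given an evlist ordered
 * { A, B, C, D } where A is an AUX area event and every event initially
 * leads itself, evlist__regroup(evlist, A, C) makes A the leader of B
 * and C, leaving D alone:
 *
 *	before: A(leader=A) B(leader=B) C(leader=C) D(leader=D)
 *	after:  A(leader=A, nr_members=3) B(leader=A) C(leader=A) D(leader=D)
 *
 * The first pass only validates; the second pass re-parents, so the evlist
 * is never left half-regrouped on error.
 */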

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu.cpu;

	if (!mp->len || !mp->mmap_needed) {
		mm->base = NULL;
		return 0;
	}

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}
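
/*
 * Illustrative sketch (not part of the build): when the AUX area length is a
 * power of 2, wrapping a position into the buffer reduces to a bitwise AND
 * with the mask; otherwise the mask is 0 and a modulo is needed, as done in
 * __auxtrace_mmap__read() below. E.g. with 16 pages of 4KiB:
 *
 *	len  = 16 * 4096 = 0x10000  (power of 2)
 *	mask = 0xffff
 *	pos 0x12345 & mask = 0x2345 == 0x12345 % 0x10000
 */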

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx)
{
	bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

	mp->mmap_needed = evsel->needs_auxtrace_mmap;

	if (!mp->mmap_needed)
		return;

	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		if (evlist->core.threads)
			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu.cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu.cpu;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}
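
/*
 * Illustrative sketch (not part of the build): a 70MiB buffer queued on a
 * 32-bit build is split into 32MiB + 32MiB + 6MiB pieces. Only the first
 * piece has 'consecutive' clear; the rest are marked consecutive so the
 * decoder knows the trace data continues without a gap:
 *
 *	piece 0: size 32MiB, consecutive = false
 *	piece 1: size 32MiB, consecutive = true
 *	piece 2: size  6MiB, consecutive = true  (the original buffer, resized)
 */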

static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap);
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = { event->auxtrace.cpu },
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}
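
/*
 * Illustrative sketch (not part of the build): the heap is a binary min-heap
 * of (queue_nr, ordinal) pairs stored in an array, with the children of
 * index i at 2i+1 and 2i+2, used to pick the queue with the smallest
 * timestamp (ordinal) next. E.g. after:
 *
 *	auxtrace_heap__add(&heap, 0, 300);
 *	auxtrace_heap__add(&heap, 1, 100);
 *	auxtrace_heap__add(&heap, 2, 200);
 *
 * heap_array[0] is queue 1 (ordinal 100); auxtrace_heap__pop() removes it
 * and re-heapifies so queue 2 (ordinal 200) moves to the root.
 */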

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr) {
		itr->evlist = evlist;
		return itr->recording_options(itr, evlist, opts);
	}
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr && itr->parse_snapshot_options)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

	if (per_cpu_mmaps) {
		struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		int cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);

		if (cpu_map_idx == -1)
			return -EINVAL;
		return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
	}

	return perf_evsel__enable_thread(&evsel->core, idx);
}

int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
	struct evsel *evsel;

	if (!itr->evlist || !itr->pmu)
		return -EINVAL;

	evlist__for_each_entry(itr->evlist, evsel) {
		if (evsel->core.attr.type == itr->pmu->type) {
			if (evsel->disabled)
				return 0;
			return evlist__enable_event_idx(itr->evlist, evsel, idx);
		}
	}
	return -EINVAL;
}
/*
 * The event record size is a 16-bit field, which gives a maximum record size
 * of about 64KiB. Allow about 4KiB for the rest of the sample record, giving
 * a maximum AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
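
/*
 * Illustrative arithmetic (not part of the build): perf_event_header.size is
 * a 16-bit field, so a whole record is capped at roughly 64KiB. Reserving
 * about 4KiB for the non-AUX parts of the sample leaves:
 *
 *	65536 - 4096 = 61440 bytes = 60KiB  (MAX_AUX_SAMPLE_SIZE)
 */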

static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}

int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}

void auxtrace_regroup_aux_output(struct evlist *evlist)
{
	struct evsel *evsel, *aux_evsel = NULL;
	struct evsel_config_term *term;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_OUTPUT);
		/* If possible, group with the AUX event */
		if (term && aux_evsel)
			evlist__regroup(evlist, aux_evsel, evsel);
	}
}

struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}

int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENOENT;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}

struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	err = evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		return err;

	if (!sample.aux_sample.size)
		return 0;

	offset += sample.aux_sample.data - (void *)event;

	return session->auxtrace->queue_data(session, &sample, NULL, offset);
}

int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (perf_data__is_pipe(session->data))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}

void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
	int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
				int code, int cpu, pid_t pid, pid_t tid, u64 ip,
				const char *msg, u64 timestamp,
				pid_t machine_pid, int vcpu)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
	if (machine_pid) {
		auxtrace_error->fmt = 2;
		auxtrace_error->machine_pid = machine_pid;
		auxtrace_error->vcpu = vcpu;
		size = sizeof(*auxtrace_error);
	} else {
		size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
		       strlen(auxtrace_error->msg) + 1;
	}
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	auxtrace_synth_guest_error(auxtrace_error, type, code, cpu, pid, tid,
				   ip, msg, timestamp, 0, -1);
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
	struct evsel *new_leader = NULL;
	struct evsel *evsel;

	/* Find new leader for the group */
	evlist__for_each_entry(evlist, evsel) {
		if (!evsel__has_leader(evsel, leader) || evsel == leader)
			continue;
		if (!new_leader)
			new_leader = evsel;
		evsel__set_leader(evsel, new_leader);
	}

	/* Update group information */
	if (new_leader) {
		zfree(&new_leader->group_name);
		new_leader->group_name = leader->group_name;
		leader->group_name = NULL;

		new_leader->core.nr_members = leader->core.nr_members - 1;
		leader->core.nr_members = 1;
	}
}

static void unleader_auxtrace(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (auxtrace__evsel_is_auxtrace(session, evsel) &&
		    evsel__is_group_leader(evsel)) {
			unleader_evsel(session->evlist, evsel);
		}
	}
}

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;
	int err;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		err = intel_pt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_INTEL_BTS:
		err = intel_bts_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_ARM_SPE:
		err = arm_spe_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_CS_ETM:
		err = cs_etm__process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_S390_CPUMSF:
		err = s390_cpumsf_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_HISI_PTT:
		err = hisi_ptt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	unleader_auxtrace(session);

	return 0;
}

s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRI_lx64"  offset: %#"PRI_lx64"  ref: %#"PRI_lx64"  idx: %u  tid: %d  cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->intr_events = true;
	synth_opts->errors = true;
	synth_opts->flc = true;
	synth_opts->llc = true;
	synth_opts->tlb = true;
	synth_opts->mem = true;
	synth_opts->remote_access = true;

	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->cycles = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

static int get_flag(const char **ptr, unsigned int *flags)
{
	while (1) {
		char c = **ptr;

		if (c >= 'a' && c <= 'z') {
			*flags |= 1 << (c - 'a');
			++*ptr;
			return 0;
		} else if (c == ' ') {
			++*ptr;
			continue;
		} else {
			return -1;
		}
	}
}

static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
	while (1) {
		switch (**ptr) {
		case '+':
			++*ptr;
			if (get_flag(ptr, plus_flags))
				return -1;
			break;
		case '-':
			++*ptr;
			if (get_flag(ptr, minus_flags))
				return -1;
			break;
		case ' ':
			++*ptr;
			break;
		default:
			return 0;
		}
	}
}
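
/*
 * Illustrative sketch (not part of the build): get_flags() consumes a run of
 * "+<letter>" / "-<letter>" pairs, setting bit (letter - 'a') in the
 * corresponding mask. Which letter means what is defined by the decoder.
 * E.g. parsing "+e-a" gives:
 *
 *	plus_flags  |= 1 << ('e' - 'a')   (0x10)
 *	minus_flags |= 1 << ('a' - 'a')   (0x01)
 *
 * so an option like "d+e-a" enables one debug-log modifier and disables
 * another.
 */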

#define ITRACE_DFLT_LOG_ON_ERROR_SZ 16384

static unsigned int itrace_log_on_error_size(void)
{
	unsigned int sz = 0;

	perf_config_scan("itrace.debug-log-buffer-size", "%u", &sz);
	return sz ?: ITRACE_DFLT_LOG_ON_ERROR_SZ;
}

/*
 * See tools/perf/Documentation/perf-script.txt for information about the
 * options parsed here.
 */
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset)
{
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
		case 'y':
			if (p[-1] == 'y')
				synth_opts->cycles = true;
			else
				synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'I':
			synth_opts->intr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			if (get_flags(&p, &synth_opts->error_plus_flags,
				      &synth_opts->error_minus_flags))
				goto out_err;
			break;
		case 'd':
			synth_opts->log = true;
			if (get_flags(&p, &synth_opts->log_plus_flags,
				      &synth_opts->log_minus_flags))
				goto out_err;
			if (synth_opts->log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR)
				synth_opts->log_on_error_size = itrace_log_on_error_size();
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'G':
		case 'g':
			if (p[-1] == 'G')
				synth_opts->add_callchain = true;
			else
				synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'L':
		case 'l':
			if (p[-1] == 'L')
				synth_opts->add_last_branch = true;
			else
				synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case 'f':
			synth_opts->flc = true;
			break;
		case 'm':
			synth_opts->llc = true;
			break;
		case 't':
			synth_opts->tlb = true;
			break;
		case 'a':
			synth_opts->remote_access = true;
			break;
		case 'M':
			synth_opts->mem = true;
			break;
		case 'q':
			synth_opts->quick += 1;
			break;
		case 'A':
			synth_opts->approx_ipc = true;
			break;
		case 'Z':
			synth_opts->timeless_decoding = true;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions || synth_opts->cycles) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}
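
/*
 * Illustrative sketch (not part of the build): some option strings this
 * parser accepts, and the fields they set (see perf-script.txt):
 *
 *	"i100us"  instructions = true; period 100, multiplied by 1000 for
 *	          'u' and taken as nanoseconds, i.e. 100000ns = 100us
 *	"g16"     callchain = true, callchain_sz = 16
 *	"cre"     calls and returns (both set branches = true) plus errors
 */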

int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
{
	return itrace_do_parse_synth_opts(opt->value, str, unset);
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}

	if (!e->fmt)
		msg = (const char *)&e->time;

	if (e->fmt >= 2 && e->machine_pid)
		ret += fprintf(fp, " machine_pid %d vcpu %d", e->machine_pid, e->vcpu);

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

/*
 * In compat mode, the kernel runs in 64-bit mode while the perf tool runs in
 * 32-bit mode. A 32-bit perf tool cannot access a 64-bit value atomically,
 * which can lead to problems with the sequence below on multiple CPUs: when
 * the perf tool performs either the load or the store of a 64-bit value, on
 * some architectures the operation is split into two instructions, one
 * accessing the low 32 bits and one accessing the high 32 bits; between
 * those two user-space operations the kernel has a chance to update the
 * 64-bit value, leading to unexpected load results.
 *
 *   kernel (64-bit)                        user (32-bit)
 *
 *   if (LOAD ->aux_tail) { --,             LOAD ->aux_head_lo
 *       STORE $aux_data      |       ,--->
 *       FLUSH $aux_data      |       |     LOAD ->aux_head_hi
 *       STORE ->aux_head   --|-------`     smp_rmb()
 *   }                        |             LOAD $data
 *                            |             smp_mb()
 *                            |             STORE ->aux_tail_lo
 *                            `----------->
 *                                          STORE ->aux_tail_hi
 *
 * For this reason, the perf tool cannot work correctly once the AUX head or
 * tail exceeds 4GB (needs more than 32 bits). Nor can the AUX ring buffer
 * simply be limited to less than 4GB, because the pointers increase
 * monotonically: whatever the buffer size, the head and tail eventually grow
 * beyond 4GB and carry into the high 32 bits.
 *
 * To mitigate this and improve the user experience, the perf tool is allowed
 * to work under certain conditions and bails out with an error if it detects
 * an overflow it cannot handle.
 *
 * To read the AUX head, the value is read three times, comparing the high
 * 4 bytes of the first and last reads; if the kernel did not change the high
 * 4 bytes during the read sequence, the second value is safe to use.
 *
 * When compat_auxtrace_mmap__write_tail() detects a carry into the high
 * 32 bits, the write would require two store operations in user space, whose
 * atomicity cannot be guaranteed for a 64-bit write, so it returns '-1' to
 * tell the caller that an overflow error has happened.
 */
u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 first, second, last;
	u64 mask = (u64)(UINT32_MAX) << 32;

	do {
		first = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		second = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		last = READ_ONCE(pc->aux_head);
	} while ((first & mask) != (last & mask));

	return second;
}

int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 mask = (u64)(UINT32_MAX) << 32;

	if (tail & mask)
		return -1;

	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
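
/*
 * Illustrative sketch (not part of the build): suppose the kernel moves
 * aux_head from 0x0ffffffff to 0x100000000 while a 32-bit tool is reading.
 * The three reads might observe:
 *
 *	first  = 0x0ffffffff  (high word 0x0)
 *	second = 0x100000000
 *	last   = 0x100000000  (high word 0x1)
 *
 * The high words of 'first' and 'last' differ, so the loop retries until a
 * stable high word brackets 'second'. Conversely, a tail of 0x100000000 has
 * bits set in the high word, so compat_auxtrace_mmap__write_tail() returns
 * -1 rather than attempt a non-atomic two-instruction store.
 */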

static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;
	int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));

	head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);

	if (snapshot &&
	    auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
		return -1;

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		int err;

		err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
		if (err < 0)
			return err;

		if (itr->read_finish) {
			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}
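
/*
 * Illustrative sketch (not part of the build): with a 64KiB power-of-2 AUX
 * area (mask = 0xffff), old = 0xf000 and head = 0x12000, the new data wraps:
 *
 *	head_off = 0x2000, old_off = 0xf000
 *	size = len - (old_off - head_off) = 0x10000 - 0xd000 = 0x3000
 *	len1 = 0x1000 at the end of the buffer, len2 = 0x2000 at the start
 *	offset = head - size = 0xf000
 *
 * i.e. fn() is handed the trace data as two pieces that together form one
 * contiguous 0x3000-byte PERF_RECORD_AUXTRACE event.
 */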
1933
1934int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
1935			struct perf_tool *tool, process_auxtrace_t fn)
1936{
1937	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
1938}
1939
1940int auxtrace_mmap__read_snapshot(struct mmap *map,
1941				 struct auxtrace_record *itr,
1942				 struct perf_tool *tool, process_auxtrace_t fn,
1943				 size_t snapshot_size)
1944{
1945	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
1946}
1947
1948/**
1949 * struct auxtrace_cache - hash table to implement a cache
1950 * @hashtable: the hashtable
1951 * @sz: hashtable size (number of hlists)
1952 * @entry_size: size of an entry
1953 * @limit: limit the number of entries to this maximum, when reached the cache
1954 *         is dropped and caching begins again with an empty cache
1955 * @cnt: current number of entries
1956 * @bits: hashtable size (@sz = 2^@bits)
1957 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	zfree(&c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
						       u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;
	struct hlist_node *n;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry_safe(entry, n, hlist, hash) {
		if (entry->key == key) {
			hlist_del(&entry->hash);
			return entry;
		}
	}

	return NULL;
}

void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);

	auxtrace_cache__free_entry(c, entry);
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}
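
/*
 * Illustrative sketch of using the cache API above; "struct my_entry" and
 * the key/payload values are hypothetical. Users embed struct
 * auxtrace_cache_entry as the first member so the hash list linkage and
 * key are available to the cache:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 payload;
 *	};
 *
 *	struct auxtrace_cache *c = auxtrace_cache__new(5, sizeof(struct my_entry), 50);
 *	struct my_entry *e = c ? auxtrace_cache__alloc_entry(c) : NULL;
 *
 *	if (e) {
 *		e->payload = 42;
 *		auxtrace_cache__add(c, 0x1234, &e->entry);
 *	}
 *	e = auxtrace_cache__lookup(c, 0x1234);	// NULL if the cache was dropped
 *	auxtrace_cache__free(c);		// frees all remaining entries
 */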

static void addr_filter__free_str(struct addr_filter *filt)
{
	zfree(&filt->str);
	filt->action   = NULL;
	filt->sym_from = NULL;
	filt->sym_to   = NULL;
	filt->filename = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}

static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}
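
/*
 * Worked example of parse_num_or_str() (the input string is hypothetical):
 *
 *	char buf[] = "main / 0x100";
 *	char *inp = buf;
 *	const char *str;
 *	u64 num;
 *
 *	parse_num_or_str(&inp, NULL, &str, " ");  // str = "main", inp -> "/ 0x100"
 *	inp += strspn(inp, " /");
 *	parse_num_or_str(&inp, &num, NULL, " ");  // num = 0x100
 *
 * Note the string case NUL-terminates the token in place, so the input
 * buffer must be writable.
 */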

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}
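
/*
 * Summary of the action keywords handled above:
 *
 *	"filter"    -> start = true,  range = true   trace the address range
 *	"start"     -> start = true,  range = false  start tracing at the address
 *	"stop"      -> start = false, range = false  stop tracing at the address
 *	"tracestop" -> start = false, range = true   stop tracing in the range
 *
 * "tracestop" is also rewritten to "stop" (by skipping its first 5
 * characters), since start/stop/filter are the action names the kernel
 * accepts.
 */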

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}
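
/*
 * Example inputs to parse_sym_idx() (hypothetical): "#2" sets *idx = 2
 * (take the 2nd occurrence of the symbol), "#0", "#g" or "#G" sets
 * *idx = 0 (take the global symbol), and input without '#' leaves
 * *idx = -1 (any occurrence; ambiguous duplicates are reported later).
 */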

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}
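
/*
 * A minimal usage sketch (the filter string is hypothetical):
 *
 *	struct addr_filters filts;
 *	int err;
 *
 *	addr_filters__init(&filts);
 *	err = addr_filters__parse_bare_filter(&filts,
 *			"filter main / 0x100 @ /bin/ls, stop 0xffffffff81000000");
 *	// On success filts.cnt == 2: the first filter has action "filter",
 *	// sym_from "main", size 0x100 and filename "/bin/ls"; the second has
 *	// action "stop" and addr 0xffffffff81000000. Symbols are still
 *	// unresolved at this point.
 *	addr_filters__exit(&filts);
 */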

struct sym_args {
	const char	*name;
	u64		start;
	u64		size;
	int		idx;
	int		cnt;
	bool		started;
	bool		global;
	bool		selected;
	bool		duplicate;
	bool		near;
};

static bool kern_sym_name_match(const char *kname, const char *name)
{
	size_t n = strlen(name);

	return !strcmp(kname, name) ||
	       (!strncmp(kname, name, n) && kname[n] == '\t');
}

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       kern_sym_name_match(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;
	u64 size;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	size = round_up(start, page_size) + page_size - args->start;
	if (size > args->size)
		args->size = size;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err < 0 ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}
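
/*
 * For example (symbol and address hypothetical), "filter __schedule" on the
 * kernel is resolved here by scanning /proc/kallsyms: filt->addr becomes
 * the symbol's start address and, because kallsyms carries no sizes,
 * filt->size is the distance to the next symbol:
 *
 *	$ grep -w __schedule /proc/kallsyms
 *	ffffffff81a0b810 t __schedule
 */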

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map__dso(map));

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL  ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso->data.file_size;

	return 0;
}

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}
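
/*
 * Example of the resulting filter string (addresses hypothetical): a range
 * filter that resolved "main" in /bin/ls to 0x4004d6 with size 0x30 is
 * rendered as:
 *
 *	filter 0x4004d6/0x30@/bin/ls
 *
 * which is the format the kernel's address filter parser accepts when the
 * filter is applied to the event.
 */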

static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		err = evsel__append_addr_filter(evsel, new_filter);
		free(new_filter);
		if (err) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static int evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}
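
/*
 * The number of address filters is advertised by the PMU in sysfs, e.g.
 * (the path and value depend on the machine):
 *
 *	$ cat /sys/bus/event_source/devices/intel_pt/nr_addr_filters
 *	4
 *
 * A value of 0, or a missing file, means the PMU does not support address
 * filtering and the filter is left untouched below.
 */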

int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}
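
/*
 * End-to-end sketch (the command line is illustrative): with Intel PT,
 *
 *	$ perf record -e intel_pt//u --filter 'filter main @ /bin/ls' -- ls
 *
 * the --filter argument arrives here as evsel->filter, "main" is resolved
 * to an address/size within /bin/ls, and the rewritten filter string is
 * what gets passed to the kernel when the event is set up.
 */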

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}

void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}

int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}

void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free_events(session);
}

void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free(session);
}

bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel)
{
	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
		return false;

	return session->auxtrace->evsel_is_auxtrace(session, evsel);
}