xref: /kernel/linux/linux-5.10/tools/lib/perf/evlist.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>

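/*
 * Typical lifecycle of an evlist, as an illustrative sketch using the
 * public libperf API (error handling elided):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */
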
void perf_evlist__init(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
}

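/*
 * Decide which cpu map @evsel should use: the evlist's map when the user
 * specified a cpu list (or when the evsel has no map of its own), the
 * per-thread dummy map when no cpus are being monitored, or otherwise the
 * evsel's own map (e.g. from PMU sysfs).  The evlist thread map is shared
 * by every evsel and the chosen cpus are merged into evlist->all_cpus.
 */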
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We may already have cpus for evsel (via PMU sysfs), so keep
	 * them unless a target cpu list was defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

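/*
 * Add @evsel to the tail of @evlist and propagate the current maps to it.
 * The evlist takes ownership: perf_evlist__delete() deletes every evsel
 * still on the list.
 */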
void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;
	__perf_evlist__propagate_maps(evlist, evsel);
}

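/*
 * Unlink @evsel from @evlist.  The evsel itself is not freed; ownership
 * returns to the caller.
 */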
void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

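/*
 * Return the evsel following @prev, or the first entry when @prev is
 * NULL; NULL marks the end of the list.  This is the building block for
 * the perf_evlist__for_each_evsel() iteration macros in perf/evlist.h.
 */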
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is noticed here, so no check is needed on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

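/*
 * Release the evlist's own resources (cpu/thread maps and the pollfd
 * array) without touching the evsels on the list.
 */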
void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

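/*
 * Full teardown: munmap the ring buffers, close all events, delete every
 * evsel, release the maps and free the evlist itself.  A NULL @evlist is
 * a no-op.
 */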
void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

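/*
 * A minimal per-process setup, for illustration (pid 0 means the calling
 * process; evsel creation and error handling elided):
 *
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *	perf_thread_map__set_pid(threads, 0, 0);
 *	perf_evlist__set_maps(evlist, NULL, threads);
 */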
void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or other of the maps isn't being
	 * changed, i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	if (!evlist->all_cpus && cpus)
		evlist->all_cpus = perf_cpu_map__get(cpus);

	perf_evlist__propagate_maps(evlist);
}

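/*
 * Open all events on their propagated cpu/thread maps.  On the first
 * failure, close whatever was already opened and return the error.
 */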
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

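/*
 * The read_format of the first evsel; all members of an evlist are
 * assumed to share the same read_format.
 */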
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

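/*
 * SID(e, x, y) is the struct perf_sample_id slot of evsel @e for cpu
 * index @x and thread index @y.  Each slot carries the kernel-assigned
 * sample ID and is hashed into evlist->heads[] so an ID found in the
 * ring buffer can be mapped back to its evsel.
 */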
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

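/*
 * Obtain the event ID for @fd and record it: preferably via the
 * PERF_EVENT_IOC_ID ioctl, falling back on older kernels to parsing the
 * ID out of a read() of the counter, which cannot work with
 * PERF_FORMAT_GROUP.
 */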
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

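/*
 * Add @fd to the pollfd array.  POLLERR and POLLHUP are always polled
 * for, @ptr (typically a struct perf_mmap) is stashed in the entry's
 * priv field, and the fd is switched to non-blocking mode.
 */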
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

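/*
 * Allocate the array of nr_mmaps ring buffers, chaining each map to its
 * predecessor so that perf_evlist__next_mmap() can walk them in order.
 */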
static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

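/*
 * Return the idx'th map from the normal or the overwrite array,
 * allocating the array on first use.
 */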
static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

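/*
 * FD(e, x, y): the perf_event_open() fd of evsel @e for cpu index @x
 * and thread index @y.
 */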
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, int cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

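/*
 * Map the ring buffer for position @idx: the first matching evsel
 * creates the ring buffer (its fd is recorded in *_output or
 * *_output_overwrite), and every subsequent evsel is redirected into it
 * with PERF_EVENT_IOC_SET_OUTPUT.  Normal and overwrite (write_backward)
 * events go to separate buffers.
 */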
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite)
{
	int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output   = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output   = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		if (!evsel->system_wide &&
		    perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

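/*
 * Two mmap layouts: one ring buffer per thread when there is no cpu map
 * (per-thread monitoring), otherwise one per cpu, shared by all threads
 * running on that cpu.
 */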
static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, thread, false);

		if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
				   &output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->cpus);
	int cpu, thread;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, cpu, true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
	if (perf_cpu_map__empty(evlist->cpus))
		nr_mmaps = perf_thread_map__nr(evlist->threads);

	return nr_mmaps;
}

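/*
 * Common mmap entry point: validate @ops, derive the ring-buffer mask
 * from mmap_len, allocate sample-ID storage and the pollfd array as
 * needed, then hand off to mmap_per_thread() or mmap_per_cpu().
 */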
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	struct perf_evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->cpus;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

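/*
 * @pages is the number of data pages; one extra page is added for the
 * control header.  The mask computed in perf_evlist__mmap_ops() is used
 * as a ring-buffer index mask, so @pages is expected to be a power of
 * two.
 */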
int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

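/*
 * Ring-buffer iterator: pass a NULL @map to get the first normal or
 * overwrite map, then follow each map's next pointer until NULL.
 */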
struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
638