// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "pmus.h"
#include "strbuf.h"

struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
#include <bpf/libbpf.h>

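/*
 * BPF program info nodes live in an rbtree keyed by program id.  The
 * plain helpers below take bpf_progs.lock for writing; the __-prefixed
 * variants expect the caller to already hold it.
 */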
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	down_write(&env->bpf_progs.lock);
	__perf_env__insert_bpf_prog_info(env, info_node);
	up_write(&env->bpf_progs.lock);
}

void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			return;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
}

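/* Look up a BPF program info node by program id, or return NULL. */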
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

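/*
 * BTF objects are kept in a second rbtree keyed by BTF id.  Insertion
 * fails (returns false) if a node with the same id is already present.
 */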
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_btf(env, btf_node);
	up_write(&env->bpf_progs.lock);
	return ret;
}

bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			return false;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
	return true;
}

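/* Look up a BTF node by id, with or without taking bpf_progs.lock. */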
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *res;

	down_read(&env->bpf_progs.lock);
	res = __perf_env__find_btf(env, btf_id);
	up_read(&env->bpf_progs.lock);
	return res;
}

struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		zfree(&node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

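/* Free everything the perf_env owns; the perf_env itself is not freed. */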
void perf_env__exit(struct perf_env *env)
{
	int i, j;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	for (i = 0; i < env->nr_cpu_pmu_caps; i++)
		zfree(&env->cpu_pmu_caps[i]);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
			zfree(&env->pmu_caps[i].caps[j]);
		zfree(&env->pmu_caps[i].caps);
		zfree(&env->pmu_caps[i].pmu_name);
	}
	zfree(&env->pmu_caps);
}

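/*
 * Start from an empty environment: empty BPF/BTF trees when libbpf
 * support is built in, and kernel bitness marked as not yet detected.
 */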
void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
	env->kernel_is_64_bit = -1;
}

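/*
 * Kernel bitness is derived from the raw architecture string and cached
 * in env->kernel_is_64_bit on first use.
 */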
static void perf_env__init_kernel_mode(struct perf_env *env)
{
	const char *arch = perf_env__raw_arch(env);

	if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	    !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
	    !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
	    !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
		env->kernel_is_64_bit = 1;
	else
		env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
	if (env->kernel_is_64_bit == -1)
		perf_env__init_kernel_mode(env);

	return env->kernel_is_64_bit;
}

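/* Record a copy of the perf command line in the environment. */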
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = strdup(argv[i]);
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

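/*
 * Fill env->cpu with the core/socket/die id of every present CPU,
 * unless the topology map has already been read.
 */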
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int idx, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (idx = 0; idx < nr_cpus; ++idx) {
		struct perf_cpu cpu = { .cpu = idx };

		env->cpu[idx].core_id	= cpu__get_core_id(cpu);
		env->cpu[idx].socket_id	= cpu__get_socket_id(cpu);
		env->cpu[idx].die_id	= cpu__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

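/*
 * Build env->pmu_mappings as a sequence of "type:name" entries, one per
 * PMU, each terminated by a NUL character.
 */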
int perf_env__read_pmu_mappings(struct perf_env *env)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	struct strbuf sb;

	while ((pmu = perf_pmus__scan(pmu)))
		pmu_num++;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return -ENOENT;
	}
	env->nr_pmu_mappings = pmu_num;

	if (strbuf_init(&sb, 128 * pmu_num) < 0)
		return -ENOMEM;

	while ((pmu = perf_pmus__scan(pmu))) {
		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
	}

	env->pmu_mappings = strbuf_detach(&sb, NULL);

	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

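/* Refresh env->cpuid from the CPU perf is running on. */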
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";
	if (!strncmp(arch, "loongarch", 9))
		return "loongarch";

	return arch;
}

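/*
 * Return the normalized architecture name, falling back to uname() when
 * the environment does not carry one (local operation).
 */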
const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

const char *perf_env__cpuid(struct perf_env *env)
{
	int status;

	if (!env || !env->cpuid) { /* Assume local operation */
		status = perf_env__read_cpuid(env);
		if (status)
			return NULL;
	}

	return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return 0;
	}

	return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return NULL;
	}

	return env->pmu_mappings;
}

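/*
 * Map a CPU to its NUMA node.  The cpu -> node table is built lazily
 * from env->numa_nodes on first use; CPUs not covered by any node,
 * and out-of-range CPUs, map to -1.
 */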
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map).cpu);
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			struct perf_cpu tmp;
			int j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(tmp, j, nn->map)
				env->numa_map[tmp.cpu] = i;
		}
	}

	return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}

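/*
 * Find a PMU capability by name.  Capabilities are stored as "name=value"
 * strings, so the lookup matches "<cap>=" and returns a pointer to the
 * value part, or NULL if the PMU or capability is not known.
 */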
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap)
{
	char *cap_eq;
	int cap_size;
	char **ptr;
	int i, j;

	if (!pmu_name || !cap)
		return NULL;

	cap_size = strlen(cap);
	cap_eq = zalloc(cap_size + 2);
	if (!cap_eq)
		return NULL;

	memcpy(cap_eq, cap, cap_size);
	cap_eq[cap_size] = '=';

	if (!strcmp(pmu_name, "cpu")) {
		for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
			if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &env->cpu_pmu_caps[i][cap_size + 1];
			}
		}
		goto out;
	}

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
			continue;

		ptr = env->pmu_caps[i].caps;

		for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
			if (!strncmp(ptr[j], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &ptr[j][cap_size + 1];
			}
		}
	}

out:
	free(cap_eq);
	return NULL;
}
