xref: /kernel/linux/linux-6.6/tools/bpf/bpftool/link.c (revision 62306a36)
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* Copyright (C) 2020 Facebook */
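/* bpftool "link" command: show, pin and detach BPF links. */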
3
4#include <errno.h>
5#include <linux/err.h>
6#include <linux/netfilter.h>
7#include <linux/netfilter_arp.h>
8#include <linux/perf_event.h>
9#include <net/if.h>
10#include <stdio.h>
11#include <unistd.h>
12
13#include <bpf/bpf.h>
14#include <bpf/hashmap.h>
15
16#include "json_writer.h"
17#include "main.h"
18#include "xlated_dumper.h"
19
20#define PERF_HW_CACHE_LEN 128
21
22static struct hashmap *link_table;
23static struct dump_data dd;
24
25static const char *perf_type_name[PERF_TYPE_MAX] = {
26	[PERF_TYPE_HARDWARE]			= "hardware",
27	[PERF_TYPE_SOFTWARE]			= "software",
28	[PERF_TYPE_TRACEPOINT]			= "tracepoint",
29	[PERF_TYPE_HW_CACHE]			= "hw-cache",
30	[PERF_TYPE_RAW]				= "raw",
31	[PERF_TYPE_BREAKPOINT]			= "breakpoint",
32};
33
34const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
35	[PERF_COUNT_HW_CPU_CYCLES]		= "cpu-cycles",
36	[PERF_COUNT_HW_INSTRUCTIONS]		= "instructions",
37	[PERF_COUNT_HW_CACHE_REFERENCES]	= "cache-references",
38	[PERF_COUNT_HW_CACHE_MISSES]		= "cache-misses",
39	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= "branch-instructions",
40	[PERF_COUNT_HW_BRANCH_MISSES]		= "branch-misses",
41	[PERF_COUNT_HW_BUS_CYCLES]		= "bus-cycles",
42	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= "stalled-cycles-frontend",
43	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= "stalled-cycles-backend",
44	[PERF_COUNT_HW_REF_CPU_CYCLES]		= "ref-cycles",
45};
46
47const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
48	[PERF_COUNT_SW_CPU_CLOCK]		= "cpu-clock",
49	[PERF_COUNT_SW_TASK_CLOCK]		= "task-clock",
50	[PERF_COUNT_SW_PAGE_FAULTS]		= "page-faults",
51	[PERF_COUNT_SW_CONTEXT_SWITCHES]	= "context-switches",
52	[PERF_COUNT_SW_CPU_MIGRATIONS]		= "cpu-migrations",
53	[PERF_COUNT_SW_PAGE_FAULTS_MIN]		= "minor-faults",
54	[PERF_COUNT_SW_PAGE_FAULTS_MAJ]		= "major-faults",
55	[PERF_COUNT_SW_ALIGNMENT_FAULTS]	= "alignment-faults",
56	[PERF_COUNT_SW_EMULATION_FAULTS]	= "emulation-faults",
57	[PERF_COUNT_SW_DUMMY]			= "dummy",
58	[PERF_COUNT_SW_BPF_OUTPUT]		= "bpf-output",
59	[PERF_COUNT_SW_CGROUP_SWITCHES]		= "cgroup-switches",
60};
61
62const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
63	[PERF_COUNT_HW_CACHE_L1D]		= "L1-dcache",
64	[PERF_COUNT_HW_CACHE_L1I]		= "L1-icache",
65	[PERF_COUNT_HW_CACHE_LL]		= "LLC",
66	[PERF_COUNT_HW_CACHE_DTLB]		= "dTLB",
67	[PERF_COUNT_HW_CACHE_ITLB]		= "iTLB",
68	[PERF_COUNT_HW_CACHE_BPU]		= "branch",
69	[PERF_COUNT_HW_CACHE_NODE]		= "node",
70};
71
72const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
73	[PERF_COUNT_HW_CACHE_OP_READ]		= "load",
74	[PERF_COUNT_HW_CACHE_OP_WRITE]		= "store",
75	[PERF_COUNT_HW_CACHE_OP_PREFETCH]	= "prefetch",
76};
77
78const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
79	[PERF_COUNT_HW_CACHE_RESULT_ACCESS]	= "refs",
80	[PERF_COUNT_HW_CACHE_RESULT_MISS]	= "misses",
81};
82
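/* Look up id in one of the name tables above; evaluates to NULL when id is
 * out of range or has no entry.
 */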
83#define perf_event_name(array, id) ({			\
84	const char *event_str = NULL;			\
85							\
86	if ((id) < ARRAY_SIZE(array))			\
87		event_str = array[id];			\
88	event_str;					\
89})
90
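/* Resolve a link specifier ("id ID" or "pinned PATH") from the command line
 * into an open link fd. Returns a negative value on error.
 */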
91static int link_parse_fd(int *argc, char ***argv)
92{
93	int fd;
94
95	if (is_prefix(**argv, "id")) {
96		unsigned int id;
97		char *endptr;
98
99		NEXT_ARGP();
100
101		id = strtoul(**argv, &endptr, 0);
102		if (*endptr) {
103			p_err("can't parse %s as ID", **argv);
104			return -1;
105		}
106		NEXT_ARGP();
107
108		fd = bpf_link_get_fd_by_id(id);
109		if (fd < 0)
110			p_err("failed to get link with ID %u: %s", id, strerror(errno));
111		return fd;
112	} else if (is_prefix(**argv, "pinned")) {
113		char *path;
114
115		NEXT_ARGP();
116
117		path = **argv;
118		NEXT_ARGP();
119
120		return open_obj_pinned_any(path, BPF_OBJ_LINK);
121	}
122
123	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
124	return -1;
125}
126
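/* Emit the fields shared by all link types (id, type, prog_id) as JSON. */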
127static void
128show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
129{
130	const char *link_type_str;
131
132	jsonw_uint_field(wtr, "id", info->id);
133	link_type_str = libbpf_bpf_link_type_str(info->type);
134	if (link_type_str)
135		jsonw_string_field(wtr, "type", link_type_str);
136	else
137		jsonw_uint_field(wtr, "type", info->type);
138
139	jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
140}
141
142static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
143{
144	const char *attach_type_str;
145
146	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
147	if (attach_type_str)
148		jsonw_string_field(wtr, "attach_type", attach_type_str);
149	else
150		jsonw_uint_field(wtr, "attach_type", attach_type);
151}
152
153static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
154{
155	char devname[IF_NAMESIZE] = "(unknown)";
156
157	if (ifindex)
158		if_indextoname(ifindex, devname);
159	else
160		snprintf(devname, sizeof(devname), "(detached)");
161	jsonw_string_field(wtr, "devname", devname);
162	jsonw_uint_field(wtr, "ifindex", ifindex);
163}
164
165static bool is_iter_map_target(const char *target_name)
166{
167	return strcmp(target_name, "bpf_map_elem") == 0 ||
168	       strcmp(target_name, "bpf_sk_storage_map") == 0;
169}
170
171static bool is_iter_cgroup_target(const char *target_name)
172{
173	return strcmp(target_name, "cgroup") == 0;
174}
175
176static const char *cgroup_order_string(__u32 order)
177{
178	switch (order) {
179	case BPF_CGROUP_ITER_ORDER_UNSPEC:
180		return "order_unspec";
181	case BPF_CGROUP_ITER_SELF_ONLY:
182		return "self_only";
183	case BPF_CGROUP_ITER_DESCENDANTS_PRE:
184		return "descendants_pre";
185	case BPF_CGROUP_ITER_DESCENDANTS_POST:
186		return "descendants_post";
187	case BPF_CGROUP_ITER_ANCESTORS_UP:
188		return "ancestors_up";
189	default: /* won't happen */
190		return "unknown";
191	}
192}
193
194static bool is_iter_task_target(const char *target_name)
195{
196	return strcmp(target_name, "task") == 0 ||
197		strcmp(target_name, "task_file") == 0 ||
198		strcmp(target_name, "task_vma") == 0;
199}
200
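/* Emit iterator-specific details: the target name plus the map, task or
 * cgroup parameters used by that target, if any.
 */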
201static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
202{
203	const char *target_name = u64_to_ptr(info->iter.target_name);
204
205	jsonw_string_field(wtr, "target_name", target_name);
206
207	if (is_iter_map_target(target_name))
208		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
209	else if (is_iter_task_target(target_name)) {
210		if (info->iter.task.tid)
211			jsonw_uint_field(wtr, "tid", info->iter.task.tid);
212		else if (info->iter.task.pid)
213			jsonw_uint_field(wtr, "pid", info->iter.task.pid);
214	}
215
216	if (is_iter_cgroup_target(target_name)) {
217		jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
218		jsonw_string_field(wtr, "order",
219				   cgroup_order_string(info->iter.cgroup.order));
220	}
221}
222
223void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
224{
225	jsonw_uint_field(json_wtr, "pf",
226			 info->netfilter.pf);
227	jsonw_uint_field(json_wtr, "hook",
228			 info->netfilter.hooknum);
229	jsonw_int_field(json_wtr, "prio",
230			 info->netfilter.priority);
231	jsonw_uint_field(json_wtr, "flags",
232			 info->netfilter.flags);
233}
234
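/* Fetch bpf_prog_info for prog_id through a temporary prog fd.
 * Returns 0 on success, a negative value on error.
 */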
235static int get_prog_info(int prog_id, struct bpf_prog_info *info)
236{
237	__u32 len = sizeof(*info);
238	int err, prog_fd;
239
240	prog_fd = bpf_prog_get_fd_by_id(prog_id);
241	if (prog_fd < 0)
242		return prog_fd;
243
244	memset(info, 0, sizeof(*info));
245	err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
246	if (err)
247		p_err("can't get prog info: %s", strerror(errno));
248	close(prog_fd);
249	return err;
250}
251
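/* qsort() comparator used to sort the kprobe_multi address array. */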
252static int cmp_u64(const void *A, const void *B)
253{
254	const __u64 *a = A, *b = B;
255
256	return *a < *b ? -1 : *a > *b;
257}
258
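/* Print the sorted kprobe_multi addresses, resolving each one to a kernel
 * symbol name (and module, when the symbol is not built into vmlinux).
 */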
259static void
260show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
261{
262	__u32 i, j = 0;
263	__u64 *addrs;
264
265	jsonw_bool_field(json_wtr, "retprobe",
266			 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
267	jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
268	jsonw_name(json_wtr, "funcs");
269	jsonw_start_array(json_wtr);
270	addrs = u64_to_ptr(info->kprobe_multi.addrs);
271	qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);
272
273	/* Load it once for all. */
274	if (!dd.sym_count)
275		kernel_syms_load(&dd);
276	for (i = 0; i < dd.sym_count; i++) {
277		if (dd.sym_mapping[i].address != addrs[j])
278			continue;
279		jsonw_start_object(json_wtr);
280		jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
281		jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
282		/* Print null if it is vmlinux */
283		if (dd.sym_mapping[i].module[0] == '\0') {
284			jsonw_name(json_wtr, "module");
285			jsonw_null(json_wtr);
286		} else {
287			jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
288		}
289		jsonw_end_object(json_wtr);
290		if (++j == info->kprobe_multi.count)
291			break;
292	}
293	jsonw_end_array(json_wtr);
294}
295
296static void
297show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
298{
299	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
300	jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
301	jsonw_string_field(wtr, "func",
302			   u64_to_ptr(info->perf_event.kprobe.func_name));
303	jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
304}
305
306static void
307show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
308{
309	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
310	jsonw_string_field(wtr, "file",
311			   u64_to_ptr(info->perf_event.uprobe.file_name));
312	jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
313}
314
315static void
316show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
317{
318	jsonw_string_field(wtr, "tracepoint",
319			   u64_to_ptr(info->perf_event.tracepoint.tp_name));
320}
321
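/* Decode a PERF_TYPE_HW_CACHE config (cache id in bits 0-7, op in bits 8-15,
 * result in the remaining bits) into a newly allocated "cache-op-result"
 * string. The caller must free the returned string.
 */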
322static char *perf_config_hw_cache_str(__u64 config)
323{
324	const char *hw_cache, *result, *op;
325	char *str = malloc(PERF_HW_CACHE_LEN);
326
327	if (!str) {
328		p_err("mem alloc failed");
329		return NULL;
330	}
331
332	hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
333	if (hw_cache)
334		snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
335	else
336		snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff);
337
338	op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
339	if (op)
340		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
341			 "%s-", op);
342	else
343		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
344			 "%lld-", (config >> 8) & 0xff);
345
346	result = perf_event_name(evsel__hw_cache_result, config >> 16);
347	if (result)
348		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
349			 "%s", result);
350	else
351		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
352			 "%lld", config >> 16);
353	return str;
354}
355
356static const char *perf_config_str(__u32 type, __u64 config)
357{
358	const char *perf_config;
359
360	switch (type) {
361	case PERF_TYPE_HARDWARE:
362		perf_config = perf_event_name(event_symbols_hw, config);
363		break;
364	case PERF_TYPE_SOFTWARE:
365		perf_config = perf_event_name(event_symbols_sw, config);
366		break;
367	case PERF_TYPE_HW_CACHE:
368		perf_config = perf_config_hw_cache_str(config);
369		break;
370	default:
371		perf_config = NULL;
372		break;
373	}
374	return perf_config;
375}
376
377static void
378show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
379{
380	__u64 config = info->perf_event.event.config;
381	__u32 type = info->perf_event.event.type;
382	const char *perf_type, *perf_config;
383
384	perf_type = perf_event_name(perf_type_name, type);
385	if (perf_type)
386		jsonw_string_field(wtr, "event_type", perf_type);
387	else
388		jsonw_uint_field(wtr, "event_type", type);
389
390	perf_config = perf_config_str(type, config);
391	if (perf_config)
392		jsonw_string_field(wtr, "event_config", perf_config);
393	else
394		jsonw_uint_field(wtr, "event_config", config);
395
396	if (type == PERF_TYPE_HW_CACHE && perf_config)
397		free((void *)perf_config);
398}
399
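/* Print one link as a JSON object: common header, type-specific details,
 * pinned paths and the processes holding a reference to it.
 */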
400static int show_link_close_json(int fd, struct bpf_link_info *info)
401{
402	struct bpf_prog_info prog_info;
403	const char *prog_type_str;
404	int err;
405
406	jsonw_start_object(json_wtr);
407
408	show_link_header_json(info, json_wtr);
409
410	switch (info->type) {
411	case BPF_LINK_TYPE_RAW_TRACEPOINT:
412		jsonw_string_field(json_wtr, "tp_name",
413				   u64_to_ptr(info->raw_tracepoint.tp_name));
414		break;
415	case BPF_LINK_TYPE_TRACING:
416		err = get_prog_info(info->prog_id, &prog_info);
417		if (err)
418			return err;
419
420		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
421		/* libbpf will return NULL for variants unknown to it. */
422		if (prog_type_str)
423			jsonw_string_field(json_wtr, "prog_type", prog_type_str);
424		else
425			jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
426
427		show_link_attach_type_json(info->tracing.attach_type,
428					   json_wtr);
429		jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
430		jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
431		break;
432	case BPF_LINK_TYPE_CGROUP:
433		jsonw_lluint_field(json_wtr, "cgroup_id",
434				   info->cgroup.cgroup_id);
435		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
436		break;
437	case BPF_LINK_TYPE_ITER:
438		show_iter_json(info, json_wtr);
439		break;
440	case BPF_LINK_TYPE_NETNS:
441		jsonw_uint_field(json_wtr, "netns_ino",
442				 info->netns.netns_ino);
443		show_link_attach_type_json(info->netns.attach_type, json_wtr);
444		break;
445	case BPF_LINK_TYPE_NETFILTER:
446		netfilter_dump_json(info, json_wtr);
447		break;
448	case BPF_LINK_TYPE_TCX:
449		show_link_ifindex_json(info->tcx.ifindex, json_wtr);
450		show_link_attach_type_json(info->tcx.attach_type, json_wtr);
451		break;
452	case BPF_LINK_TYPE_XDP:
453		show_link_ifindex_json(info->xdp.ifindex, json_wtr);
454		break;
455	case BPF_LINK_TYPE_STRUCT_OPS:
456		jsonw_uint_field(json_wtr, "map_id",
457				 info->struct_ops.map_id);
458		break;
459	case BPF_LINK_TYPE_KPROBE_MULTI:
460		show_kprobe_multi_json(info, json_wtr);
461		break;
462	case BPF_LINK_TYPE_PERF_EVENT:
463		switch (info->perf_event.type) {
464		case BPF_PERF_EVENT_EVENT:
465			show_perf_event_event_json(info, json_wtr);
466			break;
467		case BPF_PERF_EVENT_TRACEPOINT:
468			show_perf_event_tracepoint_json(info, json_wtr);
469			break;
470		case BPF_PERF_EVENT_KPROBE:
471		case BPF_PERF_EVENT_KRETPROBE:
472			show_perf_event_kprobe_json(info, json_wtr);
473			break;
474		case BPF_PERF_EVENT_UPROBE:
475		case BPF_PERF_EVENT_URETPROBE:
476			show_perf_event_uprobe_json(info, json_wtr);
477			break;
478		default:
479			break;
480		}
481		break;
482	default:
483		break;
484	}
485
486	if (!hashmap__empty(link_table)) {
487		struct hashmap_entry *entry;
488
489		jsonw_name(json_wtr, "pinned");
490		jsonw_start_array(json_wtr);
491		hashmap__for_each_key_entry(link_table, entry, info->id)
492			jsonw_string(json_wtr, entry->pvalue);
493		jsonw_end_array(json_wtr);
494	}
495
496	emit_obj_refs_json(refs_table, info->id, json_wtr);
497
498	jsonw_end_object(json_wtr);
499
500	return 0;
501}
502
503static void show_link_header_plain(struct bpf_link_info *info)
504{
505	const char *link_type_str;
506
507	printf("%u: ", info->id);
508	link_type_str = libbpf_bpf_link_type_str(info->type);
509	if (link_type_str)
510		printf("%s  ", link_type_str);
511	else
512		printf("type %u  ", info->type);
513
514	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
515		printf("map %u  ", info->struct_ops.map_id);
516	else
517		printf("prog %u  ", info->prog_id);
518}
519
520static void show_link_attach_type_plain(__u32 attach_type)
521{
522	const char *attach_type_str;
523
524	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
525	if (attach_type_str)
526		printf("attach_type %s  ", attach_type_str);
527	else
528		printf("attach_type %u  ", attach_type);
529}
530
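/* Print the target device as "name(ifindex)", "(detached)" when ifindex is
 * zero, or "(unknown)" if the name cannot be resolved.
 */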
531static void show_link_ifindex_plain(__u32 ifindex)
532{
533	char devname[IF_NAMESIZE * 2] = "(unknown)";
534	char tmpname[IF_NAMESIZE];
535	char *ret = NULL;
536
537	if (ifindex)
538		ret = if_indextoname(ifindex, tmpname);
539	else
540		snprintf(devname, sizeof(devname), "(detached)");
541	if (ret)
542		snprintf(devname, sizeof(devname), "%s(%d)",
543			 tmpname, ifindex);
544	printf("ifindex %s  ", devname);
545}
546
547static void show_iter_plain(struct bpf_link_info *info)
548{
549	const char *target_name = u64_to_ptr(info->iter.target_name);
550
551	printf("target_name %s  ", target_name);
552
553	if (is_iter_map_target(target_name))
554		printf("map_id %u  ", info->iter.map.map_id);
555	else if (is_iter_task_target(target_name)) {
556		if (info->iter.task.tid)
557			printf("tid %u ", info->iter.task.tid);
558		else if (info->iter.task.pid)
559			printf("pid %u ", info->iter.task.pid);
560	}
561
562	if (is_iter_cgroup_target(target_name)) {
563		printf("cgroup_id %llu  ", info->iter.cgroup.cgroup_id);
564		printf("order %s  ",
565		       cgroup_order_string(info->iter.cgroup.order));
566	}
567}
568
569static const char * const pf2name[] = {
570	[NFPROTO_INET] = "inet",
571	[NFPROTO_IPV4] = "ip",
572	[NFPROTO_ARP] = "arp",
573	[NFPROTO_NETDEV] = "netdev",
574	[NFPROTO_BRIDGE] = "bridge",
575	[NFPROTO_IPV6] = "ip6",
576};
577
578static const char * const inethook2name[] = {
579	[NF_INET_PRE_ROUTING] = "prerouting",
580	[NF_INET_LOCAL_IN] = "input",
581	[NF_INET_FORWARD] = "forward",
582	[NF_INET_LOCAL_OUT] = "output",
583	[NF_INET_POST_ROUTING] = "postrouting",
584};
585
586static const char * const arphook2name[] = {
587	[NF_ARP_IN] = "input",
588	[NF_ARP_OUT] = "output",
589};
590
591void netfilter_dump_plain(const struct bpf_link_info *info)
592{
593	const char *hookname = NULL, *pfname = NULL;
594	unsigned int hook = info->netfilter.hooknum;
595	unsigned int pf = info->netfilter.pf;
596
597	if (pf < ARRAY_SIZE(pf2name))
598		pfname = pf2name[pf];
599
600	switch (pf) {
601	case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
602	case NFPROTO_IPV4:
603	case NFPROTO_IPV6:
604	case NFPROTO_INET:
605		if (hook < ARRAY_SIZE(inethook2name))
606			hookname = inethook2name[hook];
607		break;
608	case NFPROTO_ARP:
609		if (hook < ARRAY_SIZE(arphook2name))
610			hookname = arphook2name[hook];
611	default:
612		break;
613	}
614
615	if (pfname)
616		printf("\n\t%s", pfname);
617	else
618		printf("\n\tpf: %d", pf);
619
620	if (hookname)
621		printf(" %s", hookname);
622	else
623		printf(", hook %u,", hook);
624
625	printf(" prio %d", info->netfilter.priority);
626
627	if (info->netfilter.flags)
628		printf(" flags 0x%x", info->netfilter.flags);
629}
630
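/* Plain-text counterpart of show_kprobe_multi_json(): print the probe kind,
 * function count and an addr/func/module table resolved from the kernel
 * symbol table.
 */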
631static void show_kprobe_multi_plain(struct bpf_link_info *info)
632{
633	__u32 i, j = 0;
634	__u64 *addrs;
635
636	if (!info->kprobe_multi.count)
637		return;
638
639	if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
640		printf("\n\tkretprobe.multi  ");
641	else
642		printf("\n\tkprobe.multi  ");
643	printf("func_cnt %u  ", info->kprobe_multi.count);
644	addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
645	qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
646
647	/* Load it once for all. */
648	if (!dd.sym_count)
649		kernel_syms_load(&dd);
650	if (!dd.sym_count)
651		return;
652
653	printf("\n\t%-16s %s", "addr", "func [module]");
654	for (i = 0; i < dd.sym_count; i++) {
655		if (dd.sym_mapping[i].address != addrs[j])
656			continue;
657		printf("\n\t%016lx %s",
658		       dd.sym_mapping[i].address, dd.sym_mapping[i].name);
659		if (dd.sym_mapping[i].module[0] != '\0')
660			printf(" [%s]  ", dd.sym_mapping[i].module);
661		else
662			printf("  ");
663
664		if (++j == info->kprobe_multi.count)
665			break;
666	}
667}
668
669static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
670{
671	const char *buf;
672
673	buf = u64_to_ptr(info->perf_event.kprobe.func_name);
674	if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
675		return;
676
677	if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
678		printf("\n\tkretprobe ");
679	else
680		printf("\n\tkprobe ");
681	if (info->perf_event.kprobe.addr)
682		printf("%llx ", info->perf_event.kprobe.addr);
683	printf("%s", buf);
684	if (info->perf_event.kprobe.offset)
685		printf("+%#x", info->perf_event.kprobe.offset);
686	printf("  ");
687}
688
689static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
690{
691	const char *buf;
692
693	buf = u64_to_ptr(info->perf_event.uprobe.file_name);
694	if (buf[0] == '\0')
695		return;
696
697	if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
698		printf("\n\turetprobe ");
699	else
700		printf("\n\tuprobe ");
701	printf("%s+%#x  ", buf, info->perf_event.uprobe.offset);
702}
703
704static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
705{
706	const char *buf;
707
708	buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
709	if (buf[0] == '\0')
710		return;
711
712	printf("\n\ttracepoint %s  ", buf);
713}
714
715static void show_perf_event_event_plain(struct bpf_link_info *info)
716{
717	__u64 config = info->perf_event.event.config;
718	__u32 type = info->perf_event.event.type;
719	const char *perf_type, *perf_config;
720
721	printf("\n\tevent ");
722	perf_type = perf_event_name(perf_type_name, type);
723	if (perf_type)
724		printf("%s:", perf_type);
725	else
726		printf("%u :", type);
727
728	perf_config = perf_config_str(type, config);
729	if (perf_config)
730		printf("%s  ", perf_config);
731	else
732		printf("%llu  ", config);
733
734	if (type == PERF_TYPE_HW_CACHE && perf_config)
735		free((void *)perf_config);
736}
737
738static int show_link_close_plain(int fd, struct bpf_link_info *info)
739{
740	struct bpf_prog_info prog_info;
741	const char *prog_type_str;
742	int err;
743
744	show_link_header_plain(info);
745
746	switch (info->type) {
747	case BPF_LINK_TYPE_RAW_TRACEPOINT:
748		printf("\n\ttp '%s'  ",
749		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
750		break;
751	case BPF_LINK_TYPE_TRACING:
752		err = get_prog_info(info->prog_id, &prog_info);
753		if (err)
754			return err;
755
756		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
757		/* libbpf will return NULL for variants unknown to it. */
758		if (prog_type_str)
759			printf("\n\tprog_type %s  ", prog_type_str);
760		else
761			printf("\n\tprog_type %u  ", prog_info.type);
762
763		show_link_attach_type_plain(info->tracing.attach_type);
764		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
765			printf("\n\ttarget_obj_id %u  target_btf_id %u  ",
766			       info->tracing.target_obj_id,
767			       info->tracing.target_btf_id);
768		break;
769	case BPF_LINK_TYPE_CGROUP:
770		printf("\n\tcgroup_id %zu  ", (size_t)info->cgroup.cgroup_id);
771		show_link_attach_type_plain(info->cgroup.attach_type);
772		break;
773	case BPF_LINK_TYPE_ITER:
774		show_iter_plain(info);
775		break;
776	case BPF_LINK_TYPE_NETNS:
777		printf("\n\tnetns_ino %u  ", info->netns.netns_ino);
778		show_link_attach_type_plain(info->netns.attach_type);
779		break;
780	case BPF_LINK_TYPE_NETFILTER:
781		netfilter_dump_plain(info);
782		break;
783	case BPF_LINK_TYPE_TCX:
784		printf("\n\t");
785		show_link_ifindex_plain(info->tcx.ifindex);
786		show_link_attach_type_plain(info->tcx.attach_type);
787		break;
788	case BPF_LINK_TYPE_XDP:
789		printf("\n\t");
790		show_link_ifindex_plain(info->xdp.ifindex);
791		break;
792	case BPF_LINK_TYPE_KPROBE_MULTI:
793		show_kprobe_multi_plain(info);
794		break;
795	case BPF_LINK_TYPE_PERF_EVENT:
796		switch (info->perf_event.type) {
797		case BPF_PERF_EVENT_EVENT:
798			show_perf_event_event_plain(info);
799			break;
800		case BPF_PERF_EVENT_TRACEPOINT:
801			show_perf_event_tracepoint_plain(info);
802			break;
803		case BPF_PERF_EVENT_KPROBE:
804		case BPF_PERF_EVENT_KRETPROBE:
805			show_perf_event_kprobe_plain(info);
806			break;
807		case BPF_PERF_EVENT_UPROBE:
808		case BPF_PERF_EVENT_URETPROBE:
809			show_perf_event_uprobe_plain(info);
810			break;
811		default:
812			break;
813		}
814		break;
815	default:
816		break;
817	}
818
819	if (!hashmap__empty(link_table)) {
820		struct hashmap_entry *entry;
821
822		hashmap__for_each_key_entry(link_table, entry, info->id)
823			printf("\n\tpinned %s", (char *)entry->pvalue);
824	}
825	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
826
827	printf("\n");
828
829	return 0;
830}
831
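/* Show a single link. bpf_link_get_info_by_fd() is first called without any
 * buffers; once the link type is known, buffers for its variable-length
 * strings (or the kprobe_multi address array) are attached and the query is
 * retried via the "again" label.
 */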
832static int do_show_link(int fd)
833{
834	struct bpf_link_info info;
835	__u32 len = sizeof(info);
836	__u64 *addrs = NULL;
837	char buf[PATH_MAX];
838	int count;
839	int err;
840
841	memset(&info, 0, sizeof(info));
842	buf[0] = '\0';
843again:
844	err = bpf_link_get_info_by_fd(fd, &info, &len);
845	if (err) {
846		p_err("can't get link info: %s",
847		      strerror(errno));
848		close(fd);
849		return err;
850	}
851	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
852	    !info.raw_tracepoint.tp_name) {
853		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
854		info.raw_tracepoint.tp_name_len = sizeof(buf);
855		goto again;
856	}
857	if (info.type == BPF_LINK_TYPE_ITER &&
858	    !info.iter.target_name) {
859		info.iter.target_name = ptr_to_u64(&buf);
860		info.iter.target_name_len = sizeof(buf);
861		goto again;
862	}
863	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
864	    !info.kprobe_multi.addrs) {
865		count = info.kprobe_multi.count;
866		if (count) {
867			addrs = calloc(count, sizeof(__u64));
868			if (!addrs) {
869				p_err("mem alloc failed");
870				close(fd);
871				return -ENOMEM;
872			}
873			info.kprobe_multi.addrs = ptr_to_u64(addrs);
874			goto again;
875		}
876	}
877	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
878		switch (info.perf_event.type) {
879		case BPF_PERF_EVENT_TRACEPOINT:
880			if (!info.perf_event.tracepoint.tp_name) {
881				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
882				info.perf_event.tracepoint.name_len = sizeof(buf);
883				goto again;
884			}
885			break;
886		case BPF_PERF_EVENT_KPROBE:
887		case BPF_PERF_EVENT_KRETPROBE:
888			if (!info.perf_event.kprobe.func_name) {
889				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
890				info.perf_event.kprobe.name_len = sizeof(buf);
891				goto again;
892			}
893			break;
894		case BPF_PERF_EVENT_UPROBE:
895		case BPF_PERF_EVENT_URETPROBE:
896			if (!info.perf_event.uprobe.file_name) {
897				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
898				info.perf_event.uprobe.name_len = sizeof(buf);
899				goto again;
900			}
901			break;
902		default:
903			break;
904		}
905	}
906
907	if (json_output)
908		show_link_close_json(fd, &info);
909	else
910		show_link_close_plain(fd, &info);
911
912	if (addrs)
913		free(addrs);
914	close(fd);
915	return 0;
916}
917
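/* Show the link given on the command line, or iterate over all link IDs with
 * bpf_link_get_next_id(). Pinned paths (with -f) and the PIDs referencing
 * each link are collected first so the output can be annotated with them.
 */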
918static int do_show(int argc, char **argv)
919{
920	__u32 id = 0;
921	int err, fd;
922
923	if (show_pinned) {
924		link_table = hashmap__new(hash_fn_for_key_as_id,
925					  equal_fn_for_key_as_id, NULL);
926		if (IS_ERR(link_table)) {
927			p_err("failed to create hashmap for pinned paths");
928			return -1;
929		}
930		build_pinned_obj_table(link_table, BPF_OBJ_LINK);
931	}
932	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
933
934	if (argc == 2) {
935		fd = link_parse_fd(&argc, &argv);
936		if (fd < 0)
937			return fd;
938		do_show_link(fd);
939		goto out;
940	}
941
942	if (argc)
943		return BAD_ARG();
944
945	if (json_output)
946		jsonw_start_array(json_wtr);
947	while (true) {
948		err = bpf_link_get_next_id(id, &id);
949		if (err) {
950			if (errno == ENOENT)
951				break;
952			p_err("can't get next link: %s%s", strerror(errno),
953			      errno == EINVAL ? " -- kernel too old?" : "");
954			break;
955		}
956
957		fd = bpf_link_get_fd_by_id(id);
958		if (fd < 0) {
959			if (errno == ENOENT)
960				continue;
961			p_err("can't get link by id (%u): %s",
962			      id, strerror(errno));
963			break;
964		}
965
966		err = do_show_link(fd);
967		if (err)
968			break;
969	}
970	if (json_output)
971		jsonw_end_array(json_wtr);
972
973	delete_obj_refs_table(refs_table);
974
975	if (show_pinned)
976		delete_pinned_obj_table(link_table);
977
978out:
979	if (dd.sym_count)
980		kernel_syms_destroy(&dd);
981	return errno == ENOENT ? 0 : -1;
982}
983
984static int do_pin(int argc, char **argv)
985{
986	int err;
987
988	err = do_pin_any(argc, argv, link_parse_fd);
989	if (!err && json_output)
990		jsonw_null(json_wtr);
991	return err;
992}
993
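/* Force-detach a link from its hook via bpf_link_detach(); the link itself
 * stays alive until all fds and pins referencing it are gone.
 */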
994static int do_detach(int argc, char **argv)
995{
996	int err, fd;
997
998	if (argc != 2) {
999		p_err("link specifier is invalid or missing");
1000		return 1;
1001	}
1002
1003	fd = link_parse_fd(&argc, &argv);
1004	if (fd < 0)
1005		return 1;
1006
1007	err = bpf_link_detach(fd);
1008	if (err)
1009		err = -errno;
1010	close(fd);
1011	if (err) {
1012		p_err("failed link detach: %s", strerror(-err));
1013		return 1;
1014	}
1015
1016	if (json_output)
1017		jsonw_null(json_wtr);
1018
1019	return 0;
1020}
1021
1022static int do_help(int argc, char **argv)
1023{
1024	if (json_output) {
1025		jsonw_null(json_wtr);
1026		return 0;
1027	}
1028
1029	fprintf(stderr,
1030		"Usage: %1$s %2$s { show | list }   [LINK]\n"
1031		"       %1$s %2$s pin        LINK  FILE\n"
1032		"       %1$s %2$s detach     LINK\n"
1033		"       %1$s %2$s help\n"
1034		"\n"
1035		"       " HELP_SPEC_LINK "\n"
1036		"       " HELP_SPEC_OPTIONS " |\n"
1037		"                    {-f|--bpffs} | {-n|--nomount} }\n"
1038		"",
1039		bin_name, argv[-2]);
1040
1041	return 0;
1042}
1043
1044static const struct cmd cmds[] = {
1045	{ "show",	do_show },
1046	{ "list",	do_show },
1047	{ "help",	do_help },
1048	{ "pin",	do_pin },
1049	{ "detach",	do_detach },
1050	{ 0 }
1051};
1052
1053int do_link(int argc, char **argv)
1054{
1055	return cmd_select(cmds, argc, argv, do_help);
1056}
1057