xref: /kernel/linux/linux-5.10/tools/bpf/bpftool/prog.c (revision 8c2ecf20)
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3
4#define _GNU_SOURCE
5#include <errno.h>
6#include <fcntl.h>
7#include <signal.h>
8#include <stdarg.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h>
12#include <time.h>
13#include <unistd.h>
14#include <net/if.h>
15#include <sys/ioctl.h>
16#include <sys/types.h>
17#include <sys/stat.h>
18#include <sys/syscall.h>
19
20#include <linux/err.h>
21#include <linux/perf_event.h>
22#include <linux/sizes.h>
23
24#include <bpf/bpf.h>
25#include <bpf/btf.h>
26#include <bpf/libbpf.h>
27
28#include "cfg.h"
29#include "main.h"
30#include "xlated_dumper.h"
31
32#define BPF_METADATA_PREFIX "bpf_metadata_"
33#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
34
35const char * const prog_type_name[] = {
36	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
37	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
38	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
39	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
40	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
41	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
42	[BPF_PROG_TYPE_XDP]			= "xdp",
43	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
44	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
45	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
46	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
47	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
48	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
49	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
50	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
51	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
52	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
53	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
54	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
55	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
56	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
57	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
58	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
59	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
60	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
61	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
62	[BPF_PROG_TYPE_TRACING]			= "tracing",
63	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
64	[BPF_PROG_TYPE_EXT]			= "ext",
65	[BPF_PROG_TYPE_LSM]			= "lsm",
66	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
67};
68
69const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
70
71enum dump_mode {
72	DUMP_JITED,
73	DUMP_XLATED,
74};
75
76static const char * const attach_type_strings[] = {
77	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
78	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
79	[BPF_SK_MSG_VERDICT] = "msg_verdict",
80	[BPF_FLOW_DISSECTOR] = "flow_dissector",
81	[__MAX_BPF_ATTACH_TYPE] = NULL,
82};
83
84static enum bpf_attach_type parse_attach_type(const char *str)
85{
86	enum bpf_attach_type type;
87
88	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
89		if (attach_type_strings[type] &&
90		    is_prefix(str, attach_type_strings[type]))
91			return type;
92	}
93
94	return __MAX_BPF_ATTACH_TYPE;
95}
96
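/*
 * Convert a load time expressed in nanoseconds since boot (CLOCK_BOOTTIME,
 * as reported in bpf_prog_info::load_time) into wall-clock time by combining
 * it with the current CLOCK_REALTIME/CLOCK_BOOTTIME offset. Falls back to
 * printing raw seconds if reading the clocks or localtime_r() fails.
 */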
97static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
98{
99	struct timespec real_time_ts, boot_time_ts;
100	time_t wallclock_secs;
101	struct tm load_tm;
102
103	buf[--size] = '\0';
104
105	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
106	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
107		perror("Can't read clocks");
108		snprintf(buf, size, "%llu", nsecs / 1000000000);
109		return;
110	}
111
112	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
113		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
114		1000000000;
115
116
117	if (!localtime_r(&wallclock_secs, &load_tm)) {
118		snprintf(buf, size, "%llu", nsecs / 1000000000);
119		return;
120	}
121
122	if (json_output)
123		strftime(buf, size, "%s", &load_tm);
124	else
125		strftime(buf, size, "%FT%T%z", &load_tm);
126}
127
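/* Print the IDs of the maps used by the program: a JSON array in JSON mode,
 * otherwise a comma-separated "map_ids" list.
 */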
128static void show_prog_maps(int fd, __u32 num_maps)
129{
130	struct bpf_prog_info info = {};
131	__u32 len = sizeof(info);
132	__u32 map_ids[num_maps];
133	unsigned int i;
134	int err;
135
136	info.nr_map_ids = num_maps;
137	info.map_ids = ptr_to_u64(map_ids);
138
139	err = bpf_obj_get_info_by_fd(fd, &info, &len);
140	if (err || !info.nr_map_ids)
141		return;
142
143	if (json_output) {
144		jsonw_name(json_wtr, "map_ids");
145		jsonw_start_array(json_wtr);
146		for (i = 0; i < info.nr_map_ids; i++)
147			jsonw_uint(json_wtr, map_ids[i]);
148		jsonw_end_array(json_wtr);
149	} else {
150		printf("  map_ids ");
151		for (i = 0; i < info.nr_map_ids; i++)
152			printf("%u%s", map_ids[i],
153			       i == info.nr_map_ids - 1 ? "" : ",");
154	}
155}
156
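/*
 * Look for the program's metadata map: a single-entry BPF_MAP_TYPE_ARRAY
 * whose name contains ".rodata" and whose value type is described by BTF.
 * On success, return a malloc()'d copy of the map value (freed by the
 * caller) and fill *map_info; return NULL otherwise.
 */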
157static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
158{
159	struct bpf_prog_info prog_info;
160	__u32 prog_info_len;
161	__u32 map_info_len;
162	void *value = NULL;
163	__u32 *map_ids;
164	int nr_maps;
165	int key = 0;
166	int map_fd;
167	int ret;
168	__u32 i;
169
170	memset(&prog_info, 0, sizeof(prog_info));
171	prog_info_len = sizeof(prog_info);
172	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
173	if (ret)
174		return NULL;
175
176	if (!prog_info.nr_map_ids)
177		return NULL;
178
179	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
180	if (!map_ids)
181		return NULL;
182
183	nr_maps = prog_info.nr_map_ids;
184	memset(&prog_info, 0, sizeof(prog_info));
185	prog_info.nr_map_ids = nr_maps;
186	prog_info.map_ids = ptr_to_u64(map_ids);
187	prog_info_len = sizeof(prog_info);
188
189	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
190	if (ret)
191		goto free_map_ids;
192
193	for (i = 0; i < prog_info.nr_map_ids; i++) {
194		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
195		if (map_fd < 0)
196			goto free_map_ids;
197
198		memset(map_info, 0, sizeof(*map_info));
199		map_info_len = sizeof(*map_info);
200		ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
201		if (ret < 0) {
202			close(map_fd);
203			goto free_map_ids;
204		}
205
206		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
207		    map_info->key_size != sizeof(int) ||
208		    map_info->max_entries != 1 ||
209		    !map_info->btf_value_type_id ||
210		    !strstr(map_info->name, ".rodata")) {
211			close(map_fd);
212			continue;
213		}
214
215		value = malloc(map_info->value_size);
216		if (!value) {
217			close(map_fd);
218			goto free_map_ids;
219		}
220
221		if (bpf_map_lookup_elem(map_fd, &key, value)) {
222			close(map_fd);
223			free(value);
224			value = NULL;
225			goto free_map_ids;
226		}
227
228		close(map_fd);
229		break;
230	}
231
232free_map_ids:
233	free(map_ids);
234	return value;
235}
236
237static bool has_metadata_prefix(const char *s)
238{
239	return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
240}
241
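/*
 * Print the variables of the metadata map's DATASEC whose names start with
 * BPF_METADATA_PREFIX ("bpf_metadata_"), rendering their values through the
 * BTF dumper as a JSON object or as indented plain text.
 */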
242static void show_prog_metadata(int fd, __u32 num_maps)
243{
244	const struct btf_type *t_datasec, *t_var;
245	struct bpf_map_info map_info;
246	struct btf_var_secinfo *vsi;
247	bool printed_header = false;
248	struct btf *btf = NULL;
249	unsigned int i, vlen;
250	void *value = NULL;
251	const char *name;
252	int err;
253
254	if (!num_maps)
255		return;
256
257	memset(&map_info, 0, sizeof(map_info));
258	value = find_metadata(fd, &map_info);
259	if (!value)
260		return;
261
262	err = btf__get_from_id(map_info.btf_id, &btf);
263	if (err || !btf)
264		goto out_free;
265
266	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
267	if (!btf_is_datasec(t_datasec))
268		goto out_free;
269
270	vlen = btf_vlen(t_datasec);
271	vsi = btf_var_secinfos(t_datasec);
272
273	/* We don't proceed to check the kinds of the elements of the DATASEC.
274	 * The verifier enforces them to be BTF_KIND_VAR.
275	 */
276
277	if (json_output) {
278		struct btf_dumper d = {
279			.btf = btf,
280			.jw = json_wtr,
281			.is_plain_text = false,
282		};
283
284		for (i = 0; i < vlen; i++, vsi++) {
285			t_var = btf__type_by_id(btf, vsi->type);
286			name = btf__name_by_offset(btf, t_var->name_off);
287
288			if (!has_metadata_prefix(name))
289				continue;
290
291			if (!printed_header) {
292				jsonw_name(json_wtr, "metadata");
293				jsonw_start_object(json_wtr);
294				printed_header = true;
295			}
296
297			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
298			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
299			if (err) {
300				p_err("btf dump failed: %d", err);
301				break;
302			}
303		}
304		if (printed_header)
305			jsonw_end_object(json_wtr);
306	} else {
307		json_writer_t *btf_wtr;
308		struct btf_dumper d = {
309			.btf = btf,
310			.is_plain_text = true,
311		};
312
313		for (i = 0; i < vlen; i++, vsi++) {
314			t_var = btf__type_by_id(btf, vsi->type);
315			name = btf__name_by_offset(btf, t_var->name_off);
316
317			if (!has_metadata_prefix(name))
318				continue;
319
320			if (!printed_header) {
321				printf("\tmetadata:");
322
323				btf_wtr = jsonw_new(stdout);
324				if (!btf_wtr) {
325					p_err("jsonw alloc failed");
326					goto out_free;
327				}
328				d.jw = btf_wtr;
329
330				printed_header = true;
331			}
332
333			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
334
335			jsonw_reset(btf_wtr);
336			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
337			if (err) {
338				p_err("btf dump failed: %d", err);
339				break;
340			}
341		}
342		if (printed_header)
343			jsonw_destroy(&btf_wtr);
344	}
345
346out_free:
347	btf__free(btf);
348	free(value);
349}
350
351static void print_prog_header_json(struct bpf_prog_info *info)
352{
353	jsonw_uint_field(json_wtr, "id", info->id);
354	if (info->type < ARRAY_SIZE(prog_type_name))
355		jsonw_string_field(json_wtr, "type",
356				   prog_type_name[info->type]);
357	else
358		jsonw_uint_field(json_wtr, "type", info->type);
359
360	if (*info->name)
361		jsonw_string_field(json_wtr, "name", info->name);
362
363	jsonw_name(json_wtr, "tag");
364	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
365		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
366		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
367
368	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
369	if (info->run_time_ns) {
370		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
371		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
372	}
373}
374
375static void print_prog_json(struct bpf_prog_info *info, int fd)
376{
377	char *memlock;
378
379	jsonw_start_object(json_wtr);
380	print_prog_header_json(info);
381	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
382
383	if (info->load_time) {
384		char buf[32];
385
386		print_boot_time(info->load_time, buf, sizeof(buf));
387
388		/* Piggy back on load_time, since 0 uid is a valid one */
389		jsonw_name(json_wtr, "loaded_at");
390		jsonw_printf(json_wtr, "%s", buf);
391		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
392	}
393
394	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
395
396	if (info->jited_prog_len) {
397		jsonw_bool_field(json_wtr, "jited", true);
398		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
399	} else {
400		jsonw_bool_field(json_wtr, "jited", false);
401	}
402
403	memlock = get_fdinfo(fd, "memlock");
404	if (memlock)
405		jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
406	free(memlock);
407
408	if (info->nr_map_ids)
409		show_prog_maps(fd, info->nr_map_ids);
410
411	if (info->btf_id)
412		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
413
414	if (!hash_empty(prog_table.table)) {
415		struct pinned_obj *obj;
416
417		jsonw_name(json_wtr, "pinned");
418		jsonw_start_array(json_wtr);
419		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
420			if (obj->id == info->id)
421				jsonw_string(json_wtr, obj->path);
422		}
423		jsonw_end_array(json_wtr);
424	}
425
426	emit_obj_refs_json(&refs_table, info->id, json_wtr);
427
428	show_prog_metadata(fd, info->nr_map_ids);
429
430	jsonw_end_object(json_wtr);
431}
432
433static void print_prog_header_plain(struct bpf_prog_info *info)
434{
435	printf("%u: ", info->id);
436	if (info->type < ARRAY_SIZE(prog_type_name))
437		printf("%s  ", prog_type_name[info->type]);
438	else
439		printf("type %u  ", info->type);
440
441	if (*info->name)
442		printf("name %s  ", info->name);
443
444	printf("tag ");
445	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
446	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
447	printf("%s", info->gpl_compatible ? "  gpl" : "");
448	if (info->run_time_ns)
449		printf(" run_time_ns %llu run_cnt %llu",
450		       info->run_time_ns, info->run_cnt);
451	printf("\n");
452}
453
454static void print_prog_plain(struct bpf_prog_info *info, int fd)
455{
456	char *memlock;
457
458	print_prog_header_plain(info);
459
460	if (info->load_time) {
461		char buf[32];
462
463		print_boot_time(info->load_time, buf, sizeof(buf));
464
465		/* Piggy back on load_time, since 0 uid is a valid one */
466		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
467	}
468
469	printf("\txlated %uB", info->xlated_prog_len);
470
471	if (info->jited_prog_len)
472		printf("  jited %uB", info->jited_prog_len);
473	else
474		printf("  not jited");
475
476	memlock = get_fdinfo(fd, "memlock");
477	if (memlock)
478		printf("  memlock %sB", memlock);
479	free(memlock);
480
481	if (info->nr_map_ids)
482		show_prog_maps(fd, info->nr_map_ids);
483
484	if (!hash_empty(prog_table.table)) {
485		struct pinned_obj *obj;
486
487		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
488			if (obj->id == info->id)
489				printf("\n\tpinned %s", obj->path);
490		}
491	}
492
493	if (info->btf_id)
494		printf("\n\tbtf_id %d", info->btf_id);
495
496	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
497
498	printf("\n");
499
500	show_prog_metadata(fd, info->nr_map_ids);
501}
502
503static int show_prog(int fd)
504{
505	struct bpf_prog_info info = {};
506	__u32 len = sizeof(info);
507	int err;
508
509	err = bpf_obj_get_info_by_fd(fd, &info, &len);
510	if (err) {
511		p_err("can't get prog info: %s", strerror(errno));
512		return -1;
513	}
514
515	if (json_output)
516		print_prog_json(&info, fd);
517	else
518		print_prog_plain(&info, fd);
519
520	return 0;
521}
522
523static int do_show_subset(int argc, char **argv)
524{
525	int *fds = NULL;
526	int nb_fds, i;
527	int err = -1;
528
529	fds = malloc(sizeof(int));
530	if (!fds) {
531		p_err("mem alloc failed");
532		return -1;
533	}
534	nb_fds = prog_parse_fds(&argc, &argv, &fds);
535	if (nb_fds < 1)
536		goto exit_free;
537
538	if (json_output && nb_fds > 1)
539		jsonw_start_array(json_wtr);	/* root array */
540	for (i = 0; i < nb_fds; i++) {
541		err = show_prog(fds[i]);
542		if (err) {
543			for (; i < nb_fds; i++)
544				close(fds[i]);
545			break;
546		}
547		close(fds[i]);
548	}
549	if (json_output && nb_fds > 1)
550		jsonw_end_array(json_wtr);	/* root array */
551
552exit_free:
553	free(fds);
554	return err;
555}
556
557static int do_show(int argc, char **argv)
558{
559	__u32 id = 0;
560	int err;
561	int fd;
562
563	if (show_pinned)
564		build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
565	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
566
567	if (argc == 2)
568		return do_show_subset(argc, argv);
569
570	if (argc)
571		return BAD_ARG();
572
573	if (json_output)
574		jsonw_start_array(json_wtr);
575	while (true) {
576		err = bpf_prog_get_next_id(id, &id);
577		if (err) {
578			if (errno == ENOENT) {
579				err = 0;
580				break;
581			}
582			p_err("can't get next program: %s%s", strerror(errno),
583			      errno == EINVAL ? " -- kernel too old?" : "");
584			err = -1;
585			break;
586		}
587
588		fd = bpf_prog_get_fd_by_id(id);
589		if (fd < 0) {
590			if (errno == ENOENT)
591				continue;
592			p_err("can't get prog by id (%u): %s",
593			      id, strerror(errno));
594			err = -1;
595			break;
596		}
597
598		err = show_prog(fd);
599		close(fd);
600		if (err)
601			break;
602	}
603
604	if (json_output)
605		jsonw_end_array(json_wtr);
606
607	delete_obj_refs_table(&refs_table);
608
609	return err;
610}
611
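/*
 * Dump the program's jited or xlated instructions. Depending on the options,
 * write the raw image to a file, disassemble it (per kernel function when
 * jited_func_lens is available), draw the control flow graph ("visual",
 * xlated only), or print annotated xlated instructions, using BTF and line
 * info for annotations when present.
 */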
612static int
613prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
614	  char *filepath, bool opcodes, bool visual, bool linum)
615{
616	struct bpf_prog_linfo *prog_linfo = NULL;
617	const char *disasm_opt = NULL;
618	struct dump_data dd = {};
619	void *func_info = NULL;
620	struct btf *btf = NULL;
621	char func_sig[1024];
622	unsigned char *buf;
623	__u32 member_len;
624	ssize_t n;
625	int fd;
626
627	if (mode == DUMP_JITED) {
628		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
629			p_info("no instructions returned");
630			return -1;
631		}
632		buf = u64_to_ptr(info->jited_prog_insns);
633		member_len = info->jited_prog_len;
634	} else {	/* DUMP_XLATED */
635		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
636			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
637			return -1;
638		}
639		buf = u64_to_ptr(info->xlated_prog_insns);
640		member_len = info->xlated_prog_len;
641	}
642
643	if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
644		p_err("failed to get btf");
645		return -1;
646	}
647
648	func_info = u64_to_ptr(info->func_info);
649
650	if (info->nr_line_info) {
651		prog_linfo = bpf_prog_linfo__new(info);
652		if (!prog_linfo)
653			p_info("error in processing bpf_line_info.  continue without it.");
654	}
655
656	if (filepath) {
657		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
658		if (fd < 0) {
659			p_err("can't open file %s: %s", filepath,
660			      strerror(errno));
661			return -1;
662		}
663
664		n = write(fd, buf, member_len);
665		close(fd);
666		if (n != (ssize_t)member_len) {
667			p_err("error writing output file: %s",
668			      n < 0 ? strerror(errno) : "short write");
669			return -1;
670		}
671
672		if (json_output)
673			jsonw_null(json_wtr);
674	} else if (mode == DUMP_JITED) {
675		const char *name = NULL;
676
677		if (info->ifindex) {
678			name = ifindex_to_bfd_params(info->ifindex,
679						     info->netns_dev,
680						     info->netns_ino,
681						     &disasm_opt);
682			if (!name)
683				return -1;
684		}
685
686		if (info->nr_jited_func_lens && info->jited_func_lens) {
687			struct kernel_sym *sym = NULL;
688			struct bpf_func_info *record;
689			char sym_name[SYM_MAX_NAME];
690			unsigned char *img = buf;
691			__u64 *ksyms = NULL;
692			__u32 *lens;
693			__u32 i;
694			if (info->nr_jited_ksyms) {
695				kernel_syms_load(&dd);
696				ksyms = u64_to_ptr(info->jited_ksyms);
697			}
698
699			if (json_output)
700				jsonw_start_array(json_wtr);
701
702			lens = u64_to_ptr(info->jited_func_lens);
703			for (i = 0; i < info->nr_jited_func_lens; i++) {
704				if (ksyms) {
705					sym = kernel_syms_search(&dd, ksyms[i]);
706					if (sym)
707						sprintf(sym_name, "%s", sym->name);
708					else
709						sprintf(sym_name, "0x%016llx", ksyms[i]);
710				} else {
711					strcpy(sym_name, "unknown");
712				}
713
714				if (func_info) {
715					record = func_info + i * info->func_info_rec_size;
716					btf_dumper_type_only(btf, record->type_id,
717							     func_sig,
718							     sizeof(func_sig));
719				}
720
721				if (json_output) {
722					jsonw_start_object(json_wtr);
723					if (func_info && func_sig[0] != '\0') {
724						jsonw_name(json_wtr, "proto");
725						jsonw_string(json_wtr, func_sig);
726					}
727					jsonw_name(json_wtr, "name");
728					jsonw_string(json_wtr, sym_name);
729					jsonw_name(json_wtr, "insns");
730				} else {
731					if (func_info && func_sig[0] != '\0')
732						printf("%s:\n", func_sig);
733					printf("%s:\n", sym_name);
734				}
735
736				disasm_print_insn(img, lens[i], opcodes,
737						  name, disasm_opt, btf,
738						  prog_linfo, ksyms[i], i,
739						  linum);
740
741				img += lens[i];
742
743				if (json_output)
744					jsonw_end_object(json_wtr);
745				else
746					printf("\n");
747			}
748
749			if (json_output)
750				jsonw_end_array(json_wtr);
751		} else {
752			disasm_print_insn(buf, member_len, opcodes, name,
753					  disasm_opt, btf, NULL, 0, 0, false);
754		}
755	} else if (visual) {
756		if (json_output)
757			jsonw_null(json_wtr);
758		else
759			dump_xlated_cfg(buf, member_len);
760	} else {
761		kernel_syms_load(&dd);
762		dd.nr_jited_ksyms = info->nr_jited_ksyms;
763		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
764		dd.btf = btf;
765		dd.func_info = func_info;
766		dd.finfo_rec_size = info->func_info_rec_size;
767		dd.prog_linfo = prog_linfo;
768
769		if (json_output)
770			dump_xlated_json(&dd, buf, member_len, opcodes,
771					 linum);
772		else
773			dump_xlated_plain(&dd, buf, member_len, opcodes,
774					  linum);
775		kernel_syms_destroy(&dd);
776	}
777
778	return 0;
779}
780
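/*
 * Handler for "bpftool prog dump { xlated | jited } PROG ...", for example
 * (illustrative invocation, the id is an example only):
 *   bpftool prog dump xlated id 10 opcodes
 * Parses the mode and options, fetches the relevant bpf_prog_info arrays and
 * hands each matched program to prog_dump().
 */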
781static int do_dump(int argc, char **argv)
782{
783	struct bpf_prog_info_linear *info_linear;
784	char *filepath = NULL;
785	bool opcodes = false;
786	bool visual = false;
787	enum dump_mode mode;
788	bool linum = false;
789	int *fds = NULL;
790	int nb_fds, i = 0;
791	int err = -1;
792	__u64 arrays;
793
794	if (is_prefix(*argv, "jited")) {
795		if (disasm_init())
796			return -1;
797		mode = DUMP_JITED;
798	} else if (is_prefix(*argv, "xlated")) {
799		mode = DUMP_XLATED;
800	} else {
801		p_err("expected 'xlated' or 'jited', got: %s", *argv);
802		return -1;
803	}
804	NEXT_ARG();
805
806	if (argc < 2)
807		usage();
808
809	fds = malloc(sizeof(int));
810	if (!fds) {
811		p_err("mem alloc failed");
812		return -1;
813	}
814	nb_fds = prog_parse_fds(&argc, &argv, &fds);
815	if (nb_fds < 1)
816		goto exit_free;
817
818	if (is_prefix(*argv, "file")) {
819		NEXT_ARG();
820		if (!argc) {
821			p_err("expected file path");
822			goto exit_close;
823		}
824		if (nb_fds > 1) {
825			p_err("several programs matched");
826			goto exit_close;
827		}
828
829		filepath = *argv;
830		NEXT_ARG();
831	} else if (is_prefix(*argv, "opcodes")) {
832		opcodes = true;
833		NEXT_ARG();
834	} else if (is_prefix(*argv, "visual")) {
835		if (nb_fds > 1) {
836			p_err("several programs matched");
837			goto exit_close;
838		}
839
840		visual = true;
841		NEXT_ARG();
842	} else if (is_prefix(*argv, "linum")) {
843		linum = true;
844		NEXT_ARG();
845	}
846
847	if (argc) {
848		usage();
849		goto exit_close;
850	}
851
852	if (mode == DUMP_JITED)
853		arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
854	else
855		arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
856
857	arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
858	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
859	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
860	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
861	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
862
863	if (json_output && nb_fds > 1)
864		jsonw_start_array(json_wtr);	/* root array */
865	for (i = 0; i < nb_fds; i++) {
866		info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
867		if (IS_ERR_OR_NULL(info_linear)) {
868			p_err("can't get prog info: %s", strerror(errno));
869			break;
870		}
871
872		if (json_output && nb_fds > 1) {
873			jsonw_start_object(json_wtr);	/* prog object */
874			print_prog_header_json(&info_linear->info);
875			jsonw_name(json_wtr, "insns");
876		} else if (nb_fds > 1) {
877			print_prog_header_plain(&info_linear->info);
878		}
879
880		err = prog_dump(&info_linear->info, mode, filepath, opcodes,
881				visual, linum);
882
883		if (json_output && nb_fds > 1)
884			jsonw_end_object(json_wtr);	/* prog object */
885		else if (i != nb_fds - 1 && nb_fds > 1)
886			printf("\n");
887
888		free(info_linear);
889		if (err)
890			break;
891		close(fds[i]);
892	}
893	if (json_output && nb_fds > 1)
894		jsonw_end_array(json_wtr);	/* root array */
895
896exit_close:
897	for (; i < nb_fds; i++)
898		close(fds[i]);
899exit_free:
900	free(fds);
901	return err;
902}
903
904static int do_pin(int argc, char **argv)
905{
906	int err;
907
908	err = do_pin_any(argc, argv, prog_parse_fd);
909	if (!err && json_output)
910		jsonw_null(json_wtr);
911	return err;
912}
913
914struct map_replace {
915	int idx;
916	int fd;
917	char *name;
918};
919
920static int map_replace_compar(const void *p1, const void *p2)
921{
922	const struct map_replace *a = p1, *b = p2;
923
924	return a->idx - b->idx;
925}
926
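/*
 * Parse "PROG ATTACH_TYPE [MAP]" for the attach/detach commands. Every
 * supported attach type except flow_dissector requires a target map.
 */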
927static int parse_attach_detach_args(int argc, char **argv, int *progfd,
928				    enum bpf_attach_type *attach_type,
929				    int *mapfd)
930{
931	if (!REQ_ARGS(3))
932		return -EINVAL;
933
934	*progfd = prog_parse_fd(&argc, &argv);
935	if (*progfd < 0)
936		return *progfd;
937
938	*attach_type = parse_attach_type(*argv);
939	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
940		p_err("invalid attach/detach type");
941		return -EINVAL;
942	}
943
944	if (*attach_type == BPF_FLOW_DISSECTOR) {
945		*mapfd = 0;
946		return 0;
947	}
948
949	NEXT_ARG();
950	if (!REQ_ARGS(2))
951		return -EINVAL;
952
953	*mapfd = map_parse_fd(&argc, &argv);
954	if (*mapfd < 0)
955		return *mapfd;
956
957	return 0;
958}
959
960static int do_attach(int argc, char **argv)
961{
962	enum bpf_attach_type attach_type;
963	int err, progfd;
964	int mapfd;
965
966	err = parse_attach_detach_args(argc, argv,
967				       &progfd, &attach_type, &mapfd);
968	if (err)
969		return err;
970
971	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
972	if (err) {
973		p_err("failed prog attach to map");
974		return -EINVAL;
975	}
976
977	if (json_output)
978		jsonw_null(json_wtr);
979	return 0;
980}
981
982static int do_detach(int argc, char **argv)
983{
984	enum bpf_attach_type attach_type;
985	int err, progfd;
986	int mapfd;
987
988	err = parse_attach_detach_args(argc, argv,
989				       &progfd, &attach_type, &mapfd);
990	if (err)
991		return err;
992
993	err = bpf_prog_detach2(progfd, mapfd, attach_type);
994	if (err) {
995		p_err("failed prog detach from map");
996		return -EINVAL;
997	}
998
999	if (json_output)
1000		jsonw_null(json_wtr);
1001	return 0;
1002}
1003
1004static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1005{
1006	if (file_data_in && file_ctx_in &&
1007	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1008		p_err("cannot use standard input for both data_in and ctx_in");
1009		return -1;
1010	}
1011
1012	return 0;
1013}
1014
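/*
 * Read the whole content of file "fname" (or standard input if fname is "-")
 * into a heap buffer that is grown by doubling as needed. On success the
 * buffer and its size are returned through data_ptr/size; the caller frees
 * the buffer.
 */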
1015static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1016{
1017	size_t block_size = 256;
1018	size_t buf_size = block_size;
1019	size_t nb_read = 0;
1020	void *tmp;
1021	FILE *f;
1022
1023	if (!fname) {
1024		*data_ptr = NULL;
1025		*size = 0;
1026		return 0;
1027	}
1028
1029	if (!strcmp(fname, "-"))
1030		f = stdin;
1031	else
1032		f = fopen(fname, "r");
1033	if (!f) {
1034		p_err("failed to open %s: %s", fname, strerror(errno));
1035		return -1;
1036	}
1037
1038	*data_ptr = malloc(block_size);
1039	if (!*data_ptr) {
1040		p_err("failed to allocate memory for data_in/ctx_in: %s",
1041		      strerror(errno));
1042		goto err_fclose;
1043	}
1044
1045	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1046		if (feof(f))
1047			break;
1048		if (ferror(f)) {
1049			p_err("failed to read data_in/ctx_in from %s: %s",
1050			      fname, strerror(errno));
1051			goto err_free;
1052		}
1053		if (nb_read > buf_size - block_size) {
1054			if (buf_size == UINT32_MAX) {
1055				p_err("data_in/ctx_in is too long (max: %u)",
1056				      UINT32_MAX);
1057				goto err_free;
1058			}
1059			/* No space for fread()-ing next chunk; realloc() */
1060			buf_size *= 2;
1061			tmp = realloc(*data_ptr, buf_size);
1062			if (!tmp) {
1063				p_err("failed to reallocate data_in/ctx_in: %s",
1064				      strerror(errno));
1065				goto err_free;
1066			}
1067			*data_ptr = tmp;
1068		}
1069	}
1070	if (f != stdin)
1071		fclose(f);
1072
1073	*size = nb_read;
1074	return 0;
1075
1076err_free:
1077	free(*data_ptr);
1078	*data_ptr = NULL;
1079err_fclose:
1080	if (f != stdin)
1081		fclose(f);
1082	return -1;
1083}
1084
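/* Print a hex dump of the buffer: offset column, hex bytes grouped in pairs,
 * then the printable ASCII characters (non-printable bytes shown as '.').
 */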
1085static void hex_print(void *data, unsigned int size, FILE *f)
1086{
1087	size_t i, j;
1088	char c;
1089
1090	for (i = 0; i < size; i += 16) {
1091		/* Row offset */
1092		fprintf(f, "%07zx\t", i);
1093
1094		/* Hexadecimal values */
1095		for (j = i; j < i + 16 && j < size; j++)
1096			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1097				j % 2 ? " " : "");
1098		for (; j < i + 16; j++)
1099			fprintf(f, "  %s", j % 2 ? " " : "");
1100
1101		/* ASCII values (if relevant), '.' otherwise */
1102		fprintf(f, "| ");
1103		for (j = i; j < i + 16 && j < size; j++) {
1104			c = *(char *)(data + j);
1105			if (c < ' ' || c > '~')
1106				c = '.';
1107			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1108		}
1109
1110		fprintf(f, "\n");
1111	}
1112}
1113
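/*
 * Write the data/context returned by the test run either to the given file,
 * or to standard output (as JSON under "json_key", or as a hex dump) when
 * fname is "-".
 */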
1114static int
1115print_run_output(void *data, unsigned int size, const char *fname,
1116		 const char *json_key)
1117{
1118	size_t nb_written;
1119	FILE *f;
1120
1121	if (!fname)
1122		return 0;
1123
1124	if (!strcmp(fname, "-")) {
1125		f = stdout;
1126		if (json_output) {
1127			jsonw_name(json_wtr, json_key);
1128			print_data_json(data, size);
1129		} else {
1130			hex_print(data, size, f);
1131		}
1132		return 0;
1133	}
1134
1135	f = fopen(fname, "w");
1136	if (!f) {
1137		p_err("failed to open %s: %s", fname, strerror(errno));
1138		return -1;
1139	}
1140
1141	nb_written = fwrite(data, 1, size, f);
1142	fclose(f);
1143	if (nb_written != size) {
1144		p_err("failed to write output data/ctx: %s", strerror(errno));
1145		return -1;
1146	}
1147
1148	return 0;
1149}
1150
1151static int alloc_run_data(void **data_ptr, unsigned int size_out)
1152{
1153	*data_ptr = calloc(size_out, 1);
1154	if (!*data_ptr) {
1155		p_err("failed to allocate memory for output data/ctx: %s",
1156		      strerror(errno));
1157		return -1;
1158	}
1159
1160	return 0;
1161}
1162
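/*
 * Handler for "bpftool prog run PROG ...": test-run a program through
 * BPF_PROG_TEST_RUN, feeding it data/context read from files or stdin and
 * printing the returned data/context, return value and duration. An
 * illustrative invocation (file names and pin path are examples only):
 *   bpftool prog run pinned /sys/fs/bpf/prog data_in in.bin data_out out.bin
 */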
1163static int do_run(int argc, char **argv)
1164{
1165	char *data_fname_in = NULL, *data_fname_out = NULL;
1166	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1167	struct bpf_prog_test_run_attr test_attr = {0};
1168	const unsigned int default_size = SZ_32K;
1169	void *data_in = NULL, *data_out = NULL;
1170	void *ctx_in = NULL, *ctx_out = NULL;
1171	unsigned int repeat = 1;
1172	int fd, err;
1173
1174	if (!REQ_ARGS(4))
1175		return -1;
1176
1177	fd = prog_parse_fd(&argc, &argv);
1178	if (fd < 0)
1179		return -1;
1180
1181	while (argc) {
1182		if (detect_common_prefix(*argv, "data_in", "data_out",
1183					 "data_size_out", NULL))
1184			return -1;
1185		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1186					 "ctx_size_out", NULL))
1187			return -1;
1188
1189		if (is_prefix(*argv, "data_in")) {
1190			NEXT_ARG();
1191			if (!REQ_ARGS(1))
1192				return -1;
1193
1194			data_fname_in = GET_ARG();
1195			if (check_single_stdin(data_fname_in, ctx_fname_in))
1196				return -1;
1197		} else if (is_prefix(*argv, "data_out")) {
1198			NEXT_ARG();
1199			if (!REQ_ARGS(1))
1200				return -1;
1201
1202			data_fname_out = GET_ARG();
1203		} else if (is_prefix(*argv, "data_size_out")) {
1204			char *endptr;
1205
1206			NEXT_ARG();
1207			if (!REQ_ARGS(1))
1208				return -1;
1209
1210			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1211			if (*endptr) {
1212				p_err("can't parse %s as output data size",
1213				      *argv);
1214				return -1;
1215			}
1216			NEXT_ARG();
1217		} else if (is_prefix(*argv, "ctx_in")) {
1218			NEXT_ARG();
1219			if (!REQ_ARGS(1))
1220				return -1;
1221
1222			ctx_fname_in = GET_ARG();
1223			if (check_single_stdin(data_fname_in, ctx_fname_in))
1224				return -1;
1225		} else if (is_prefix(*argv, "ctx_out")) {
1226			NEXT_ARG();
1227			if (!REQ_ARGS(1))
1228				return -1;
1229
1230			ctx_fname_out = GET_ARG();
1231		} else if (is_prefix(*argv, "ctx_size_out")) {
1232			char *endptr;
1233
1234			NEXT_ARG();
1235			if (!REQ_ARGS(1))
1236				return -1;
1237
1238			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1239			if (*endptr) {
1240				p_err("can't parse %s as output context size",
1241				      *argv);
1242				return -1;
1243			}
1244			NEXT_ARG();
1245		} else if (is_prefix(*argv, "repeat")) {
1246			char *endptr;
1247
1248			NEXT_ARG();
1249			if (!REQ_ARGS(1))
1250				return -1;
1251
1252			repeat = strtoul(*argv, &endptr, 0);
1253			if (*endptr) {
1254				p_err("can't parse %s as repeat number",
1255				      *argv);
1256				return -1;
1257			}
1258			NEXT_ARG();
1259		} else {
1260			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1261			      *argv);
1262			return -1;
1263		}
1264	}
1265
1266	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1267	if (err)
1268		return -1;
1269
1270	if (data_in) {
1271		if (!test_attr.data_size_out)
1272			test_attr.data_size_out = default_size;
1273		err = alloc_run_data(&data_out, test_attr.data_size_out);
1274		if (err)
1275			goto free_data_in;
1276	}
1277
1278	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1279	if (err)
1280		goto free_data_out;
1281
1282	if (ctx_in) {
1283		if (!test_attr.ctx_size_out)
1284			test_attr.ctx_size_out = default_size;
1285		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1286		if (err)
1287			goto free_ctx_in;
1288	}
1289
1290	test_attr.prog_fd	= fd;
1291	test_attr.repeat	= repeat;
1292	test_attr.data_in	= data_in;
1293	test_attr.data_out	= data_out;
1294	test_attr.ctx_in	= ctx_in;
1295	test_attr.ctx_out	= ctx_out;
1296
1297	err = bpf_prog_test_run_xattr(&test_attr);
1298	if (err) {
1299		p_err("failed to run program: %s", strerror(errno));
1300		goto free_ctx_out;
1301	}
1302
1303	err = 0;
1304
1305	if (json_output)
1306		jsonw_start_object(json_wtr);	/* root */
1307
1308	/* Do not exit on errors occurring when printing output data/context,
1309	 * we still want to print return value and duration for program run.
1310	 */
1311	if (test_attr.data_size_out)
1312		err += print_run_output(test_attr.data_out,
1313					test_attr.data_size_out,
1314					data_fname_out, "data_out");
1315	if (test_attr.ctx_size_out)
1316		err += print_run_output(test_attr.ctx_out,
1317					test_attr.ctx_size_out,
1318					ctx_fname_out, "ctx_out");
1319
1320	if (json_output) {
1321		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1322		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1323		jsonw_end_object(json_wtr);	/* root */
1324	} else {
1325		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1326			test_attr.retval,
1327			repeat > 1 ? " (average)" : "", test_attr.duration);
1328	}
1329
1330free_ctx_out:
1331	free(ctx_out);
1332free_ctx_in:
1333	free(ctx_in);
1334free_data_out:
1335	free(data_out);
1336free_data_in:
1337	free(data_in);
1338
1339	return err;
1340}
1341
1342static int
1343get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1344		      enum bpf_attach_type *expected_attach_type)
1345{
1346	libbpf_print_fn_t print_backup;
1347	int ret;
1348
1349	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1350	if (!ret)
1351		return ret;
1352
1353	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1354	print_backup = libbpf_set_print(print_all_levels);
1355	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1356	libbpf_set_print(print_backup);
1357
1358	return ret;
1359}
1360
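/*
 * Common implementation of "prog load" (pin the first program only) and
 * "prog loadall" (pin every program under the given path). Handles the
 * optional "type", "dev", "map { idx | name } MAP" and "pinmaps" arguments:
 * map replacements given by name are resolved to indices, sorted, and then
 * applied with bpf_map__reuse_fd() before the object is loaded.
 */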
1361static int load_with_options(int argc, char **argv, bool first_prog_only)
1362{
1363	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1364	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1365		.relaxed_maps = relaxed_maps,
1366	);
1367	struct bpf_object_load_attr load_attr = { 0 };
1368	enum bpf_attach_type expected_attach_type;
1369	struct map_replace *map_replace = NULL;
1370	struct bpf_program *prog = NULL, *pos;
1371	unsigned int old_map_fds = 0;
1372	const char *pinmaps = NULL;
1373	struct bpf_object *obj;
1374	struct bpf_map *map;
1375	const char *pinfile;
1376	unsigned int i, j;
1377	__u32 ifindex = 0;
1378	const char *file;
1379	int idx, err;
1380
1381
1382	if (!REQ_ARGS(2))
1383		return -1;
1384	file = GET_ARG();
1385	pinfile = GET_ARG();
1386
1387	while (argc) {
1388		if (is_prefix(*argv, "type")) {
1389			char *type;
1390
1391			NEXT_ARG();
1392
1393			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1394				p_err("program type already specified");
1395				goto err_free_reuse_maps;
1396			}
1397			if (!REQ_ARGS(1))
1398				goto err_free_reuse_maps;
1399
1400			/* Put a '/' at the end of type to appease libbpf */
1401			type = malloc(strlen(*argv) + 2);
1402			if (!type) {
1403				p_err("mem alloc failed");
1404				goto err_free_reuse_maps;
1405			}
1406			*type = 0;
1407			strcat(type, *argv);
1408			strcat(type, "/");
1409
1410			err = get_prog_type_by_name(type, &common_prog_type,
1411						    &expected_attach_type);
1412			free(type);
1413			if (err < 0)
1414				goto err_free_reuse_maps;
1415
1416			NEXT_ARG();
1417		} else if (is_prefix(*argv, "map")) {
1418			void *new_map_replace;
1419			char *endptr, *name;
1420			int fd;
1421
1422			NEXT_ARG();
1423
1424			if (!REQ_ARGS(4))
1425				goto err_free_reuse_maps;
1426
1427			if (is_prefix(*argv, "idx")) {
1428				NEXT_ARG();
1429
1430				idx = strtoul(*argv, &endptr, 0);
1431				if (*endptr) {
1432					p_err("can't parse %s as IDX", *argv);
1433					goto err_free_reuse_maps;
1434				}
1435				name = NULL;
1436			} else if (is_prefix(*argv, "name")) {
1437				NEXT_ARG();
1438
1439				name = *argv;
1440				idx = -1;
1441			} else {
1442				p_err("expected 'idx' or 'name', got: '%s'?",
1443				      *argv);
1444				goto err_free_reuse_maps;
1445			}
1446			NEXT_ARG();
1447
1448			fd = map_parse_fd(&argc, &argv);
1449			if (fd < 0)
1450				goto err_free_reuse_maps;
1451
1452			new_map_replace = reallocarray(map_replace,
1453						       old_map_fds + 1,
1454						       sizeof(*map_replace));
1455			if (!new_map_replace) {
1456				p_err("mem alloc failed");
1457				goto err_free_reuse_maps;
1458			}
1459			map_replace = new_map_replace;
1460
1461			map_replace[old_map_fds].idx = idx;
1462			map_replace[old_map_fds].name = name;
1463			map_replace[old_map_fds].fd = fd;
1464			old_map_fds++;
1465		} else if (is_prefix(*argv, "dev")) {
1466			NEXT_ARG();
1467
1468			if (ifindex) {
1469				p_err("offload device already specified");
1470				goto err_free_reuse_maps;
1471			}
1472			if (!REQ_ARGS(1))
1473				goto err_free_reuse_maps;
1474
1475			ifindex = if_nametoindex(*argv);
1476			if (!ifindex) {
1477				p_err("unrecognized netdevice '%s': %s",
1478				      *argv, strerror(errno));
1479				goto err_free_reuse_maps;
1480			}
1481			NEXT_ARG();
1482		} else if (is_prefix(*argv, "pinmaps")) {
1483			NEXT_ARG();
1484
1485			if (!REQ_ARGS(1))
1486				goto err_free_reuse_maps;
1487
1488			pinmaps = GET_ARG();
1489		} else {
1490			p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1491			      *argv);
1492			goto err_free_reuse_maps;
1493		}
1494	}
1495
1496	set_max_rlimit();
1497
1498	obj = bpf_object__open_file(file, &open_opts);
1499	if (IS_ERR_OR_NULL(obj)) {
1500		p_err("failed to open object file");
1501		goto err_free_reuse_maps;
1502	}
1503
1504	bpf_object__for_each_program(pos, obj) {
1505		enum bpf_prog_type prog_type = common_prog_type;
1506
1507		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1508			const char *sec_name = bpf_program__section_name(pos);
1509
1510			err = get_prog_type_by_name(sec_name, &prog_type,
1511						    &expected_attach_type);
1512			if (err < 0)
1513				goto err_close_obj;
1514		}
1515
1516		bpf_program__set_ifindex(pos, ifindex);
1517		bpf_program__set_type(pos, prog_type);
1518		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1519	}
1520
1521	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1522	      map_replace_compar);
1523
1524	/* After the sort maps by name will be first on the list, because they
1525	 * have idx == -1.  Resolve them.
1526	 */
1527	j = 0;
1528	while (j < old_map_fds && map_replace[j].name) {
1529		i = 0;
1530		bpf_object__for_each_map(map, obj) {
1531			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1532				map_replace[j].idx = i;
1533				break;
1534			}
1535			i++;
1536		}
1537		if (map_replace[j].idx == -1) {
1538			p_err("unable to find map '%s'", map_replace[j].name);
1539			goto err_close_obj;
1540		}
1541		j++;
1542	}
1543	/* Resort if any names were resolved */
1544	if (j)
1545		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1546		      map_replace_compar);
1547
1548	/* Set ifindex and name reuse */
1549	j = 0;
1550	idx = 0;
1551	bpf_object__for_each_map(map, obj) {
1552		if (!bpf_map__is_offload_neutral(map))
1553			bpf_map__set_ifindex(map, ifindex);
1554
1555		if (j < old_map_fds && idx == map_replace[j].idx) {
1556			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1557			if (err) {
1558				p_err("unable to set up map reuse: %d", err);
1559				goto err_close_obj;
1560			}
1561
1562			/* Next reuse wants to apply to the same map */
1563			if (j < old_map_fds && map_replace[j].idx == idx) {
1564				p_err("replacement for map idx %d specified more than once",
1565				      idx);
1566				goto err_close_obj;
1567			}
1568		}
1569
1570		idx++;
1571	}
1572	if (j < old_map_fds) {
1573		p_err("map idx '%d' not used", map_replace[j].idx);
1574		goto err_close_obj;
1575	}
1576
1577	load_attr.obj = obj;
1578	if (verifier_logs)
1579		/* log_level1 + log_level2 + stats, but not stable UAPI */
1580		load_attr.log_level = 1 + 2 + 4;
1581
1582	err = bpf_object__load_xattr(&load_attr);
1583	if (err) {
1584		p_err("failed to load object file");
1585		goto err_close_obj;
1586	}
1587
1588	err = mount_bpffs_for_pin(pinfile);
1589	if (err)
1590		goto err_close_obj;
1591
1592	if (first_prog_only) {
1593		prog = bpf_program__next(NULL, obj);
1594		if (!prog) {
1595			p_err("object file doesn't contain any bpf program");
1596			goto err_close_obj;
1597		}
1598
1599		err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1600		if (err) {
1601			p_err("failed to pin program %s",
1602			      bpf_program__section_name(prog));
1603			goto err_close_obj;
1604		}
1605	} else {
1606		err = bpf_object__pin_programs(obj, pinfile);
1607		if (err) {
1608			p_err("failed to pin all programs");
1609			goto err_close_obj;
1610		}
1611	}
1612
1613	if (pinmaps) {
1614		err = bpf_object__pin_maps(obj, pinmaps);
1615		if (err) {
1616			p_err("failed to pin all maps");
1617			goto err_unpin;
1618		}
1619	}
1620
1621	if (json_output)
1622		jsonw_null(json_wtr);
1623
1624	bpf_object__close(obj);
1625	for (i = 0; i < old_map_fds; i++)
1626		close(map_replace[i].fd);
1627	free(map_replace);
1628
1629	return 0;
1630
1631err_unpin:
1632	if (first_prog_only)
1633		unlink(pinfile);
1634	else
1635		bpf_object__unpin_programs(obj, pinfile);
1636err_close_obj:
1637	bpf_object__close(obj);
1638err_free_reuse_maps:
1639	for (i = 0; i < old_map_fds; i++)
1640		close(map_replace[i].fd);
1641	free(map_replace);
1642	return -1;
1643}
1644
1645static int do_load(int argc, char **argv)
1646{
1647	return load_with_options(argc, argv, true);
1648}
1649
1650static int do_loadall(int argc, char **argv)
1651{
1652	return load_with_options(argc, argv, false);
1653}
1654
1655#ifdef BPFTOOL_WITHOUT_SKELETONS
1656
1657static int do_profile(int argc, char **argv)
1658{
1659	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1660	return 0;
1661}
1662
1663#else /* BPFTOOL_WITHOUT_SKELETONS */
1664
1665#include "profiler.skel.h"
1666
1667struct profile_metric {
1668	const char *name;
1669	struct bpf_perf_event_value val;
1670	struct perf_event_attr attr;
1671	bool selected;
1672
1673	/* calculate ratios like instructions per cycle */
1674	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1675	const char *ratio_desc;
1676	const float ratio_mul;
1677} metrics[] = {
1678	{
1679		.name = "cycles",
1680		.attr = {
1681			.type = PERF_TYPE_HARDWARE,
1682			.config = PERF_COUNT_HW_CPU_CYCLES,
1683			.exclude_user = 1,
1684		},
1685	},
1686	{
1687		.name = "instructions",
1688		.attr = {
1689			.type = PERF_TYPE_HARDWARE,
1690			.config = PERF_COUNT_HW_INSTRUCTIONS,
1691			.exclude_user = 1,
1692		},
1693		.ratio_metric = 1,
1694		.ratio_desc = "insns per cycle",
1695		.ratio_mul = 1.0,
1696	},
1697	{
1698		.name = "l1d_loads",
1699		.attr = {
1700			.type = PERF_TYPE_HW_CACHE,
1701			.config =
1702				PERF_COUNT_HW_CACHE_L1D |
1703				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1704				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1705			.exclude_user = 1,
1706		},
1707	},
1708	{
1709		.name = "llc_misses",
1710		.attr = {
1711			.type = PERF_TYPE_HW_CACHE,
1712			.config =
1713				PERF_COUNT_HW_CACHE_LL |
1714				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1715				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1716			.exclude_user = 1
1717		},
1718		.ratio_metric = 2,
1719		.ratio_desc = "LLC misses per million insns",
1720		.ratio_mul = 1e6,
1721	},
1722};
1723
1724static __u64 profile_total_count;
1725
1726#define MAX_NUM_PROFILE_METRICS 4
1727
1728static int profile_parse_metrics(int argc, char **argv)
1729{
1730	unsigned int metric_cnt;
1731	int selected_cnt = 0;
1732	unsigned int i;
1733
1734	metric_cnt = ARRAY_SIZE(metrics);
1735
1736	while (argc > 0) {
1737		for (i = 0; i < metric_cnt; i++) {
1738			if (is_prefix(argv[0], metrics[i].name)) {
1739				if (!metrics[i].selected)
1740					selected_cnt++;
1741				metrics[i].selected = true;
1742				break;
1743			}
1744		}
1745		if (i == metric_cnt) {
1746			p_err("unknown metric %s", argv[0]);
1747			return -1;
1748		}
1749		NEXT_ARG();
1750	}
1751	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
1752		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
1753		      selected_cnt, MAX_NUM_PROFILE_METRICS);
1754		return -1;
1755	}
1756	return selected_cnt;
1757}
1758
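/*
 * Collect the per-CPU run counts and perf readings accumulated by the
 * profiler programs and aggregate them into profile_total_count and the
 * selected entries of metrics[].
 */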
1759static void profile_read_values(struct profiler_bpf *obj)
1760{
1761	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
1762	int reading_map_fd, count_map_fd;
1763	__u64 counts[num_cpu];
1764	__u32 key = 0;
1765	int err;
1766
1767	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
1768	count_map_fd = bpf_map__fd(obj->maps.counts);
1769	if (reading_map_fd < 0 || count_map_fd < 0) {
1770		p_err("failed to get fd for map");
1771		return;
1772	}
1773
1774	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
1775	if (err) {
1776		p_err("failed to read count_map: %s", strerror(errno));
1777		return;
1778	}
1779
1780	profile_total_count = 0;
1781	for (cpu = 0; cpu < num_cpu; cpu++)
1782		profile_total_count += counts[cpu];
1783
1784	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1785		struct bpf_perf_event_value values[num_cpu];
1786
1787		if (!metrics[m].selected)
1788			continue;
1789
1790		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
1791		if (err) {
1792			p_err("failed to read reading_map: %s",
1793			      strerror(errno));
1794			return;
1795		}
1796		for (cpu = 0; cpu < num_cpu; cpu++) {
1797			metrics[m].val.counter += values[cpu].counter;
1798			metrics[m].val.enabled += values[cpu].enabled;
1799			metrics[m].val.running += values[cpu].running;
1800		}
1801		key++;
1802	}
1803}
1804
1805static void profile_print_readings_json(void)
1806{
1807	__u32 m;
1808
1809	jsonw_start_array(json_wtr);
1810	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1811		if (!metrics[m].selected)
1812			continue;
1813		jsonw_start_object(json_wtr);
1814		jsonw_string_field(json_wtr, "metric", metrics[m].name);
1815		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
1816		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
1817		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
1818		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
1819
1820		jsonw_end_object(json_wtr);
1821	}
1822	jsonw_end_array(json_wtr);
1823}
1824
1825static void profile_print_readings_plain(void)
1826{
1827	__u32 m;
1828
1829	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
1830	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1831		struct bpf_perf_event_value *val = &metrics[m].val;
1832		int r;
1833
1834		if (!metrics[m].selected)
1835			continue;
1836		printf("%18llu %-20s", val->counter, metrics[m].name);
1837
1838		r = metrics[m].ratio_metric - 1;
1839		if (r >= 0 && metrics[r].selected &&
1840		    metrics[r].val.counter > 0) {
1841			printf("# %8.2f %-30s",
1842			       val->counter * metrics[m].ratio_mul /
1843			       metrics[r].val.counter,
1844			       metrics[m].ratio_desc);
1845		} else {
1846			printf("%-41s", "");
1847		}
1848
1849		if (val->enabled > val->running)
1850			printf("(%4.2f%%)",
1851			       val->running * 100.0 / val->enabled);
1852		printf("\n");
1853	}
1854}
1855
1856static void profile_print_readings(void)
1857{
1858	if (json_output)
1859		profile_print_readings_json();
1860	else
1861		profile_print_readings_plain();
1862}
1863
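/*
 * Derive the attach target name for the profiled program from its BTF: the
 * name of the type referenced by the first func_info record. Returns a
 * strdup()'d string, or NULL on failure.
 */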
1864static char *profile_target_name(int tgt_fd)
1865{
1866	struct bpf_prog_info_linear *info_linear;
1867	struct bpf_func_info *func_info;
1868	const struct btf_type *t;
1869	char *name = NULL;
1870	struct btf *btf;
1871
1872	info_linear = bpf_program__get_prog_info_linear(
1873		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
1874	if (IS_ERR_OR_NULL(info_linear)) {
1875		p_err("failed to get info_linear for prog FD %d", tgt_fd);
1876		return NULL;
1877	}
1878
1879	if (info_linear->info.btf_id == 0 ||
1880	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
1881		p_err("prog FD %d doesn't have valid btf", tgt_fd);
1882		goto out;
1883	}
1884
1885	func_info = u64_to_ptr(info_linear->info.func_info);
1886	t = btf__type_by_id(btf, func_info[0].type_id);
1887	if (!t) {
1888		p_err("btf %d doesn't have type %d",
1889		      info_linear->info.btf_id, func_info[0].type_id);
1890		goto out;
1891	}
1892	name = strdup(btf__name_by_offset(btf, t->name_off));
1893out:
1894	free(info_linear);
1895	return name;
1896}
1897
1898static struct profiler_bpf *profile_obj;
1899static int profile_tgt_fd = -1;
1900static char *profile_tgt_name;
1901static int *profile_perf_events;
1902static int profile_perf_event_cnt;
1903
1904static void profile_close_perf_events(struct profiler_bpf *obj)
1905{
1906	int i;
1907
1908	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
1909		close(profile_perf_events[i]);
1910
1911	free(profile_perf_events);
1912	profile_perf_event_cnt = 0;
1913}
1914
1915static int profile_open_perf_event(int mid, int cpu, int map_fd)
1916{
1917	int pmu_fd;
1918
1919	pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
1920			 -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
1921	if (pmu_fd < 0) {
1922		if (errno == ENODEV) {
1923			p_info("cpu %d may be offline, skip %s profiling.",
1924				cpu, metrics[mid].name);
1925			profile_perf_event_cnt++;
1926			return 0;
1927		}
1928		return -1;
1929	}
1930
1931	if (bpf_map_update_elem(map_fd,
1932				&profile_perf_event_cnt,
1933				&pmu_fd, BPF_ANY) ||
1934	    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
1935		close(pmu_fd);
1936		return -1;
1937	}
1938
1939	profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
1940	return 0;
1941}
1942
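/*
 * Open one perf event for each selected metric on each possible CPU, store
 * the file descriptors in the profiler's "events" map and enable them;
 * profile_open_perf_event() above skips CPUs that report ENODEV (offline).
 */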
1943static int profile_open_perf_events(struct profiler_bpf *obj)
1944{
1945	unsigned int cpu, m;
1946	int map_fd;
1947
1948	profile_perf_events = calloc(
1949		obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
1950	if (!profile_perf_events) {
1951		p_err("failed to allocate memory for perf_event array: %s",
1952		      strerror(errno));
1953		return -1;
1954	}
1955	map_fd = bpf_map__fd(obj->maps.events);
1956	if (map_fd < 0) {
1957		p_err("failed to get fd for events map");
1958		return -1;
1959	}
1960
1961	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1962		if (!metrics[m].selected)
1963			continue;
1964		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
1965			if (profile_open_perf_event(m, cpu, map_fd)) {
1966				p_err("failed to create event %s on cpu %d",
1967				      metrics[m].name, cpu);
1968				return -1;
1969			}
1970		}
1971	}
1972	return 0;
1973}
1974
1975static void profile_print_and_cleanup(void)
1976{
1977	profile_close_perf_events(profile_obj);
1978	profile_read_values(profile_obj);
1979	profile_print_readings();
1980	profiler_bpf__destroy(profile_obj);
1981
1982	close(profile_tgt_fd);
1983	free(profile_tgt_name);
1984}
1985
1986static void int_exit(int signo)
1987{
1988	profile_print_and_cleanup();
1989	exit(0);
1990}
1991
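/*
 * Handler for "bpftool prog profile PROG [duration DURATION] METRICs":
 * attach the bundled profiler BPF programs (from profiler.skel.h) to the
 * target program and report the selected perf metrics when the duration
 * expires or on SIGINT. An illustrative invocation (the id is an example
 * only):
 *   bpftool prog profile id 337 duration 10 cycles instructions
 */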
1992static int do_profile(int argc, char **argv)
1993{
1994	int num_metric, num_cpu, err = -1;
1995	struct bpf_program *prog;
1996	unsigned long duration;
1997	char *endptr;
1998
1999	/* we at least need two args for the prog and one metric */
2000	if (!REQ_ARGS(3))
2001		return -EINVAL;
2002
2003	/* parse target fd */
2004	profile_tgt_fd = prog_parse_fd(&argc, &argv);
2005	if (profile_tgt_fd < 0) {
2006		p_err("failed to parse fd");
2007		return -1;
2008	}
2009
2010	/* parse profiling optional duration */
2011	if (argc > 2 && is_prefix(argv[0], "duration")) {
2012		NEXT_ARG();
2013		duration = strtoul(*argv, &endptr, 0);
2014		if (*endptr)
2015			usage();
2016		NEXT_ARG();
2017	} else {
2018		duration = UINT_MAX;
2019	}
2020
2021	num_metric = profile_parse_metrics(argc, argv);
2022	if (num_metric <= 0)
2023		goto out;
2024
2025	num_cpu = libbpf_num_possible_cpus();
2026	if (num_cpu <= 0) {
2027		p_err("failed to identify number of CPUs");
2028		goto out;
2029	}
2030
2031	profile_obj = profiler_bpf__open();
2032	if (!profile_obj) {
2033		p_err("failed to open and/or load BPF object");
2034		goto out;
2035	}
2036
2037	profile_obj->rodata->num_cpu = num_cpu;
2038	profile_obj->rodata->num_metric = num_metric;
2039
2040	/* adjust map sizes */
2041	bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
2042	bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
2043	bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
2044	bpf_map__resize(profile_obj->maps.counts, 1);
2045
2046	/* change target name */
2047	profile_tgt_name = profile_target_name(profile_tgt_fd);
2048	if (!profile_tgt_name)
2049		goto out;
2050
2051	bpf_object__for_each_program(prog, profile_obj->obj) {
2052		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2053						     profile_tgt_name);
2054		if (err) {
2055			p_err("failed to set attach target");
2056			goto out;
2057		}
2058	}
2059
2060	set_max_rlimit();
2061	err = profiler_bpf__load(profile_obj);
2062	if (err) {
2063		p_err("failed to load profile_obj");
2064		goto out;
2065	}
2066
2067	err = profile_open_perf_events(profile_obj);
2068	if (err)
2069		goto out;
2070
2071	err = profiler_bpf__attach(profile_obj);
2072	if (err) {
2073		p_err("failed to attach profile_obj");
2074		goto out;
2075	}
2076	signal(SIGINT, int_exit);
2077
2078	sleep(duration);
2079	profile_print_and_cleanup();
2080	return 0;
2081
2082out:
2083	profile_close_perf_events(profile_obj);
2084	if (profile_obj)
2085		profiler_bpf__destroy(profile_obj);
2086	close(profile_tgt_fd);
2087	free(profile_tgt_name);
2088	return err;
2089}
2090
2091#endif /* BPFTOOL_WITHOUT_SKELETONS */
2092
2093static int do_help(int argc, char **argv)
2094{
2095	if (json_output) {
2096		jsonw_null(json_wtr);
2097		return 0;
2098	}
2099
2100	fprintf(stderr,
2101		"Usage: %1$s %2$s { show | list } [PROG]\n"
2102		"       %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
2103		"       %1$s %2$s dump jited  PROG [{ file FILE | opcodes | linum }]\n"
2104		"       %1$s %2$s pin   PROG FILE\n"
2105		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
2106		"                         [type TYPE] [dev NAME] \\\n"
2107		"                         [map { idx IDX | name NAME } MAP]\\\n"
2108		"                         [pinmaps MAP_DIR]\n"
2109		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2110		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2111		"       %1$s %2$s run PROG \\\n"
2112		"                         data_in FILE \\\n"
2113		"                         [data_out FILE [data_size_out L]] \\\n"
2114		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2115		"                         [repeat N]\n"
2116		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2117		"       %1$s %2$s tracelog\n"
2118		"       %1$s %2$s help\n"
2119		"\n"
2120		"       " HELP_SPEC_MAP "\n"
2121		"       " HELP_SPEC_PROGRAM "\n"
2122		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2123		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2124		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2125		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2126		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2127		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2128		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2129		"                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
2130		"                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
2131		"                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
2132		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2133		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2134		"       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
2135		"                        flow_dissector }\n"
2136		"       METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
2137		"       " HELP_SPEC_OPTIONS "\n"
2138		"",
2139		bin_name, argv[-2]);
2140
2141	return 0;
2142}
2143
2144static const struct cmd cmds[] = {
2145	{ "show",	do_show },
2146	{ "list",	do_show },
2147	{ "help",	do_help },
2148	{ "dump",	do_dump },
2149	{ "pin",	do_pin },
2150	{ "load",	do_load },
2151	{ "loadall",	do_loadall },
2152	{ "attach",	do_attach },
2153	{ "detach",	do_detach },
2154	{ "tracelog",	do_tracelog },
2155	{ "run",	do_run },
2156	{ "profile",	do_profile },
2157	{ 0 }
2158};
2159
2160int do_prog(int argc, char **argv)
2161{
2162	return cmd_select(cmds, argc, argv, do_help);
2163}
2164