// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/btf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"
#include "testing_helpers.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_EXPECTED_INSNS	32
#define MAX_UNEXPECTED_INSNS	32
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	23
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64
#define MAX_FUNC_INFOS	8
#define MAX_BTF_STRINGS	256
#define MAX_BTF_TYPES	256

#define INSN_OFF_MASK	((__s16)0xFFFF)
#define INSN_IMM_MASK	((__s32)0xFFFFFFFF)
#define SKIP_INSNS()	BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef)
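
/* A sketch of how SKIP_INSNS() is meant to be used in `expected_insns`
 * (the instruction values below are made up for illustration):
 *
 *	.expected_insns = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		SKIP_INSNS(),
 *		BPF_EXIT_INSN(),
 *	},
 *
 * The marker splits the pattern into sub-sequences that are searched
 * for in the xlated program one after another.
 */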

#define DEFAULT_LIBBPF_LOG_LEVEL	4

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN |	\
		    1ULL << CAP_PERFMON |	\
		    1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;
static int verif_log_level = 0;

struct kfunc_btf_id_pair {
	const char *kfunc;
	int insn_idx;
};
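
/* Example (hypothetical) fixup entry in a test case: the BTF ID of the
 * named kfunc is patched into the imm field of the call instruction at
 * insn_idx, e.g.:
 *
 *	.fixup_kfunc_btf_id = {
 *		{ "bpf_kfunc_call_test1", 2 },
 *	},
 */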

struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	struct bpf_insn	*fill_insns;
	/* If specified, test engine looks for this sequence of
	 * instructions in the BPF program after loading. Allows testing
	 * rewrites applied by the verifier.  Use the values
	 * INSN_OFF_MASK and INSN_IMM_MASK to mask the `off` and `imm`
	 * fields if their content does not matter.  The test case fails
	 * if the specified instructions are not found.
	 *
	 * The sequence can be split into sub-sequences by adding a
	 * SKIP_INSNS instruction at the end of each sub-sequence. In
	 * that case sub-sequences are searched for one after another.
	 */
	struct bpf_insn expected_insns[MAX_EXPECTED_INSNS];
	/* If specified, test engine applies the same pattern matching
	 * logic as for `expected_insns`. If the specified pattern is
	 * matched, the test case is marked as failed.
	 */
	struct bpf_insn unexpected_insns[MAX_UNEXPECTED_INSNS];
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	int fixup_map_event_output[MAX_FIXUPS];
	int fixup_map_reuseport_array[MAX_FIXUPS];
	int fixup_map_ringbuf[MAX_FIXUPS];
	int fixup_map_timer[MAX_FIXUPS];
	int fixup_map_kptr[MAX_FIXUPS];
	struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
	/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
	 * Can be a tab-separated sequence of expected strings. An empty string
	 * means no log verification.
	 */
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t insn_processed;
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT,
		VERBOSE_ACCEPT,
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	void (*fill_helper)(struct bpf_test *self);
	int runs;
#define bpf_testdata_struct_t					\
	struct {						\
		uint32_t retval, retval_unpriv;			\
		union {						\
			__u8 data[TEST_DATA_LEN];		\
			__u64 data64[TEST_DATA_LEN / 8];	\
		};						\
	}
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
	const char *kfunc;
	struct bpf_func_info func_info[MAX_FUNC_INFOS];
	int func_info_cnt;
	char btf_strings[MAX_BTF_STRINGS];
	/* A set of BTF types to load when specified. Use the macro
	 * definitions from test_btf.h; the list must end with
	 * BTF_END_RAW.
	 */
	__u32 btf_types[MAX_BTF_TYPES];
};
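
/* Test cases are defined with designated initializers; a minimal
 * (hypothetical) entry could look like this:
 *
 *	{
 *		"empty prog",
 *		.insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() },
 *		.result = ACCEPT,
 *		.retval = 0,
 *	},
 */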

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bits. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to error label */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);
	/* error label */
	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* jump range is limited to 16 bits. every ld_abs is replaced by 6 insns,
	 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
	 * to extend the error value of the inlined ld_abs sequence which then
	 * contains 7 insns. So set the divisor to 7 so the testcase can
	 * work on all arches.
	 */
	unsigned int len = (1 << 15) / 7;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}

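/* Fill the program with BPF_LD_IMM64 instructions loading semi-random
 * values and XOR-folding them into R0. On entry self->retval holds the
 * number of instructions to emit; on exit it is overwritten with the
 * expected 32-bit folded result so the test runner can verify it.
 */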
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}
	/* is_state_visited() doesn't allocate state for pruning for every jump.
	 * Hence multiply jmps by 4 to accommodate that heuristic
	 */
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

/* test the sequence of 8k jumps in the innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

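/* Dispatch on self->retval to pick a scale-test variant; the chosen
 * filler then resets self->retval to the program's expected return
 * value (42).
 */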
static void bpf_fill_scale(struct bpf_test *self)
{
	switch (self->retval) {
	case 1:
		return bpf_fill_scale1(self);
	case 2:
		return bpf_fill_scale2(self);
	default:
		self->prog_len = 0;
		break;
	}
}

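/* Helpers emitting dense jump ladders: each conditional jump below
 * lands on an unconditional jump that funnels to a common epilogue,
 * stressing the verifier's bookkeeping of many distinct, long-range
 * jump targets.
 */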
static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
{
	unsigned int len = 259, hlen = 128;
	int i;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= hlen; i++) {
		insn[i]        = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
		insn[i + hlen] = BPF_JMP_A(hlen - i);
	}
	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}

static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
{
	unsigned int len = 4100, jmp_off = 2048;
	int i, j;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= jmp_off; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
	}
	insn[i++] = BPF_JMP_A(jmp_off);
	for (; i <= jmp_off * 2 + 1; i += 16) {
		for (j = 0; j < 16; j++) {
			insn[i + j] = BPF_JMP_A(16 - j - 1);
		}
	}

	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}

static void bpf_fill_torturous_jumps(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0;

	switch (self->retval) {
	case 1:
		self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
		return;
	case 2:
		self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
		return;
	case 3:
		/* main */
		insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4);
		insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 262);
		insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
		insn[i++] = BPF_EXIT_INSN();

		/* subprog 1 */
		i += bpf_fill_torturous_jumps_insn_1(insn + i);

		/* subprog 2 */
		i += bpf_fill_torturous_jumps_insn_2(insn + i);

		self->prog_len = i;
		return;
	default:
		self->prog_len = 0;
		break;
	}
}

static void bpf_fill_big_prog_with_loop_1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* This test was added to catch a specific use-after-free
	 * error, which happened upon BPF program reallocation.
	 * Reallocation is handled by core.c:bpf_prog_realloc, which
	 * reuses old memory if the page boundary is not crossed. The
	 * value of `len` is chosen to cross this boundary on bpf_loop
	 * patching.
	 */
	const int len = getpagesize() - 25;
	int callback_load_idx;
	int callback_idx;
	int i = 0;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1);
	callback_load_idx = i;
	insn[i++] = BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW,
				 BPF_REG_2, BPF_PSEUDO_FUNC, 0,
				 777 /* filled below */);
	insn[i++] = BPF_RAW_INSN(0, 0, 0, 0, 0);
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0);
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0);
	insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop);

	while (i < len - 3)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
	insn[i++] = BPF_EXIT_INSN();

	callback_idx = i;
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
	insn[i++] = BPF_EXIT_INSN();

	insn[callback_load_idx].imm = callback_idx - callback_load_idx - 1;
	self->func_info[1].insn_off = callback_idx;
	self->prog_len = i;
	assert(i == len);
}

/* BPF_SK_LOOKUP contains 13 instructions; account for them if you need to fix up maps */
#define BPF_SK_LOOKUP(func)						\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */		\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_ ## func)

/* BPF_DIRECT_PKT_R2 contains 7 instructions. It initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions. It initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions. It initializes R7 to a random
 * negative 32-bit value and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

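/* Scan backwards from the end of the fixed-size insns array for the
 * last non-zero instruction to determine the program length.
 */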
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!libbpf_probe_bpf_map_type(map_type, NULL)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}

static int __create_map(uint32_t type, uint32_t size_key,
			uint32_t size_value, uint32_t max_elem,
			uint32_t extra_flags)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int fd;

	opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags;
	fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create map '%s'!\n", strerror(errno));
	}

	return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}

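/* Seed one struct test_val element: index is set to
 * offsetof(struct test_val, foo[6]) == (6 + 1) * sizeof(int), and
 * foo[6] carries a recognizable marker value.
 */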
static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
				  int idx, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key, int p2key, int p3key)
{
	int mfd, p1fd, p2fd, p3fd;

	mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
			     sizeof(int), max_elem, NULL);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy_simple(prog_type, 42);
	p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
	p3fd = create_prog_dummy_simple(prog_type, 24);
	if (p1fd < 0 || p2fd < 0 || p3fd < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
		close(mfd);
		mfd = -1;
	}
	close(p3fd);
	close(p2fd);
	close(p1fd);
	return mfd;
}

static int create_map_in_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
				      sizeof(int), 1, NULL);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	opts.inner_map_fd = inner_map_fd;
	outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
				      sizeof(int), sizeof(int), 1, &opts);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, NULL);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 * struct bpf_timer {
 *   __u64 :64;
 *   __u64 :64;
 * } __attribute__((aligned(8)));
 * struct timer {
 *   struct bpf_timer t;
 * };
 * struct btf_ptr {
 *   struct prog_test_ref_kfunc __kptr_untrusted *ptr;
 *   struct prog_test_ref_kfunc __kptr *ptr;
 *   struct prog_test_member __kptr *ptr;
 * }
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
				  "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_untrusted"
				  "\0prog_test_member";
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
	/* struct bpf_spin_lock */                      /* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */                                /* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	/* struct bpf_timer */                          /* [4] */
	BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
	/* struct timer */                              /* [5] */
	BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
	BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
	/* struct prog_test_ref_kfunc */		/* [6] */
	BTF_STRUCT_ENC(51, 0, 0),
	BTF_STRUCT_ENC(95, 0, 0),			/* [7] */
	/* type tag "kptr_untrusted" */
	BTF_TYPE_TAG_ENC(80, 6),			/* [8] */
	/* type tag "kptr" */
	BTF_TYPE_TAG_ENC(75, 6),			/* [9] */
	BTF_TYPE_TAG_ENC(75, 7),			/* [10] */
	BTF_PTR_ENC(8),					/* [11] */
	BTF_PTR_ENC(9),					/* [12] */
	BTF_PTR_ENC(10),				/* [13] */
	/* struct btf_ptr */				/* [14] */
	BTF_STRUCT_ENC(43, 3, 24),
	BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr_untrusted *ptr; */
	BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr *ptr; */
	BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr *ptr; */
};

static char bpf_vlog[UINT_MAX >> 8];

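/* Build and load a raw BTF blob. The wire format is the header followed
 * by the type section and then the string section:
 *
 *	+------------+-----------+-------------+
 *	| btf_header | types ... | strings ... |
 *	+------------+-----------+-------------+
 *
 * type_off is implicitly 0 here, and str_off equals types_len since the
 * strings immediately follow the types.
 */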
static int load_btf_spec(__u32 *types, int types_len,
			 const char *strings, int strings_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = strings_len,
	};
	void *ptr, *raw_btf;
	int btf_fd;
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		    .log_buf = bpf_vlog,
		    .log_size = sizeof(bpf_vlog),
		    .log_level = (verbose
				  ? verif_log_level
				  : DEFAULT_LIBBPF_LOG_LEVEL),
	);

	raw_btf = malloc(sizeof(hdr) + types_len + strings_len);
	if (!raw_btf)
		return -1;

	ptr = raw_btf;
	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, strings, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, &opts);
	if (btf_fd < 0)
		printf("Failed to load BTF spec: '%s'\n", strerror(errno));

	free(raw_btf);

	return btf_fd < 0 ? -1 : btf_fd;
}

static int load_btf(void)
{
	return load_btf_spec(btf_raw_types, sizeof(btf_raw_types),
			     btf_str_sec, sizeof(btf_str_sec));
}

static int load_btf_for_test(struct bpf_test *test)
{
	int types_num = 0;

	while (types_num < MAX_BTF_TYPES &&
	       test->btf_types[types_num] != BTF_END_RAW)
		++types_num;

	int types_len = types_num * sizeof(test->btf_types[0]);

	return load_btf_spec(test->btf_types, types_len,
			     test->btf_strings, sizeof(test->btf_strings));
}

static int create_map_spin_lock(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}

static int create_sk_storage_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_NO_PREALLOC,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
	close(opts.btf_fd);
	if (fd < 0)
		printf("Failed to create sk_storage_map\n");
	return fd;
}

static int create_map_timer(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 5,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with timer\n");
	return fd;
}

static int create_map_kptr(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 14,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with btf_id pointer\n");
	return fd;
}

static void set_root(bool set)
{
	__u64 caps;

	if (set) {
		if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
			perror("cap_enable_effective(CAP_SYS_ADMIN)");
	} else {
		if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
			perror("cap_disable_effective(CAP_SYS_ADMIN)");
	}
}

static __u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t) ptr;
}

static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
{
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	struct btf *btf = NULL;
	char name[64];
	__u32 id = 0;
	int err, fd;

	/* Iterate over all loaded BTF objects to find bpf_testmod;
	 * we need CAP_SYS_ADMIN for that.
	 */
	set_root(true);

	while (true) {
		err = bpf_btf_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT)
				break;
			perror("bpf_btf_get_next_id failed");
			break;
		}

		fd = bpf_btf_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			perror("bpf_btf_get_fd_by_id failed");
			break;
		}

		memset(&info, 0, sizeof(info));
		info.name_len = sizeof(name);
		info.name = ptr_to_u64(name);
		len = sizeof(info);

		err = bpf_obj_get_info_by_fd(fd, &info, &len);
		if (err) {
			close(fd);
			perror("bpf_obj_get_info_by_fd failed");
			break;
		}

		if (strcmp("bpf_testmod", name)) {
			close(fd);
			continue;
		}

		btf = btf__load_from_kernel_by_id_split(id, vmlinux);
		if (!btf) {
			close(fd);
			break;
		}

		/* We need the fd to stay open so it can be used in fd_array.
		 * The final cleanup call to btf__free will free the btf
		 * object and close the file descriptor.
		 */
		btf__set_fd(btf, fd);
		break;
	}

	set_root(false);
	return btf;
}

static struct btf *testmod_btf;
static struct btf *vmlinux_btf;

static void kfuncs_cleanup(void)
{
	btf__free(testmod_btf);
	btf__free(vmlinux_btf);
}

static void fixup_prog_kfuncs(struct bpf_insn *prog, int *fd_array,
			      struct kfunc_btf_id_pair *fixup_kfunc_btf_id)
{
	/* Patch in kfunc BTF IDs */
	while (fixup_kfunc_btf_id->kfunc) {
		int btf_id = 0;

		/* try to find kfunc in kernel BTF */
		vmlinux_btf = vmlinux_btf ?: btf__load_vmlinux_btf();
		if (vmlinux_btf) {
			btf_id = btf__find_by_name_kind(vmlinux_btf,
							fixup_kfunc_btf_id->kfunc,
							BTF_KIND_FUNC);
			btf_id = btf_id < 0 ? 0 : btf_id;
		}

		/* kfunc not found in kernel BTF, try bpf_testmod BTF */
		if (!btf_id) {
			testmod_btf = testmod_btf ?: btf__load_testmod_btf(vmlinux_btf);
			if (testmod_btf) {
				btf_id = btf__find_by_name_kind(testmod_btf,
								fixup_kfunc_btf_id->kfunc,
								BTF_KIND_FUNC);
				btf_id = btf_id < 0 ? 0 : btf_id;
				if (btf_id) {
					/* We put bpf_testmod module fd into fd_array
					 * and its index 1 into instruction 'off'.
					 */
					*fd_array = btf__fd(testmod_btf);
					prog[fixup_kfunc_btf_id->insn_idx].off = 1;
				}
			}
		}

		prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
		fixup_kfunc_btf_id++;
	}
}

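/* Create the maps/programs a test depends on and patch their fds into
 * the program. Each fixup array is a zero-terminated list of
 * instruction indices whose imm field receives the new fd; a
 * (hypothetical) test using the 8-byte hash map at instruction 3 would
 * set:
 *
 *	.fixup_map_hash_8b = { 3 },
 *
 * Note that instruction 0 cannot be fixed up, since 0 terminates the
 * list.
 */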
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds, int *fd_array)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;
	int *fixup_sk_storage_map = test->fixup_sk_storage_map;
	int *fixup_map_event_output = test->fixup_map_event_output;
	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
	int *fixup_map_timer = test->fixup_map_timer;
	int *fixup_map_kptr = test->fixup_map_kptr;

	if (test->fill_helper) {
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * the verifier and do not do a runtime lookup, so the only thing
	 * that really matters is the value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}
	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}
	if (*fixup_map_array_small) {
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}
	if (*fixup_sk_storage_map) {
		map_fds[17] = create_sk_storage_map();
		do {
			prog[*fixup_sk_storage_map].imm = map_fds[17];
			fixup_sk_storage_map++;
		} while (*fixup_sk_storage_map);
	}
	if (*fixup_map_event_output) {
		map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
					   sizeof(int), sizeof(int), 1, 0);
		do {
			prog[*fixup_map_event_output].imm = map_fds[18];
			fixup_map_event_output++;
		} while (*fixup_map_event_output);
	}
	if (*fixup_map_reuseport_array) {
		map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
					   sizeof(u32), sizeof(u64), 1, 0);
		do {
			prog[*fixup_map_reuseport_array].imm = map_fds[19];
			fixup_map_reuseport_array++;
		} while (*fixup_map_reuseport_array);
	}
	if (*fixup_map_ringbuf) {
		map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
					 0, getpagesize());
		do {
			prog[*fixup_map_ringbuf].imm = map_fds[20];
			fixup_map_ringbuf++;
		} while (*fixup_map_ringbuf);
	}
	if (*fixup_map_timer) {
		map_fds[21] = create_map_timer();
		do {
			prog[*fixup_map_timer].imm = map_fds[21];
			fixup_map_timer++;
		} while (*fixup_map_timer);
	}
	if (*fixup_map_kptr) {
		map_fds[22] = create_map_kptr();
		do {
			prog[*fixup_map_kptr].imm = map_fds[22];
			fixup_map_kptr++;
		} while (*fixup_map_kptr);
	}

	fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
}

struct libcap {
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[2];
};

static int set_admin(bool admin)
{
	int err;

	if (admin) {
		err = cap_enable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_enable_effective(ADMIN_CAPS)");
	} else {
		err = cap_disable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_disable_effective(ADMIN_CAPS)");
	}

	return err;
}

static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = size_data,
		.data_out = tmp,
		.data_size_out = size_tmp,
		.repeat = 1,
	);

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (unpriv)
		set_admin(false);

	if (err) {
		switch (saved_errno) {
		case ENOTSUPP:
			printf("Did not run the program (not supported) ");
			return 0;
		case EPERM:
			if (unpriv) {
				printf("Did not run the program (no permission) ");
				return 0;
			}
			/* fallthrough; */
		default:
			printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
				strerror(saved_errno));
			return err;
		}
	}

	if (topts.retval != expected_val && expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", topts.retval, expected_val);
		return 1;
	}

	return 0;
}

/* Returns true if every part of exp (tab-separated) appears in log, in order.
 *
 * If exp is an empty string, returns true.
 */
static bool cmp_str_seq(const char *log, const char *exp)
{
	char needle[200];
	const char *p, *q;
	int len;

	do {
		if (!strlen(exp))
			break;
		p = strchr(exp, '\t');
		if (!p)
			p = exp + strlen(exp);

		len = p - exp;
		if (len >= sizeof(needle) || !len) {
			printf("FAIL\nTestcase bug\n");
			return false;
		}
		strncpy(needle, exp, len);
		needle[len] = 0;
		q = strstr(log, needle);
		if (!q) {
			printf("FAIL\nUnexpected verifier log!\n"
			       "EXP: %s\nRES:\n", needle);
			return false;
		}
		log = q + len;
		exp = p + 1;
	} while (*p);
	return true;
}

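/* Fetch the xlated (post-verifier) program via the usual two-call
 * bpf_prog_get_info_by_fd() pattern: the first call reports
 * xlated_prog_len, the second fills a buffer sized accordingly.
 */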
static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt)
{
	__u32 buf_element_size = sizeof(struct bpf_insn);
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 xlated_prog_len;
	struct bpf_insn *buf;

	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
		perror("bpf_prog_get_info_by_fd failed");
		return NULL;
	}

	xlated_prog_len = info.xlated_prog_len;
	if (xlated_prog_len % buf_element_size) {
		printf("Program length %d is not a multiple of %d\n",
		       xlated_prog_len, buf_element_size);
		return NULL;
	}

	*cnt = xlated_prog_len / buf_element_size;
	buf = calloc(*cnt, buf_element_size);
	if (!buf) {
		perror("can't allocate xlated program buffer");
		return NULL;
	}

	bzero(&info, sizeof(info));
	info.xlated_prog_len = xlated_prog_len;
	info.xlated_prog_insns = (__u64)(unsigned long)buf;
	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
		perror("second bpf_prog_get_info_by_fd failed");
		goto out_free_buf;
	}

	return buf;

out_free_buf:
	free(buf);
	return NULL;
}

static bool is_null_insn(struct bpf_insn *insn)
{
	struct bpf_insn null_insn = {};

	return memcmp(insn, &null_insn, sizeof(null_insn)) == 0;
}

static bool is_skip_insn(struct bpf_insn *insn)
{
	struct bpf_insn skip_insn = SKIP_INSNS();

	return memcmp(insn, &skip_insn, sizeof(skip_insn)) == 0;
}

static int null_terminated_insn_len(struct bpf_insn *seq, int max_len)
{
	int i;

	for (i = 0; i < max_len; ++i) {
		if (is_null_insn(&seq[i]))
			return i;
	}
	return max_len;
}

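/* Compare a program instruction against a pattern instruction, treating
 * INSN_IMM_MASK/INSN_OFF_MASK in the pattern as wildcards for the
 * corresponding fields.
 */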
static bool compare_masked_insn(struct bpf_insn *orig, struct bpf_insn *masked)
{
	struct bpf_insn orig_masked;

	memcpy(&orig_masked, orig, sizeof(orig_masked));
	if (masked->imm == INSN_IMM_MASK)
		orig_masked.imm = INSN_IMM_MASK;
	if (masked->off == INSN_OFF_MASK)
		orig_masked.off = INSN_OFF_MASK;

	return memcmp(&orig_masked, masked, sizeof(orig_masked)) == 0;
}

static int find_insn_subseq(struct bpf_insn *seq, struct bpf_insn *subseq,
			    int seq_len, int subseq_len)
{
	int i, j;

	if (subseq_len > seq_len)
		return -1;

	for (i = 0; i < seq_len - subseq_len + 1; ++i) {
		bool found = true;

		for (j = 0; j < subseq_len; ++j) {
			if (!compare_masked_insn(&seq[i + j], &subseq[j])) {
				found = false;
				break;
			}
		}
		if (found)
			return i;
	}

	return -1;
}

static int find_skip_insn_marker(struct bpf_insn *seq, int len)
{
	int i;

	for (i = 0; i < len; ++i)
		if (is_skip_insn(&seq[i]))
			return i;

	return -1;
}

/* Return true if all sub-sequences in `subseqs` can be found in
 * `seq` one after another. Sub-sequences are separated by a single
 * nil instruction.
 */
static bool find_all_insn_subseqs(struct bpf_insn *seq, struct bpf_insn *subseqs,
				  int seq_len, int max_subseqs_len)
{
	int subseqs_len = null_terminated_insn_len(subseqs, max_subseqs_len);

	while (subseqs_len > 0) {
		int skip_idx = find_skip_insn_marker(subseqs, subseqs_len);
		int cur_subseq_len = skip_idx < 0 ? subseqs_len : skip_idx;
		int subseq_idx = find_insn_subseq(seq, subseqs,
						  seq_len, cur_subseq_len);

		if (subseq_idx < 0)
			return false;
		seq += subseq_idx + cur_subseq_len;
		seq_len -= subseq_idx + cur_subseq_len;
		subseqs += cur_subseq_len + 1;
		subseqs_len -= cur_subseq_len + 1;
	}

	return true;
}

static void print_insn(struct bpf_insn *buf, int cnt)
{
	int i;

	printf("  addr  op d s off  imm\n");
	for (i = 0; i < cnt; ++i) {
		struct bpf_insn *insn = &buf[i];

		if (is_null_insn(insn))
			break;

		if (is_skip_insn(insn))
			printf("  ...\n");
		else
			printf("  %04x: %02x %1x %x %04hx %08x\n",
			       i, insn->code, insn->dst_reg,
			       insn->src_reg, insn->off, insn->imm);
	}
}

static bool check_xlated_program(struct bpf_test *test, int fd_prog)
{
	struct bpf_insn *buf;
	int cnt;
	bool result = true;
	bool check_expected = !is_null_insn(test->expected_insns);
	bool check_unexpected = !is_null_insn(test->unexpected_insns);

	if (!check_expected && !check_unexpected)
		goto out;

	buf = get_xlated_program(fd_prog, &cnt);
	if (!buf) {
		printf("FAIL: can't get xlated program\n");
		result = false;
		goto out;
	}

	if (check_expected &&
	    !find_all_insn_subseqs(buf, test->expected_insns,
				   cnt, MAX_EXPECTED_INSNS)) {
		printf("FAIL: can't find expected subsequence of instructions\n");
		result = false;
		if (verbose) {
			printf("Program:\n");
			print_insn(buf, cnt);
			printf("Expected subsequence:\n");
			print_insn(test->expected_insns, MAX_EXPECTED_INSNS);
		}
	}

	if (check_unexpected &&
	    find_all_insn_subseqs(buf, test->unexpected_insns,
				  cnt, MAX_UNEXPECTED_INSNS)) {
		printf("FAIL: found unexpected subsequence of instructions\n");
		result = false;
		if (verbose) {
			printf("Program:\n");
			print_insn(buf, cnt);
			printf("Unexpected subsequence:\n");
			print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS);
		}
	}

	free(buf);
 out:
	return result;
}

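/* Run a single test case: apply fixups, load the program (and BTF if
 * present), compare the verifier verdict and log against expectations,
 * check the xlated program, and finally execute the program via
 * bpf_prog_test_run_opts() when a run is expected.
 */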
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, btf_fd, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int fd_array[2] = { -1, -1 };
	int saved_errno;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	fd_prog = -1;
	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;
	btf_fd = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = BPF_F_TEST_RND_HI32;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	if (test->flags & ~3)
		pflags |= test->flags;

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	opts.expected_attach_type = test->expected_attach_type;
	if (verbose)
		opts.log_level = verif_log_level | 4; /* force stats */
	else if (expected_ret == VERBOSE_ACCEPT)
		opts.log_level = 2;
	else
		opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
	opts.prog_flags = pflags;
	if (fd_array[1] != -1)
		opts.fd_array = &fd_array[0];

	if ((prog_type == BPF_PROG_TYPE_TRACING ||
	     prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
		int attach_btf_id;

		attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
						opts.expected_attach_type);
		if (attach_btf_id < 0) {
			printf("FAIL\nFailed to find BTF ID for '%s'!\n",
				test->kfunc);
			(*errors)++;
			return;
		}

		opts.attach_btf_id = attach_btf_id;
	}

	if (test->btf_types[0] != 0) {
		btf_fd = load_btf_for_test(test);
		if (btf_fd < 0)
			goto fail_log;
		opts.prog_btf_fd = btf_fd;
	}

	if (test->func_info_cnt != 0) {
		opts.func_info = test->func_info;
		opts.func_info_cnt = test->func_info_cnt;
		opts.func_info_rec_size = sizeof(test->func_info[0]);
	}

	opts.log_buf = bpf_vlog;
	opts.log_size = sizeof(bpf_vlog);
	fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
	saved_errno = errno;

	/* BPF_PROG_TYPE_TRACING requires more setup and
	 * bpf_probe_prog_type won't give a correct answer
	 */
	if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
	    !libbpf_probe_bpf_prog_type(prog_type, NULL)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	if (fd_prog < 0 && saved_errno == ENOTSUPP) {
		printf("SKIP (program uses an unsupported feature)\n");
		skips++;
		goto close_fds;
	}

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(saved_errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
		if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success loading program!\n");
			goto fail_log;
		}
		if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			      expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (!unpriv && test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	if (verbose)
		printf(", verifier log:\n%s", bpf_vlog);

	if (!check_xlated_program(test, fd_prog))
		goto fail_log;

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	close(btf_fd);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	__u64 caps;

	/* The test checks for the finer-grained capabilities CAP_NET_ADMIN,
	 * CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN.
	 * Thus, disable CAP_SYS_ADMIN at the beginning.
	 */
	if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
		perror("cap_disable_effective(CAP_SYS_ADMIN)");
		return false;
	}

	return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}

static bool test_as_unpriv(struct bpf_test *test)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Some architectures have strict alignment requirements. In
	 * that case, the BPF verifier detects if a program has
	 * unaligned accesses and rejects them. A user can pass
	 * BPF_F_ANY_ALIGNMENT to a program to override this
	 * check. That, however, will only work when a privileged user
	 * loads a program. An unprivileged user loading a program
	 * with this flag will be rejected prior to entering the
	 * verifier.
	 */
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		return false;
#endif
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	/* ensure previous instance of the module is unloaded */
	unload_bpf_testmod(verbose);

	if (load_bpf_testmod(verbose))
		return EXIT_FAILURE;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root
		 * are skipped right away.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	unload_bpf_testmod(verbose);
	kfuncs_cleanup();

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();
	int arg = 1;

	if (argc > 1 && strcmp(argv[1], "-v") == 0) {
		arg++;
		verbose = true;
		verif_log_level = 1;
		argc--;
	}
	if (argc > 1 && strcmp(argv[1], "-vv") == 0) {
		arg++;
		verbose = true;
		verif_log_level = 2;
		argc--;
	}

	if (argc == 3) {
		unsigned int l = atoi(argv[arg]);
		unsigned int u = atoi(argv[arg + 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[arg]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	unpriv_disabled = get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}