{
	"precise: test 1",
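	/* Two map lookups load two map_value pointers; subtracting them
	 * (r9 -= r8) leaves an opaque scalar, which is range-checked and
	 * then passed in r2 as the size argument of bpf_probe_read_kernel().
	 * The size argument must have precise bounds, so the verifier
	 * backtracks r2 through the current state and into the parent
	 * state; .errstr below matches the expected mark_precise log.
	 */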
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),

	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
	BPF_EXIT_INSN(),

	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.fixup_map_array_48b = { 1 },
	.result = VERBOSE_ACCEPT,
	.errstr =
	"mark_precise: frame0: last_idx 26 first_idx 20\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: regs=r2 stack= before 20\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 10\
	mark_precise: frame0: regs=r2,r9 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: regs=r0,r9 stack= before 15\
	mark_precise: frame0: regs=r0,r9 stack= before 14\
	mark_precise: frame0: regs=r9 stack= before 13\
	mark_precise: frame0: regs=r9 stack= before 12\
	mark_precise: frame0: regs=r9 stack= before 11\
	mark_precise: frame0: regs=r9 stack= before 10\
	mark_precise: frame0: parent state regs= stack=:",
},
{
	"precise: test 2",
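	/* Same program as "precise: test 1", but BPF_F_TEST_STATE_FREQ
	 * forces the verifier to checkpoint state far more often, so the
	 * backtrack for r2 walks a longer chain of smaller parent states
	 * (note the narrower first_idx/last_idx ranges in .errstr below).
	 */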
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),

	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
	BPF_EXIT_INSN(),

	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.fixup_map_array_48b = { 1 },
	.result = VERBOSE_ACCEPT,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr =
	"26: (85) call bpf_probe_read_kernel#113\
	mark_precise: frame0: last_idx 26 first_idx 22\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 20 first_idx 20\
	mark_precise: frame0: regs=r2,r9 stack= before 20\
	mark_precise: frame0: parent state regs=r2,r9 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 17\
	mark_precise: frame0: regs=r2,r9 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: parent state regs= stack=:",
},
{
	"precise: cross frame pruning",
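	/* r8 and r9 each become 0 or 1 from two independent
	 * bpf_get_prandom_u32() results. The byte load through the
	 * never-initialized r2 is reachable when r8 == 0, so the program
	 * must be rejected with "!read_ok". Roughly: if precision marks
	 * on r8 were lost when pruning across the bpf2bpf call frame,
	 * that path could be pruned away and the bad load missed.
	 */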
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
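	/* bpf2bpf call (src_reg == BPF_PSEUDO_CALL): target is the
	 * subprog at insn 14, the JEQ/EXIT pair at the end.
	 */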
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"precise: ST insn causing spi > allocated_stack",
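	/* The no-op branch below forces a checkpoint while fp-8 is not
	 * yet part of the allocated stack. Backtracking the BPF_ST write
	 * through that checkpoint hits spi > allocated_stack, and the
	 * expected log shows the verifier falling back to forcing all
	 * scalars precise instead of failing.
	 */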
	.insns = {
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
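	/* branch with off == 0: both outcomes fall through, but a
	 * checkpoint is still recorded here (presumably the point)
	 */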
	BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 4 first_idx 2\
	mark_precise: frame0: regs=r4 stack= before 4\
	mark_precise: frame0: regs=r4 stack= before 3\
	mark_precise: frame0: regs= stack=-8 before 2\
	mark_precise: frame0: falling back to forcing all scalars precise\
	force_precise: frame0: forcing r0 to be precise\
	mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs= stack=:",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
},
{
	"precise: STX insn causing spi > allocated_stack",
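	/* Same as the previous test, but the slot is written with BPF_STX
	 * (spilling r0 from bpf_get_prandom_u32()) rather than BPF_ST.
	 * The fallback now forces the r0 scalar precise in each
	 * checkpointed state, hence the repeated force_precise lines.
	 */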
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
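	/* as above: no-op branch forcing a checkpoint before fp-8 is
	 * allocated
	 */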
	BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 5 first_idx 3\
	mark_precise: frame0: regs=r4 stack= before 5\
	mark_precise: frame0: regs=r4 stack= before 4\
	mark_precise: frame0: regs= stack=-8 before 3\
	mark_precise: frame0: falling back to forcing all scalars precise\
	force_precise: frame0: forcing r0 to be precise\
	force_precise: frame0: forcing r0 to be precise\
	force_precise: frame0: forcing r0 to be precise\
	force_precise: frame0: forcing r0 to be precise\
	mark_precise: frame0: last_idx 6 first_idx 6\
	mark_precise: frame0: parent state regs= stack=:",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
},
{
	"precise: mark_chain_precision for ARG_CONST_ALLOC_SIZE_OR_ZERO",
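	/* r2, the bpf_ringbuf_reserve() size (ARG_CONST_ALLOC_SIZE_OR_ZERO),
	 * is 1 or 0x1000 depending on ingress_ifindex. The verifier must
	 * keep both values precise: with size 1, the 8-byte load at
	 * offset 42 below is out of bounds, hence the expected
	 * "invalid access to memory, mem_size=1 off=42 size=8".
	 */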
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, ingress_ifindex)),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
	BPF_MOV64_IMM(BPF_REG_2, 0x1000),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 42),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ | F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
	.result = REJECT,
},
{
	"precise: program doesn't prematurely prune branches",
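	/* The MOD below leaves r6 as an unknown scalar to the verifier.
	 * One path bounds it before the map-value pointer arithmetic, the
	 * other does not; pruning the unbounded path against the bounded
	 * one without precise marks would let the pointer math go
	 * unchecked. The expected "unbounded min value" rejection shows
	 * the branch is still explored.
	 */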
	.insns = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400),
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0),
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000),
		BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2),
		BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1),
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0),
		BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1),
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0),
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
		BPF_LD_MAP_FD(BPF_REG_4, 0),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
		BPF_EXIT_INSN(),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
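		/* pointer math with a possibly-unbounded r6; this is where
		 * the expected "unbounded min value" error fires
		 */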
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 13 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "register with unbounded min value is not allowed",
},