1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3
4#define MAX_INSNS	512
5#define MAX_MATCHES	24
6
/* One expected verifier-log assertion: the log line for instruction
 * index 'line' must contain the substring 'match'.
 */
struct bpf_reg_match {
	unsigned int line;	/* instruction index printed at start of log line */
	const char *match;	/* substring expected somewhere on that line */
};
11
/* A single alignment-tracking test: a BPF program plus the register
 * states expected in the verifier log while loading it.
 */
struct bpf_align_test {
	const char *descr;	/* subtest name */
	struct bpf_insn	insns[MAX_INSNS];	/* program; trailing zero insns mark the end */
	enum {
		UNDEF,	/* default (0): treated the same as ACCEPT by do_test_single() */
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;	/* 0 falls back to BPF_PROG_TYPE_SOCKET_FILTER */
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};
24
/* Table of alignment-tracking test programs.  Each program is loaded
 * with BPF_F_STRICT_ALIGNMENT and its verifier log is then checked
 * against the per-instruction register-state substrings in .matches.
 */
static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv2"},
			{2, "R3_w=inv4"},
			{3, "R3_w=inv8"},
			{4, "R3_w=inv16"},
			{5, "R3_w=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv1"},
			{2, "R3_w=inv2"},
			{3, "R3_w=inv4"},
			{4, "R3_w=inv8"},
			{5, "R3_w=inv16"},
			{6, "R3_w=inv1"},
			{7, "R4_w=inv32"},
			{8, "R4_w=inv16"},
			{9, "R4_w=inv8"},
			{10, "R4_w=inv4"},
			{11, "R4_w=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv4"},
			{2, "R3_w=inv8"},
			{3, "R3_w=inv10"},
			{4, "R4_w=inv8"},
			{5, "R4_w=inv12"},
			{6, "R4_w=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv7"},
			{2, "R3_w=inv7"},
			{3, "R3_w=inv14"},
			{4, "R3_w=inv56"},
		},
	},

	/* Tests using unknown values */

	/* Load skb->data into R2 and skb->data_end into R3 */
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

	/* Bounds-check 8 bytes of packet, then load one byte (with
	 * unknown value, bounded [0,255]) from the start of the packet
	 * into DST_REG.
	 */
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header.  */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction.  Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * it's total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4. To avoid BPF
			 * verifier's precision backtracking logging
			 * interfering we also have a no-op R4 = R5
			 * instruction to validate R5 state. We also check
			 * that R4 is what it should be in such case.
			 */
			{19, "R4_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{19, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{20, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{24, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{24, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{27, "R5_w=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n). See comment for insn #19
			 * for R4 = R5 trick.
			 */
			{29, "R4_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{29, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{30, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{32, "R4_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
			{32, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{35, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
			{35, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
			/* (4n) + 14 == (4n+2).  We blow our bounds, because
			 * the add could overflow.
			 */
			{7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s>=0 */
			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s>= 0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},

		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
			/* New unknown value in R7 is (4n), >= 76 */
			{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
		},
	},
};
589
590static int probe_filter_length(const struct bpf_insn *fp)
591{
592	int len;
593
594	for (len = MAX_INSNS - 1; len > 0; --len)
595		if (fp[len].code != 0 || fp[len].imm != 0)
596			break;
597	return len + 1;
598}
599
/* Buffer receiving the verifier log of the program under test. */
static char bpf_vlog[32768];
601
602static int do_test_single(struct bpf_align_test *test)
603{
604	struct bpf_insn *prog = test->insns;
605	int prog_type = test->prog_type;
606	char bpf_vlog_copy[32768];
607	const char *line_ptr;
608	int cur_line = -1;
609	int prog_len, i;
610	int fd_prog;
611	int ret;
612
613	prog_len = probe_filter_length(prog);
614	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
615				     prog, prog_len, BPF_F_STRICT_ALIGNMENT,
616				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
617	if (fd_prog < 0 && test->result != REJECT) {
618		printf("Failed to load program.\n");
619		printf("%s", bpf_vlog);
620		ret = 1;
621	} else if (fd_prog >= 0 && test->result == REJECT) {
622		printf("Unexpected success to load!\n");
623		printf("%s", bpf_vlog);
624		ret = 1;
625		close(fd_prog);
626	} else {
627		ret = 0;
628		/* We make a local copy so that we can strtok() it */
629		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
630		line_ptr = strtok(bpf_vlog_copy, "\n");
631		for (i = 0; i < MAX_MATCHES; i++) {
632			struct bpf_reg_match m = test->matches[i];
633
634			if (!m.match)
635				break;
636			while (line_ptr) {
637				cur_line = -1;
638				sscanf(line_ptr, "%u: ", &cur_line);
639				if (cur_line == m.line)
640					break;
641				line_ptr = strtok(NULL, "\n");
642			}
643			if (!line_ptr) {
644				printf("Failed to find line %u for match: %s\n",
645				       m.line, m.match);
646				ret = 1;
647				printf("%s", bpf_vlog);
648				break;
649			}
650			if (!strstr(line_ptr, m.match)) {
651				printf("Failed to find match %u: %s\n",
652				       m.line, m.match);
653				ret = 1;
654				printf("%s", bpf_vlog);
655				break;
656			}
657		}
658		if (fd_prog >= 0)
659			close(fd_prog);
660	}
661	return ret;
662}
663
664void test_align(void)
665{
666	unsigned int i;
667
668	for (i = 0; i < ARRAY_SIZE(tests); i++) {
669		struct bpf_align_test *test = &tests[i];
670
671		if (!test__start_subtest(test->descr))
672			continue;
673
674		CHECK_FAIL(do_test_single(test));
675	}
676}
677