{
	/* Store a map pointer into skb ctx memory with BPF_XADD.
	 * Privileged: rejected because BPF_XADD into ctx is not allowed at all.
	 * Unprivileged: rejected earlier because R2 (map pointer) would leak a
	 * kernel address into memory.
	 */
	"leak pointer into ctx 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	/* Initialize cb[0] with zero via a plain store (allowed). */
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	/* Pseudo map-fd load at insn index 2; the fd is patched in by the
	 * .fixup_map_hash_8b entry below, making R2 a map pointer.
	 */
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	/* Atomic add of the map pointer into ctx — the operation under test. */
	BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
		      offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 2 },
	.errstr_unpriv = "R2 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "BPF_XADD stores into R1 ctx is not allowed",
},
{
	/* Same as "leak pointer into ctx 1" but the leaked pointer is R10, the
	 * frame pointer, so no map fixup is needed.
	 * Privileged: rejected — BPF_XADD into ctx is not allowed.
	 * Unprivileged: rejected — R10 would leak a stack address into memory.
	 */
	"leak pointer into ctx 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	/* Initialize cb[0] with zero via a plain store (allowed). */
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	/* Atomic add of the frame pointer into ctx — the operation under test. */
	BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
		      offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R10 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "BPF_XADD stores into R1 ctx is not allowed",
},
{
	/* Store a map pointer into skb ctx memory with a plain BPF_STX_MEM
	 * (no XADD this time).
	 * Privileged: accepted — an ordinary pointer store into cb[] is legal.
	 * Unprivileged: rejected — R2 (map pointer) would leak a kernel
	 * address into ctx.
	 */
	"leak pointer into ctx 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	/* Pseudo map-fd load at insn index 1; the fd is patched in by the
	 * .fixup_map_hash_8b entry below, making R2 a map pointer.
	 */
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	/* Plain store of the map pointer into cb[0] — the operation under test. */
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
		      offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.errstr_unpriv = "R2 leaks addr into ctx",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	/* Store the ctx pointer into a map value with BPF_XADD.
	 * Privileged: accepted — XADD into a map value is legal.
	 * Unprivileged: rejected — R6 (saved ctx pointer) would leak a kernel
	 * address into map memory readable from user space.
	 */
	"leak pointer into map val",
	.insns = {
	/* Save the ctx pointer (R1) in R6 before it is clobbered by the
	 * helper call below.
	 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* Build a zeroed 8-byte key at fp-8 and point R2 at it. */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	/* Pseudo map-fd load at insn index 4; the fd is patched in by the
	 * .fixup_map_hash_8b entry below.
	 */
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* Skip the stores when the lookup returned NULL. */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	/* Zero the map value, then atomically add the saved ctx pointer into
	 * it — the operation under test.
	 */
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr_unpriv = "R6 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
68