1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
4 *
5 * Check if eBPF can do arithmetic with 64bits. This targets a specific
 * regression which only affects unprivileged users who are subject to extra
7 * pointer arithmetic checks during verification.
8 *
9 * Fixed by commit 3612af783cf52c74a031a2f11b82247b2599d3cd.
10 * https://new.blog.cloudflare.com/ebpf-cant-count/
11 *
12 * This test is very similar in structure to bpf_prog01 which is better
13 * annotated.
14 */
15
16#include <limits.h>
17#include <string.h>
18#include <stdio.h>
19#include <inttypes.h>
20
21#include "config.h"
22#include "tst_test.h"
23#include "tst_capability.h"
24#include "bpf_common.h"
25
26#define A64INT (((uint64_t)1) << 60)
27
/* Payload copied into the test packet handed to the BPF program. */
const char MSG[] = "Ahoj!";
static char *msg;

/*
 * Guarded buffers allocated by the test library before setup() runs
 * (see the .bufs table at the bottom of this file).
 */
static char *log;	/* verifier log buffer, BUFSIZE bytes */
static uint32_t *key;	/* array-map element index */
static uint64_t *val;	/* value read back from the map */
static union bpf_attr *attr;	/* reused for map/prog bpf() calls */
35
/*
 * Build and load an eBPF program which does 64-bit add and subtract:
 * it stores A64INT + 1 into element 0 and A64INT - 1 into element 1 of
 * the array map referred to by fd. Returns the program FD.
 *
 * Note: BPF_LD_MAP_FD and BPF_LD_IMM64 each occupy two instruction
 * slots, which is why the index comments below skip numbers.
 */
static int load_prog(int fd)
{
	struct bpf_insn insn[] = {
		BPF_MOV64_IMM(BPF_REG_6, 1),            /* 0: r6 = 1 */

		BPF_LD_MAP_FD(BPF_REG_1, fd),	        /* 1: r1 = &fd */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   /* 3: r2 = fp */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  /* 4: r2 = r2 - 4 */
		BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),     /* 5: *r2 = 0 */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),/* 6: map_lookup_elem */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), /* 7: if(!r0) goto 25 */
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),    /* 8: r3 = r0 */
		BPF_LD_IMM64(BPF_REG_4, A64INT),        /* 9: r4 = 2^60 */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_6), /* 11: r4 += r6 */
		BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_4, 0), /* 12: *r3 = r4 */

		BPF_LD_MAP_FD(BPF_REG_1, fd),	        /* 13: r1 = &fd */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   /* 15: r2 = fp */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  /* 16: r2 = r2 - 4 */
		BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 1),     /* 17: *r2 = 1 */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),/* 18: map_lookup_elem */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),  /* 19: if(!r0) goto 25 */
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),    /* 20: r3 = r0 */
		BPF_LD_IMM64(BPF_REG_4, A64INT),        /* 21: r4 = 2^60 */
		BPF_ALU64_REG(BPF_SUB, BPF_REG_4, BPF_REG_6), /* 23: r4 -= r6 */
		BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_4, 0), /* 24: *r3 = r4 */

		BPF_MOV64_IMM(BPF_REG_0, 0),            /* 25: r0 = 0 */
		BPF_EXIT_INSN(),		        /* 26: return r0 */
	};

	bpf_init_prog_attr(attr, insn, sizeof(insn), log, BUFSIZE);
	return bpf_load_prog(attr, log);
}
70
71static void setup(void)
72{
73	rlimit_bump_memlock();
74	memcpy(msg, MSG, sizeof(MSG));
75}
76
77static void run(void)
78{
79	int map_fd, prog_fd;
80
81	map_fd = bpf_map_array_create(2);
82	prog_fd = load_prog(map_fd);
83	bpf_run_prog(prog_fd, msg, sizeof(MSG));
84	SAFE_CLOSE(prog_fd);
85
86        *key = 0;
87	bpf_map_array_get(map_fd, key, val);
88	if (*val != A64INT + 1) {
89		tst_res(TFAIL,
90			"val = %"PRIu64", but should be val = %"PRIu64" + 1",
91			*val, A64INT);
92	} else {
93		tst_res(TPASS, "val = %"PRIu64" + 1", A64INT);
94	}
95
96	*key = 1;
97	bpf_map_array_get(map_fd, key, val);
98	if (*val != A64INT - 1) {
99		tst_res(TFAIL,
100			"val = %"PRIu64", but should be val = %"PRIu64" - 1",
101			*val, A64INT);
102	} else {
103		tst_res(TPASS, "val = %"PRIu64" - 1", A64INT);
104	}
105
106	SAFE_CLOSE(map_fd);
107}
108
static struct tst_test test = {
	.setup = setup,
	.test_all = run,
	.min_kver = "3.18",	/* bpf() syscall introduced in 3.18 */
	/*
	 * Drop CAP_SYS_ADMIN so the program is verified as an unprivileged
	 * user, which is the only case the original regression affected.
	 */
	.caps = (struct tst_cap []) {
		TST_CAP(TST_CAP_DROP, CAP_SYS_ADMIN),
		{}
	},
	/* Guarded buffers allocated by the library before setup() runs. */
	.bufs = (struct tst_buffers []) {
		{&key, .size = sizeof(*key)},
		{&val, .size = sizeof(*val)},
		{&log, .size = BUFSIZE},
		{&attr, .size = sizeof(*attr)},
		{&msg, .size = sizeof(MSG)},
		{},
	},
	/* Kernel commit that fixed the regression this test targets. */
	.tags = (const struct tst_tag[]) {
		{"linux-git", "3612af783cf5"},
		{}
	}
};
130