Lines Matching defs:test
90 /* If specified, test engine looks for this sequence of
92 * test rewrites applied by verifier. Use values
94 * fields if content does not matter. The test case fails if
102 /* If specified, test engine applies same pattern matching
104 * matched test case is marked as failed.
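The fragments above (source lines 90-104) describe the expected_insns / unexpected_insns fields: the test engine searches the loaded program for the first sequence and must not find the second, which lets a test pin down exactly which rewrites the verifier applied. A minimal sketch of a test entry using both fields, assuming the usual struct bpf_test designated-initializer style; the test name and instruction choices are illustrative only:

{
    "sketch: verifier keeps the mov/exit pair intact",
    .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    },
    .expected_insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    },
    .unexpected_insns = {
        BPF_MOV64_IMM(BPF_REG_0, 1),    /* must not survive the rewrites */
    },
    .result = ACCEPT,
    .retval = 0,
},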
190 /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
284 /* test the sequence of 8k jumps */
291 /* test to check that the long sequence of jumps is acceptable */
310 /* test the sequence of 8k jumps in innermost function (function depth 8) */
322 /* test to check that the long sequence of jumps is acceptable */
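The jump-sequence tests referenced here are generated at run time by a fill helper rather than written out instruction by instruction. A rough sketch of such a generator, assuming the fill_insns / prog_len convention visible further down (do_test_fixup() allocates MAX_TEST_INSNS instructions and calls test->fill_helper(test)); the helper name and jump count are illustrative:

static void bpf_fill_long_jump_seq(struct bpf_test *self)
{
    struct bpf_insn *insn = self->fill_insns;
    int i, jumps = 8192;

    /* a long run of jumps, each with offset 0 (fall through to the next insn) */
    for (i = 0; i < jumps; i++)
        insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
    insn[jumps] = BPF_MOV64_IMM(BPF_REG_0, 0);
    insn[jumps + 1] = BPF_EXIT_INSN();
    self->prog_len = jumps + 2;
}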
427 /* This test was added to catch a specific use-after-free
787 static int load_btf_for_test(struct bpf_test *test)
792 test->btf_types[types_num] != BTF_END_RAW)
795 int types_len = types_num * sizeof(test->btf_types[0]);
797 return load_btf_spec(test->btf_types, types_len,
798 test->btf_strings, sizeof(test->btf_strings));
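load_btf_for_test() counts entries in test->btf_types up to the BTF_END_RAW terminator, then hands the raw type array and the string blob to load_btf_spec(). A sketch of what a test supplies, assuming the *_ENC helper macros from the selftests' BTF test headers; the concrete types and string offsets are illustrative:

.btf_types = {
    /* [1] int */
    BTF_TYPE_INT_ENC(1 /* "int" */, BTF_INT_SIGNED, 0, 32, 4),
    /* [2] struct val { int v; } */
    BTF_STRUCT_ENC(5 /* "val" */, 1, 4),
    BTF_MEMBER_ENC(9 /* "v" */, 1, 0),
    BTF_END_RAW,
},
.btf_strings = "\0int\0val\0v",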
1009 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
1012 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
1013 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
1014 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
1015 int *fixup_map_array_48b = test->fixup_map_array_48b;
1016 int *fixup_map_sockmap = test->fixup_map_sockmap;
1017 int *fixup_map_sockhash = test->fixup_map_sockhash;
1018 int *fixup_map_xskmap = test->fixup_map_xskmap;
1019 int *fixup_map_stacktrace = test->fixup_map_stacktrace;
1020 int *fixup_prog1 = test->fixup_prog1;
1021 int *fixup_prog2 = test->fixup_prog2;
1022 int *fixup_map_in_map = test->fixup_map_in_map;
1023 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
1024 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
1025 int *fixup_map_spin_lock = test->fixup_map_spin_lock;
1026 int *fixup_map_array_ro = test->fixup_map_array_ro;
1027 int *fixup_map_array_wo = test->fixup_map_array_wo;
1028 int *fixup_map_array_small = test->fixup_map_array_small;
1029 int *fixup_sk_storage_map = test->fixup_sk_storage_map;
1030 int *fixup_map_event_output = test->fixup_map_event_output;
1031 int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
1032 int *fixup_map_ringbuf = test->fixup_map_ringbuf;
1033 int *fixup_map_timer = test->fixup_map_timer;
1034 int *fixup_map_kptr = test->fixup_map_kptr;
1036 if (test->fill_helper) {
1037 test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
1038 test->fill_helper(test);
1041 /* Allocating HTs with 1 elem is fine here, since we only test
1235 fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
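Each fixup_* array above holds instruction indices (typically of BPF_LD_MAP_FD placeholders) whose imm field is patched with a freshly created map fd before the program is loaded; an index of 0 terminates the list. The per-array pattern, sketched here for the 8-byte hash map under the assumption of a create_map()-style helper, is the same for every map kind:

if (*fixup_map_hash_8b) {
    map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                            sizeof(long long), 1);
    do {
        prog[*fixup_map_hash_8b].imm = map_fds[0];
        fixup_map_hash_8b++;
    } while (*fixup_map_hash_8b);
}

On the test-definition side the corresponding field simply lists the placeholder positions, e.g. .fixup_map_hash_8b = { 3 }.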
1505 static bool check_xlated_program(struct bpf_test *test, int fd_prog)
1510 bool check_expected = !is_null_insn(test->expected_insns);
1511 bool check_unexpected = !is_null_insn(test->unexpected_insns);
1524 !find_all_insn_subseqs(buf, test->expected_insns,
1532 print_insn(test->expected_insns, MAX_EXPECTED_INSNS);
1537 find_all_insn_subseqs(buf, test->unexpected_insns,
1545 print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS);
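check_xlated_program() works on the post-verifier ("xlated") instructions, scanning them for the expected subsequences and against the unexpected ones. A sketch of fetching that image, assuming libbpf's bpf_obj_get_info_by_fd() from <bpf/bpf.h> and the xlated_prog_* fields of struct bpf_prog_info; the helper name is illustrative:

static struct bpf_insn *get_xlated_insns(int fd_prog, __u32 *cnt)
{
    struct bpf_prog_info info = {};
    __u32 info_len = sizeof(info);
    struct bpf_insn *buf;

    /* first query: learn how large the xlated image is */
    if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len))
        return NULL;
    *cnt = info.xlated_prog_len / sizeof(struct bpf_insn);

    buf = calloc(*cnt, sizeof(struct bpf_insn));
    if (!buf)
        return NULL;

    /* second query: copy the instructions out */
    memset(&info, 0, sizeof(info));
    info.xlated_prog_insns = (__u64)(unsigned long)buf;
    info.xlated_prog_len = *cnt * sizeof(struct bpf_insn);
    if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
        free(buf);
        return NULL;
    }
    return buf;
}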
1554 static void do_test_single(struct bpf_test *test, bool unpriv,
1558 int prog_len, prog_type = test->prog_type;
1559 struct bpf_insn *prog = test->insns;
1578 do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
1579 if (test->fill_insns) {
1580 prog = test->fill_insns;
1581 prog_len = test->prog_len;
1586 * features, skip this test.
1592 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
1594 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
1596 if (test->flags & ~3)
1597 pflags |= test->flags;
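The `& ~3` check above implies that the two alignment-related test flags occupy the low two bits, with everything above them passed straight through as a raw BPF_F_* load flag; on the elided lines in between, F_LOAD_WITH_STRICT_ALIGNMENT presumably selects BPF_F_STRICT_ALIGNMENT and F_NEEDS_EFFICIENT_UNALIGNED_ACCESS selects BPF_F_ANY_ALIGNMENT. The assumed layout:

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS    (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT          (1 << 1)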
1599 expected_ret = unpriv && test->result_unpriv != UNDEF ?
1600 test->result_unpriv : test->result;
1601 expected_err = unpriv && test->errstr_unpriv ?
1602 test->errstr_unpriv : test->errstr;
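A test can give separate expectations for privileged and unprivileged loads: when running unprivileged, result_unpriv (if not UNDEF) and errstr_unpriv (if set) override result and errstr. Sketch of the relevant fields in a test entry, with an illustrative error string:

.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic prohibited",    /* illustrative */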
1604 opts.expected_attach_type = test->expected_attach_type;
1616 prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
1619 attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
1623 test->kfunc);
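For BPF_PROG_TYPE_TRACING and BPF_PROG_TYPE_LSM programs the test's .kfunc string names the in-kernel attach target, which libbpf_find_vmlinux_btf_id() resolves to the attach_btf_id used at load time. Sketch of the test-entry side; the target function is chosen only for illustration:

.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_FENTRY,
.kfunc = "bpf_fentry_test1",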
1631 if (test->btf_types[0] != 0) {
1632 btf_fd = load_btf_for_test(test);
1638 if (test->func_info_cnt != 0) {
1639 opts.func_info = test->func_info;
1640 opts.func_info_cnt = test->func_info_cnt;
1641 opts.func_info_rec_size = sizeof(test->func_info[0]);
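Each func_info record pairs an instruction offset with the BTF type id of that (sub)program's FUNC type, letting the verifier tie functions to their BTF signatures. A sketch of how a test might populate the fields, assuming the main program's FUNC is type id 3 and a subprogram starting at instruction 4 is type id 4:

.func_info = { { 0, 3 }, { 4, 4 } },    /* { insn_off, BTF FUNC type id } */
.func_info_cnt = 2,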
1675 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
1693 if (!unpriv && test->insn_processed) {
1699 if (test->insn_processed != insn_processed) {
1701 insn_processed, test->insn_processed);
1709 if (!check_xlated_program(test, fd_prog))
1714 if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
1718 if (!test->runs)
1719 test->runs = 1;
1721 for (i = 0; i < test->runs; i++) {
1722 if (unpriv && test->retvals[i].retval_unpriv)
1723 expected_val = test->retvals[i].retval_unpriv;
1725 expected_val = test->retvals[i].retval;
1728 test->retvals[i].data,
1729 sizeof(test->retvals[i].data));
1731 printf("(run %d/%d) ", i + 1, test->runs);
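When the program loads and alignment did not prevent execution, it is also run test->runs times (defaulting to 1), with retvals[i].data fed in as input and the return value compared against retval, or retval_unpriv when running unprivileged. Sketch of a two-run test entry, values illustrative:

.runs = 2,
.retvals = {
    { .retval = 1, .data = { 0x01 } },
    { .retval = 0, .retval_unpriv = 1, .data = { 0xff } },
},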
1752 if (test->fill_insns)
1753 free(test->fill_insns);
1770 /* The test checks for finer-grained caps such as CAP_NET_ADMIN,
1782 static bool test_as_unpriv(struct bpf_test *test)
1794 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
1797 return !test->prog_type ||
1798 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
1799 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
1813 struct bpf_test *test = &tests[i];
1818 if (test_as_unpriv(test) && unpriv_disabled) {
1819 printf("#%d/u %s SKIP\n", i, test->descr);
1821 } else if (test_as_unpriv(test)) {
1824 printf("#%d/u %s ", i, test->descr);
1825 do_test_single(test, true, &passes, &errors);
1831 printf("#%d/p %s SKIP\n", i, test->descr);
1834 printf("#%d/p %s ", i, test->descr);
1835 do_test_single(test, false, &passes, &errors);