Lines Matching defs:insn
40 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
48 * The second pass is all possible path descent from the 1st insn.
50 * analysis is limited to 64k insn, which may be hit even if total number of
51 * insn is less than 4K, but there are too many branches that change stack/regs.
71 * 1st insn copies R10 (which has FRAME_PTR) type into R1
74 * So after 2nd insn, the register R1 has type PTR_TO_STACK
120 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
121 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
122 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
131 * If it's ok, then verifier allows this BPF_CALL insn and looks at
137 * insn, the register holding that pointer in the true branch changes state to
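
The type transitions sketched in the comments above can be illustrated end to end. This is a minimal sketch only, using the standard BPF_* insn macros from include/linux/filter.h; map_fd is a placeholder for a hypothetical map with a 4-byte key and an 8-byte value, and the sequence is not taken from this file:

    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),     /* R2 = R10: FRAME_PTR copied */
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),    /* R2 becomes PTR_TO_STACK */
    BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),       /* initialize the 4-byte key slot */
    BPF_LD_MAP_FD(BPF_REG_1, map_fd),         /* R1 becomes CONST_PTR_TO_MAP */
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),    /* R0 is PTR_TO_MAP_VALUE_OR_NULL here */
    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),      /* reached only when R0 != NULL: PTR_TO_MAP_VALUE */
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
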
1453 struct bpf_insn *insn = env->prog->insnsi;
1464 if (insn[i].code != (BPF_JMP | BPF_CALL)) {
1467 if (insn[i].src_reg != BPF_PSEUDO_CALL) {
1474 ret = add_subprog(env, i + insn[i].imm + 1);
1495 u8 code = insn[i].code;
1497 if (code == (BPF_JMP | BPF_CALL) && insn[i].imm == BPF_FUNC_tail_call && insn[i].src_reg != BPF_PSEUDO_CALL) {
1509 off = i + insn[i].off + 1;
1511 verbose(env, "jump out of range from insn %d to %d\n", i, off);
1517 * the last insn of the subprog should be either exit
1521 verbose(env, "last insn is not an exit or jmp\n");
1590 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, u32 regno, struct bpf_reg_state *reg,
1595 code = insn->code;
1611 if (insn->src_reg == BPF_PSEUDO_CALL) {
1627 (class == BPF_ALU && op == BPF_END && insn->imm == VERIFIER_SIXTYFOUR)) {
1681 static bool insn_no_def(struct bpf_insn *insn)
1683 u8 class = BPF_CLASS(insn->code);
1689 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
1691 if (insn_no_def(insn)) {
1695 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
1715 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
1725 rw64 = is_reg64(env, insn, regno, reg, t);
1775 /* Backtrack one insn at a time. If idx is not at the top of recorded
1791 /* For given verifier state backtrack_insn() is called from the last insn to
1792 * the first insn. Its purpose is to compute a bitmask of registers and
1801 struct bpf_insn *insn = env->prog->insnsi + idx;
1802 u8 class = BPF_CLASS(insn->code);
1803 u8 opcode = BPF_OP(insn->code);
1804 u8 mode = BPF_MODE(insn->code);
1805 u32 dreg = 1u << insn->dst_reg;
1806 u32 sreg = 1u << insn->src_reg;
1809 if (insn->code == 0) {
1815 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1823 if (BPF_SRC(insn->code) == BPF_X) {
1825 * dreg needs precision after this insn
1826 * sreg needs precision before this insn
1832 * dreg needs precision after this insn.
1840 if (BPF_SRC(insn->code) == BPF_X) {
1843 * before this insn
1848 * dreg still needs precision before this insn
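
Read together, the precision comments above suggest roughly the following mask update for ALU insns. This is an illustrative reconstruction only, assuming the surrounding function tracks the registers that need precision in a bitmask called reg_mask; it is not a copy of the surrounding code:

    if (opcode == BPF_MOV) {
        if (BPF_SRC(insn->code) == BPF_X) {
            /* dreg = sreg: the precision requirement moves from dst to src */
            *reg_mask &= ~dreg;
            *reg_mask |= sreg;
        } else {
            /* dreg = K: dst becomes a known constant here, nothing further
             * back needs to be marked precise for it
             */
            *reg_mask &= ~dreg;
        }
    } else if (BPF_SRC(insn->code) == BPF_X) {
        /* dreg (op)= sreg: both dst and src need precision before this insn */
        *reg_mask |= sreg;
    }
    /* dreg (op)= K: dst still needs precision before this insn, mask unchanged */
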
1863 if (insn->src_reg != BPF_REG_FP) {
1866 if (BPF_SIZE(insn->code) != BPF_DW) {
1874 spi = (-insn->off - 1) / BPF_REG_SIZE;
1890 if (insn->dst_reg != BPF_REG_FP) {
1893 if (BPF_SIZE(insn->code) != BPF_DW) {
1896 spi = (-insn->off - 1) / BPF_REG_SIZE;
1911 if (insn->src_reg == BPF_PSEUDO_CALL) {
1967 * It's possible that this backtracking will go all the way till 1st insn.
2118 /* This can happen if backtracking reached insn 0
2160 * During backtracking insn 3 is not recognized as
2460 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", insn_idx, i);
2482 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", insn_idx,
3221 struct bpf_insn *insn = env->prog->insnsi;
3268 if (insn[i].code != (BPF_JMP | BPF_CALL)) {
3271 if (insn[i].src_reg != BPF_PSEUDO_CALL) {
3274 /* remember insn and function to return to */
3279 i = i + insn[i].imm + 1;
3282 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i);
3320 /* end of for() loop means the last insn of the 'subprog'
3336 static int get_callee_stack_depth(struct bpf_verifier_env *env, const struct bpf_insn *insn, int idx)
3338 int start = idx + insn->imm + 1, subprog;
3342 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", start);
3794 * insn. When the dst is PTR, it is for sure not
3895 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
3899 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) {
3905 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3911 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3916 if (is_pointer_value(env, insn->src_reg)) {
3917 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
3921 if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg) || is_flow_key_reg(env, insn->dst_reg) ||
3922 is_sk_reg(env, insn->dst_reg)) {
3923 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg,
3924 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
3929 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1, true);
3935 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, true);
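
For reference, an atomic add that satisfies the checks above could be emitted with the BPF_STX_XADD() macro; a sketch only, assuming R2 already holds a pointer to writable memory such as a map value and R3 holds a scalar:

    BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),   /* lock *(u64 *)(R2 + 0) += R3 */

A byte- or halfword-sized xadd, a non-zero imm field, a pointer in the source register, or a destination pointing at the context, packet, flow keys, or socket would each be rejected by the checks above.
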
5062 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx)
5075 target_insn = *insn_idx + insn->imm;
5078 verbose(env, "verifier bug. No program starts at insn %d\n", target_insn + 1);
5110 /* continue with next insn after call */
5148 /* and go analyze first insn of the callee */
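
The callee-entry arithmetic seen here and in add_subprog()/get_callee_stack_depth() above (entry = call index + insn->imm + 1) can be illustrated with the BPF_CALL_REL() macro; a hypothetical layout, not taken from this file:

    /* insn 0 */ BPF_MOV64_IMM(BPF_REG_1, 1),
    /* insn 1 */ BPF_CALL_REL(1),              /* callee entry = 1 + 1 + 1 = insn 3 */
    /* insn 2 */ BPF_EXIT_INSN(),              /* main prog returns the callee's R0 */
    /* insn 3 */ BPF_MOV64_IMM(BPF_REG_0, 0),  /* subprog 1 starts here */
    /* insn 4 */ BPF_EXIT_INSN(),
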
5670 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, const struct bpf_insn *insn)
5672 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
5690 static int sanitize_val_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
5694 if (can_skip_alu_sanitation(env, insn)) {
5711 static struct bpf_verifier_state *sanitize_speculative_path(struct bpf_verifier_env *env, const struct bpf_insn *insn,
5718 if (branch && insn) {
5720 if (BPF_SRC(insn->code) == BPF_K) {
5721 mark_reg_unknown(env, regs, insn->dst_reg);
5722 } else if (BPF_SRC(insn->code) == BPF_X) {
5723 mark_reg_unknown(env, regs, insn->dst_reg);
5724 mark_reg_unknown(env, regs, insn->src_reg);
5730 static int sanitize_ptr_alu(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg,
5739 u8 opcode = BPF_OP(insn->code);
5745 if (can_skip_alu_sanitation(env, insn)) {
5831 * insn as 'seen' such that when we verify unreachable paths in
5840 static int sanitize_err(struct bpf_verifier_env *env, const struct bpf_insn *insn, int reason,
5844 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
5845 u32 dst = insn->dst_reg, src = insn->src_reg;
5903 static int sanitize_check_bounds(struct bpf_verifier_env *env, const struct bpf_insn *insn,
5906 u32 dst = insn->dst_reg;
5942 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn,
5954 u8 opcode = BPF_OP(insn->code);
5955 u32 dst = insn->dst_reg;
5968 if (BPF_CLASS(insn->code) != BPF_ALU64) {
6021 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, &info, false);
6023 return sanitize_err(env, insn, ret, off_reg, dst_reg);
6150 if (sanitize_check_bounds(env, insn, dst_reg) < 0) {
6154 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, &info, true);
6156 return sanitize_err(env, insn, ret, off_reg, dst_reg);
6712 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn,
6716 u8 opcode = BPF_OP(insn->code);
6722 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? VERIFIER_SIXTYFOUR : VERIFIER_THIRTYTWO;
6723 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
6764 ret = sanitize_val_alu(env, insn);
6766 return sanitize_err(env, insn, ret, NULL, NULL);
6820 mark_reg_unknown(env, regs, insn->dst_reg);
6834 mark_reg_unknown(env, regs, insn->dst_reg);
6848 mark_reg_unknown(env, regs, insn->dst_reg);
6858 mark_reg_unknown(env, regs, insn->dst_reg);
6874 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn)
6880 u8 opcode = BPF_OP(insn->code);
6883 dst_reg = &regs[insn->dst_reg];
6893 if (BPF_SRC(insn->code) == BPF_X) {
6894 src_reg = &regs[insn->src_reg];
6902 mark_reg_unknown(env, regs, insn->dst_reg);
6905 verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg,
6913 err = mark_chain_precision(env, insn->dst_reg);
6917 return adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg);
6921 err = mark_chain_precision(env, insn->src_reg);
6925 return adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg);
6932 verifier_mark_reg_known(&off_reg, insn->imm);
6935 return adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg);
6950 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
6954 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
6957 u8 opcode = BPF_OP(insn->code);
6962 if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) {
6967 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
6968 (insn->imm != VERIFIER_SIXTEEN && insn->imm != VERIFIER_THIRTYTWO && insn->imm != VERIFIER_SIXTYFOUR) ||
6969 BPF_CLASS(insn->code) == BPF_ALU64) {
6976 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6981 if (is_pointer_value(env, insn->dst_reg)) {
6982 verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg);
6987 err = check_reg_arg(env, insn->dst_reg, DST_OP);
6992 if (BPF_SRC(insn->code) == BPF_X) {
6993 if (insn->imm != 0 || insn->off != 0) {
6998 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7003 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
7010 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
7015 if (BPF_SRC(insn->code) == BPF_X) {
7016 struct bpf_reg_state *src_reg = regs + insn->src_reg;
7017 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
7019 if (BPF_CLASS(insn->code) == BPF_ALU64) {
7035 if (is_pointer_value(env, insn->src_reg)) {
7036 verbose(env, "R%d partial copy of pointer\n", insn->src_reg);
7048 mark_reg_unknown(env, regs, insn->dst_reg);
7059 mark_reg_unknown(env, regs, insn->dst_reg);
7060 regs[insn->dst_reg].type = SCALAR_VALUE;
7061 if (BPF_CLASS(insn->code) == BPF_ALU64) {
7062 verifier_mark_reg_known(regs + insn->dst_reg, insn->imm);
7064 verifier_mark_reg_known(regs + insn->dst_reg, (u32)insn->imm);
7071 if (BPF_SRC(insn->code) == BPF_X) {
7072 if (insn->imm != 0 || insn->off != 0) {
7077 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7082 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
7089 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7094 if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
7099 if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
7100 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? VERIFIER_SIXTYFOUR : VERIFIER_THIRTYTWO;
7101 if (insn->imm < 0 || insn->imm >= size) {
7102 verbose(env, "invalid shift %d\n", insn->imm);
7107 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
7111 return adjust_reg_min_max_vals(env, insn);
7389 * 0 - branch will not be taken and fall-through to next insn
7722 static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg,
7726 if (BPF_SRC(insn->code) != BPF_X) {
7731 if (BPF_CLASS(insn->code) == BPF_JMP32) {
7735 switch (BPF_OP(insn->code)) {
7822 static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx)
7828 u8 opcode = BPF_OP(insn->code);
7839 if (BPF_SRC(insn->code) == BPF_X) {
7840 if (insn->imm != 0) {
7846 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7851 if (is_pointer_value(env, insn->src_reg)) {
7852 verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg);
7855 src_reg = &regs[insn->src_reg];
7857 if (insn->src_reg != BPF_REG_0) {
7864 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7869 dst_reg = &regs[insn->dst_reg];
7870 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
7872 if (BPF_SRC(insn->code) == BPF_K) {
7873 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
7885 err = mark_chain_precision(env, insn->dst_reg);
7887 if (BPF_SRC(insn->code) == BPF_X && !err) {
7888 err = mark_chain_precision(env, insn->src_reg);
7900 if (!env->bypass_spec_v1 && !sanitize_speculative_path(env, insn, *insn_idx + 1, *insn_idx)) {
7903 *insn_idx += insn->off;
7910 if (!env->bypass_spec_v1 && !sanitize_speculative_path(env, insn, *insn_idx + insn->off + 1, *insn_idx)) {
7916 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false);
7929 if (BPF_SRC(insn->code) == BPF_X) {
7930 struct bpf_reg_state *src_reg_in = &regs[insn->src_reg];
7934 reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, src_reg_in->var_off.value,
7937 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], src_reg_in, dst_reg->var_off.value,
7941 reg_combine_min_max(&other_branch_regs[insn->src_reg], &other_branch_regs[insn->dst_reg], src_reg_in,
7944 if (src_reg_in->id && !WARN_ON_ONCE(src_reg_in->id != other_branch_regs[insn->src_reg].id)) {
7946 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
7950 reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, insn->imm, (u32)insn->imm, opcode, is_jmp32);
7954 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
7956 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
7963 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
7968 mark_ptr_or_null_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
7969 mark_ptr_or_null_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
7970 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) &&
7971 is_pointer_value(env, insn->dst_reg)) {
7972 verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg);
7982 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
7990 if (BPF_SIZE(insn->code) != BPF_DW) {
7991 verbose(env, "invalid BPF_LD_IMM insn\n");
7994 if (insn->off != 0) {
7999 err = check_reg_arg(env, insn->dst_reg, DST_OP);
8004 dst_reg = &regs[insn->dst_reg];
8005 if (insn->src_reg == 0) {
8006 u64 imm = ((u64)(insn + 1)->imm << VERIFIER_THIRTYTWO) | (u32)insn->imm;
8009 verifier_mark_reg_known(&regs[insn->dst_reg], imm);
8017 mark_reg_known_zero(env, regs, insn->dst_reg);
8019 if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
8039 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
8045 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
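
The reassembly at 8006 above mirrors how the BPF_LD_IMM64() macro splits a 64-bit constant across the imm fields of two consecutive insns (value chosen arbitrarily for illustration):

    BPF_LD_IMM64(BPF_REG_1, 0x1122334455667788ULL),
    /* expands to two insns:
     *   insn[0]: BPF_LD | BPF_DW | BPF_IMM, imm = 0x55667788 (low 32 bits)
     *   insn[1]: code 0 (reserved),         imm = 0x11223344 (high 32 bits)
     */
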
8082 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
8086 u8 mode = BPF_MODE(insn->code);
8099 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW ||
8100 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
8133 err = check_reg_arg(env, insn->src_reg, SRC_OP);
8361 verbose(env, "jump out of range from insn %d to %d\n", t, w);
8385 verbose(env, "back-edge from insn %d to %d\n", t, w);
8391 verbose(env, "insn state internal bug\n");
8419 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
8517 verbose(env, "unreachable insn %d\n", i);
8628 verbose(env, "same or smaller insn offset (%u) than previous func info record (%u)", krecord[i].insn_off,
8770 verbose(env, "Invalid insn code at line_info[%u].insn_off\n", i);
8927 * the verifier states are added to state lists at given insn and
8929 * when the verifier reaches bpf_exit insn some of the verifier states
8937 * when the verifier reaches exit insn the register r0 in the state list of
8938 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
8939 * of insn 2 and goes exploring further. At the insn 4 it will walk the
8940 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
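
The liveness scenario described above corresponds to a program of roughly this shape (a sketch; insn numbering chosen to match the comment):

    /* insn 1 */ BPF_MOV64_IMM(BPF_REG_0, 1),
    /* insn 2 */ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 100, 1), /* a state is recorded here */
    /* insn 3 */ BPF_MOV64_IMM(BPF_REG_0, 2),             /* overwrites r0 on the fall-through path */
    /* insn 4 */ BPF_EXIT_INSN(),                         /* reads r0 */

On the fall-through path the read of r0 at insn 4 is satisfied by the write at insn 3, so the state recorded at insn 2 still shows r0 as !REG_LIVE_READ; only when the taken branch later reaches insn 4 does the parentage walk mark r0 REG_LIVE_READ in that state.
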
8957 static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur)
8962 sl = *explored_state(env, insn);
8967 if (sl->state.insn_idx != insn || sl->state.curframe != cur->curframe) {
9448 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
9577 WARN_ONCE(new->branches != 1, "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
9619 /* Return true if it's OK to have the same insn return a different type. */
9663 struct bpf_insn *insn;
9669 verbose(env, "invalid insn idx %d insn_cnt %d\n", env->insn_idx, insn_cnt);
9673 insn = &insns[env->insn_idx];
9674 class = BPF_CLASS(insn->code);
9677 verbose(env, "BPF program is too large. Processed %d insn\n", env->insn_processed);
9725 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
9740 err = check_alu_op(env, insn);
9750 err = check_reg_arg(env, insn->src_reg, SRC_OP);
9755 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
9760 src_reg_type = regs[insn->src_reg].type;
9765 err = check_mem_access(env, env->insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ,
9766 insn->dst_reg, false);
9774 /* saw a valid insn
9780 /* Abuser program is trying to use the same insn
9787 verbose(env, "same insn cannot be used with different pointers\n");
9792 if (BPF_MODE(insn->code) == BPF_XADD) {
9793 err = check_xadd(env, env->insn_idx, insn);
9802 err = check_reg_arg(env, insn->src_reg, SRC_OP);
9807 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9812 dst_reg_type = regs[insn->dst_reg].type;
9815 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE,
9816 insn->src_reg, false);
9826 verbose(env, "same insn cannot be used with different pointers\n");
9830 if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) {
9835 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9839 if (is_ctx_reg(env, insn->dst_reg)) {
9840 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", insn->dst_reg,
9841 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
9846 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1,
9852 u8 opcode = BPF_OP(insn->code);
9855 if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 ||
9856 (insn->src_reg != BPF_REG_0 && insn->src_reg != BPF_PSEUDO_CALL) || insn->dst_reg != BPF_REG_0 ||
9863 (insn->src_reg == BPF_PSEUDO_CALL || insn->imm != BPF_FUNC_spin_unlock)) {
9867 if (insn->src_reg == BPF_PSEUDO_CALL) {
9868 err = check_func_call(env, insn, &env->insn_idx);
9870 err = check_helper_call(env, insn->imm, env->insn_idx);
9876 if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 ||
9877 insn->dst_reg != BPF_REG_0 || class == BPF_JMP32) {
9881 env->insn_idx += insn->off + 1;
9884 if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 ||
9885 insn->dst_reg != BPF_REG_0 || class == BPF_JMP32) {
9925 err = check_cond_jmp_op(env, insn, &env->insn_idx);
9931 u8 mode = BPF_MODE(insn->code);
9933 err = check_ld_abs(env, insn);
9938 err = check_ld_imm(env, insn);
9949 verbose(env, "unknown insn class %d\n", class);
9959 static int check_pseudo_btf_id(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_insn_aux_data *aux)
9966 u32 type, id = insn->imm;
9976 if (insn[1].imm != 0) {
9977 verbose(env, "reserved field (insn[1].imm) is used in pseudo_btf_id ldimm64 insn.\n");
9983 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
10011 insn[0].imm = (u32)addr;
10012 insn[1].imm = addr >> VERIFIER_THIRTYTWO;
10155 struct bpf_insn *insn = env->prog->insnsi;
10164 for (i = 0; i < insn_cnt; i++, insn++) {
10165 if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
10170 if (BPF_CLASS(insn->code) == BPF_STX &&
10171 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
10176 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
10182 if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
10183 insn[1].off != 0) {
10184 verbose(env, "invalid bpf_ld_imm64 insn\n");
10188 if (insn[0].src_reg == 0) {
10193 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
10195 err = check_pseudo_btf_id(env, insn, aux);
10203 * converted into regular 64-bit imm load insn.
10205 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
10206 (insn[0].src_reg == BPF_PSEUDO_MAP_FD && insn[1].imm != 0)) {
10207 verbose(env, "unrecognized bpf_ld_imm64 insn\n");
10211 f = fdget(insn[0].imm);
10214 verbose(env, "fd %d is not pointing to valid bpf_map\n", insn[0].imm);
10225 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
10228 u32 off = insn[1].imm;
10253 insn[0].imm = (u32)addr;
10254 insn[1].imm = addr >> VERIFIER_THIRTYTWO;
10288 insn++;
10294 if (!bpf_opcode_in_insntable(insn->code)) {
10295 verbose(env, "unknown opcode %02x\n", insn->code);
10316 struct bpf_insn *insn = env->prog->insnsi;
10320 for (i = 0; i < insn_cnt; i++, insn++) {
10321 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) {
10322 insn->src_reg = 0;
10335 struct bpf_insn *insn = new_prog->insnsi;
10342 * original insn at old prog.
10344 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
10356 new_data[i].zext_dst = insn_has_def32(env, insn + i);
10409 verbose(env, "insn %d cannot be patched due to 16-bit range\n", env->insn_aux_data[off].orig_idx);
10508 /* First live insn doesn't match first live linfo, it needs to "inherit"
10592 struct bpf_insn *insn = env->prog->insnsi;
10600 memcpy(insn + i, &trap, sizeof(trap));
10625 struct bpf_insn *insn = env->prog->insnsi;
10629 for (i = 0; i < insn_cnt; i++, insn++) {
10630 if (!insn_is_cond_jump(insn->code)) {
10635 ja.off = insn->off;
10636 } else if (!aux_data[i + 1 + insn->off].seen) {
10646 memcpy(insn, &ja, sizeof(ja));
10680 struct bpf_insn *insn = env->prog->insnsi;
10685 if (memcmp(&insn[i], &ja, sizeof(ja))) {
10716 struct bpf_insn insn;
10718 insn = insns[adj_idx];
10727 code = insn.code;
10729 if (insn_no_def(&insn)) {
10737 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
10750 rnd_hi32_patch[0] = insn;
10752 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
10762 zext_patch[0] = insn;
10763 zext_patch[1].dst_reg = insn.dst_reg;
10764 zext_patch[1].src_reg = insn.dst_reg;
10791 struct bpf_insn insn_buf[VERIFIER_SIXTEEN], *insn;
10821 insn = env->prog->insnsi + delta;
10823 for (i = 0; i < insn_cnt; i++, insn++) {
10827 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
10828 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
10831 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
10832 insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
10833 insn->code == (BPF_ST | BPF_MEM | BPF_B) || insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
10834 insn->code == (BPF_ST | BPF_MEM | BPF_W) || insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
10836 ctx_access = BPF_CLASS(insn->code) == BPF_STX;
10843 *insn,
10855 insn = new_prog->insnsi + i + delta;
10882 insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
10894 size = BPF_LDST_BYTES(insn);
10903 off = insn->off;
10919 insn->off = off & ~(size_default - 1);
10920 insn->code = BPF_LDX | BPF_MEM | size_code;
10924 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, &target_size);
10938 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, insn->dst_reg, shift);
10940 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << (size * VERIFIER_EIGHT)) - 1);
10943 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, insn->dst_reg, shift);
10945 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1ULL << (size * VERIFIER_EIGHT)) - 1);
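
As a worked example of the shift/mask above: a 1-byte ctx load at offset 3 of a field whose size_default is 4 is widened to a 4-byte load at offset 0 and then narrowed back; on a little-endian host the shift is (off & (size_default - 1)) * 8. Illustrative only:

    /* original:  r2 = *(u8 *)(r1 + 3)      narrow ctx load, size = 1 */
    /* widened:   r2 = *(u32 *)(r1 + 0)     off & ~(size_default - 1) == 0, then: */
    BPF_ALU32_IMM(BPF_RSH, BPF_REG_2, 24),   /* shift = 3 * 8 */
    BPF_ALU32_IMM(BPF_AND, BPF_REG_2, 0xff), /* (1 << size * 8) - 1 */
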
10958 insn = new_prog->insnsi + i + delta;
10969 struct bpf_insn *insn;
10977 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
10978 if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) {
10985 subprog = find_subprog(env, i + insn->imm + 1);
10987 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i + insn->imm + 1);
10990 /* temporarily remember subprog id inside insn instead of
10993 insn->off = subprog;
10997 env->insn_aux_data[i].call_imm = insn->imm;
10999 insn->imm = 1;
11061 insn = func[i]->insnsi;
11062 for (j = 0; j < func[i]->len; j++, insn++) {
11063 if (BPF_CLASS(insn->code) == BPF_LDX && BPF_MODE(insn->code) == BPF_PROBE_MEM) {
11082 insn = func[i]->insnsi;
11083 for (j = 0; j < func[i]->len; j++, insn++) {
11084 if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) {
11087 subprog = insn->off;
11088 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - __bpf_call_base;
11128 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
11129 if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) {
11132 insn->off = env->insn_aux_data[i].call_imm;
11133 subprog = find_subprog(env, i + insn->off + 1);
11134 insn->imm = subprog;
11167 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
11168 if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) {
11171 insn->off = 0;
11172 insn->imm = env->insn_aux_data[i].call_imm;
11182 struct bpf_insn *insn = prog->insnsi;
11204 for (i = 0; i < prog->len; i++, insn++) {
11205 if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) {
11208 depth = get_callee_stack_depth(env, insn, i);
11212 bpf_patch_call_args(insn, depth);
11219 /* fixup insn->imm field of bpf_call instructions
11228 struct bpf_insn *insn = prog->insnsi;
11238 for (i = 0; i < insn_cnt; i++, insn++) {
11239 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
11240 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
11241 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
11242 bool isdiv = BPF_OP(insn->code) == BPF_DIV;
11246 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | BPF_JNE | BPF_K, insn->src_reg, 0, 2, 0),
11247 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
11249 *insn,
11253 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | BPF_JEQ | BPF_K, insn->src_reg, 0, 1 + (is64 ? 0 : 1), 0),
11254 *insn,
11256 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
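
For the divide case, the patch built from lines 11246-11249 above yields a sequence of roughly this shape (shown for a 64-bit r0 /= r1; illustrative, and the unconditional jump sits between the quoted lines):

    BPF_RAW_INSN(BPF_JMP | BPF_JNE | BPF_K, BPF_REG_1, 0, 2, 0),  /* if r1 != 0 goto the div */
    BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0),                 /* divide by zero -> 0 */
    BPF_JMP_A(1),                                                 /* skip the original insn */
    BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),                 /* the original insn */
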
11269 insn = new_prog->insnsi + i + delta;
11273 if (BPF_CLASS(insn->code) == BPF_LD && (BPF_MODE(insn->code) == BPF_ABS || BPF_MODE(insn->code) == BPF_IND)) {
11274 cnt = env->ops->gen_ld_abs(insn, insn_buf);
11287 insn = new_prog->insnsi + i + delta;
11291 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
11308 off_reg = issrc ? insn->src_reg : insn->dst_reg;
11323 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
11325 insn->src_reg = BPF_REG_AX;
11327 insn->code = insn->code == code_add ? code_sub : code_add;
11329 *patch++ = *insn;
11342 insn = new_prog->insnsi + i + delta;
11346 if (insn->code != (BPF_JMP | BPF_CALL)) {
11349 if (insn->src_reg == BPF_PSEUDO_CALL) {
11353 if (insn->imm == BPF_FUNC_get_route_realm) {
11356 if (insn->imm == BPF_FUNC_get_prandom_u32) {
11359 if (insn->imm == BPF_FUNC_override_return) {
11362 if (insn->imm == BPF_FUNC_tail_call) {
11379 insn->imm = 0;
11380 insn->code = BPF_JMP | BPF_TAIL_CALL;
11398 insn->imm = ret + 1;
11420 insn_buf[2] = *insn;
11429 insn = new_prog->insnsi + i + delta;
11438 (insn->imm == BPF_FUNC_map_lookup_elem || insn->imm == BPF_FUNC_map_update_elem ||
11439 insn->imm == BPF_FUNC_map_delete_elem || insn->imm == BPF_FUNC_map_push_elem ||
11440 insn->imm == BPF_FUNC_map_pop_elem || insn->imm == BPF_FUNC_map_peek_elem)) {
11448 if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) {
11465 insn = new_prog->insnsi + i + delta;
11478 switch (insn->imm) {
11480 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - __bpf_call_base;
11483 insn->imm = BPF_CAST_CALL(ops->map_update_elem) - __bpf_call_base;
11486 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - __bpf_call_base;
11489 insn->imm = BPF_CAST_CALL(ops->map_push_elem) - __bpf_call_base;
11492 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - __bpf_call_base;
11495 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - __bpf_call_base;
11504 if (prog->jit_requested && BITS_PER_LONG == VERIFIER_SIXTYFOUR && insn->imm == BPF_FUNC_jiffies64) {
11521 insn = new_prog->insnsi + i + delta;
11526 fn = env->ops->get_func_proto(insn->imm, env->prog);
11531 verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm);
11534 insn->imm = fn->func - __bpf_call_base;
12308 /* do 32-bit optimization after insn patching has done so those patched