Lines Matching defs:state

39  * instruction by instruction and updates register/stack state.

137 * insn, the register holding that pointer in the true branch changes state to
138 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
150 * pointer id for the reference and stores it in the current function state.
153 * passes through a NULL-check conditional. For the branch wherein the state is
165 /* verifier state is 'st'
173 /* length of verifier log at the time this state was pushed on stack */
232 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
236 aux->map_key_state = state | BPF_MAP_KEY_SEEN | (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
560 static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
566 if (state->frameno) {
567 verbose(env, " frame%d:", state->frameno);
570 reg = &state->regs[i];
641 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
647 if (state->stack[i].slot_type[j] != STACK_INVALID) {
650 types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
657 print_liveness(env, state->stack[i].spilled_ptr.live);
658 if (state->stack[i].slot_type[0] == STACK_SPILL) {
659 reg = &state->stack[i].spilled_ptr;
672 if (state->acquired_refs && state->refs[0].id) {
673 verbose(env, " refs=%d", state->refs[0].id);
674 for (i = 1; i < state->acquired_refs; i++) {
675 if (state->refs[i].id) {
676 verbose(env, ",%d", state->refs[i].id);
689 /* internal bug, make state invalid to reject the program */ \
703 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, bool copy_old) \
705 u32 old_size = state->COUNT; \
712 state->COUNT = slot * (SIZE); \
714 kfree(state->FIELD); \
715 state->FIELD = NULL; \
723 if (state->FIELD) \
724 memcpy(new_##FIELD, state->FIELD, sizeof(*new_##FIELD) * (old_size / (SIZE))); \
727 state->COUNT = slot * (SIZE); \
728 kfree(state->FIELD); \
729 state->FIELD = new_##FIELD; \
745 static int realloc_func_state(struct bpf_func_state *state, int stack_size, int refs_size, bool copy_old)
747 int err = realloc_reference_state(state, refs_size, copy_old);
751 return realloc_stack_state(state, stack_size, copy_old);
754 /* Acquire a pointer id from the env and update the state->refs to include
761 struct bpf_func_state *state = cur_func(env);
762 int new_ofs = state->acquired_refs;
765 err = realloc_reference_state(state, state->acquired_refs + 1, true);
770 state->refs[new_ofs].id = id;
771 state->refs[new_ofs].insn_idx = insn_idx;
777 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
781 last_idx = state->acquired_refs - 1;
782 for (i = 0; i < state->acquired_refs; i++) {
783 if (state->refs[i].id == ptr_id) {
785 memcpy(&state->refs[i], &state->refs[last_idx], sizeof(*state->refs));
787 memset(&state->refs[last_idx], 0, sizeof(*state->refs));
788 state->acquired_refs--;
808 static void free_func_state(struct bpf_func_state *state)
810 if (!state) {
813 kfree(state->refs);
814 kfree(state->stack);
815 kfree(state);
818 static void clear_jmp_history(struct bpf_verifier_state *state)
820 kfree(state->jmp_history);
821 state->jmp_history = NULL;
822 state->jmp_history_cnt = 0;
825 static void free_verifier_state(struct bpf_verifier_state *state, bool free_self)
829 for (i = 0; i <= state->curframe; i++) {
830 free_func_state(state->frame[i]);
831 state->frame[i] = NULL;
833 clear_jmp_history(state);
835 kfree(state);
839 /* copy verifier state from src to dst growing dst stack space
986 * a new state for a sequence of branches and all such current
987 * and cloned states will be pointing to a single parent state
1377 static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state)
1379 struct bpf_reg_state *regs = state->regs;
1392 regs[BPF_REG_FP].frameno = state->frameno;
1396 static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int callsite, int frameno,
1399 state->callsite = callsite;
1400 state->frameno = frameno;
1401 state->subprogno = subprogno;
1402 init_reg_state(env, state);
1537 static int mark_reg_read(struct bpf_verifier_env *env, const struct bpf_reg_state *state, struct bpf_reg_state *parent,
1540 bool writes = parent == state->parent; /* Observe write marks */
1545 if (writes && (state->live & REG_LIVE_WRITTEN)) {
1574 state = parent;
1575 parent = state->parent;
1714 struct bpf_func_state *state = vstate->frame[vstate->curframe];
1716 struct bpf_reg_state *reg, *regs = state->regs;
1757 /* for any branch, call, exit record the history of jmps in the given state */
1791 /* For given verifier state backtrack_insn() is called from the last insn to
1793 * stack slots that needs precision in the parent verifier state.
1834 * as precise=true in this verifier state.
1860 * by 'precise' mark in corresponding register of this state.
1954 * . during state pruning two registers (or spilled stack slots)
2107 /* Found assignment(s) into tracked register in this state.
2108 * Since this state is already marked, just return.
2109 * Nothing to be tracked further in the parent state.
2163 * However the parent state may not have accessed
2271 static void save_register_state(struct bpf_func_state *state, int spi, struct bpf_reg_state *reg)
2275 state->stack[spi].spilled_ptr = *reg;
2276 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2279 state->stack[spi].slot_type[i] = STACK_SPILL;
2288 struct bpf_func_state *state, int off, int size, int value_regno, int insn_idx)
2290 struct bpf_func_state *cur; /* state of the current function */
2295 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), state->acquired_refs, true);
2302 if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) {
2315 if (state->stack[spi].slot_type[i] == STACK_INVALID) {
2339 save_register_state(state, spi, reg);
2347 if (state != cur && reg->type == PTR_TO_STACK) {
2351 save_register_state(state, spi, reg);
2356 state->stack[spi].spilled_ptr.type = NOT_INIT;
2358 if (state->stack[spi].slot_type[0] == STACK_SPILL) {
2360 state->stack[spi].slot_type[i] = STACK_MISC;
2369 * to stack slots all the way to first state when programs
2373 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2388 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type;
2415 struct bpf_func_state *state, int ptr_regno, int off, int size, int value_regno,
2418 struct bpf_func_state *cur; /* state of the current function */
2439 err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE), state->acquired_refs, true);
2451 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
2465 state->stack[spi].spilled_ptr.type = NOT_INIT;
2511 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2529 verifier_mark_reg_const_zero(&state->regs[dst_regno]);
2540 state->regs[dst_regno].precise = true;
2543 mark_reg_unknown(env, state->regs, dst_regno);
2545 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
2562 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2578 mark_reg_unknown(env, state->regs, dst_regno);
2579 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
2592 /* restore register state from stack */
2593 state->regs[dst_regno] = *reg;
2594 /* mark reg as written since spilled pointer state likely
2596 * which resets stack/reg liveness for state transitions
2598 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
2661 /* The state of the source register. */
2692 struct bpf_func_state *state = func(env, reg);
2722 err = check_stack_read_fixed_off(env, state, off, size, dst_regno);
2747 struct bpf_func_state *state = func(env, reg);
2752 err = check_stack_write_fixed_off(env, state, off, size, value_regno, insn_idx);
2757 err = check_stack_write_var_off(env, state, ptr_regno, off, size, value_regno, insn_idx);
2814 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2815 struct bpf_reg_state *reg = &state->regs[regno];
2823 print_verifier_state(env, state);
2865 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2866 struct bpf_reg_state *reg = &state->regs[regno];
3602 * -state->allocated_stack for reads.
3604 static int check_stack_slot_within_bounds(int off, struct bpf_func_state *state, enum bpf_access_type t)
3611 min_valid_off = -state->allocated_stack;
3630 struct bpf_func_state *state = func(env, reg);
3664 err = check_stack_slot_within_bounds(min_off, state, type);
3666 err = check_stack_slot_within_bounds(max_off, state, type);
3694 struct bpf_func_state *state;
3810 state = func(env, reg);
3811 err = update_stack_depth(env, state, off);
3952 struct bpf_func_state *state = func(env, reg);
4022 if (state->allocated_stack <= slot) {
4025 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
4037 if (state->stack[spi].slot_type[0] == STACK_SPILL && state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) {
4041 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
4042 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || env->allow_ptr_leaks)) {
4044 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
4046 state->stack[spi].slot_type[j] = STACK_MISC;
4068 mark_reg_read(env, &state->stack[spi].spilled_ptr, state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
4070 return update_stack_depth(env, state, min_off);
4976 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, struct bpf_func_state *state)
4978 struct bpf_reg_state *regs = state->regs, *reg;
4987 bpf_for_each_spilled_reg(i, state, reg)
5008 static void release_reg_references(struct bpf_verifier_env *env, struct bpf_func_state *state, int ref_obj_id)
5010 struct bpf_reg_state *regs = state->regs, *reg;
5019 bpf_for_each_spilled_reg(i, state, reg)
5064 struct bpf_verifier_state *state = env->cur_state;
5070 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
5071 verbose(env, "the call stack of %d frames is too deep\n", state->curframe + 2);
5082 caller = state->frame[state->curframe];
5083 if (state->frame[state->curframe + 1]) {
5084 verbose(env, "verifier bug. Frame %d already allocated\n", state->curframe + 1);
5119 state->frame[state->curframe + 1] = callee;
5127 *insn_idx /* callsite */, state->curframe + 1 /* frameno within this callchain */,
5146 state->curframe++;
5162 struct bpf_verifier_state *state = env->cur_state;
5167 callee = state->frame[state->curframe];
5180 state->curframe--;
5181 caller = state->frame[state->curframe];
5200 state->frame[state->curframe + 1] = NULL;
5240 * state of the map from program side.
5297 struct bpf_func_state *state = cur_func(env);
5300 for (i = 0; i < state->acquired_refs; i++) {
5301 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", state->refs[i].id, state->refs[i].insn_idx);
5303 return state->acquired_refs ? -EINVAL : 0;
5377 * is inferred from register state.
5678 * state or limits to sanitize, then this won't work.
5946 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5947 struct bpf_reg_state *regs = state->regs, *dst_reg;
6877 struct bpf_func_state *state = vstate->frame[vstate->curframe];
6878 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
6929 * need to be able to read from this state.
6941 print_verifier_state(env, state);
6946 print_verifier_state(env, state);
7021 * copy register state to dest reg
7058 /* clear any state __mark_reg_known doesn't set */
7117 static void __find_good_pkt_pointers(struct bpf_func_state *state, struct bpf_reg_state *dst_reg,
7124 reg = &state->regs[i];
7131 bpf_for_each_spilled_reg(i, state, reg)
7629 static void mark_ptr_or_null_reg(struct bpf_func_state *state, struct bpf_reg_state *reg, u32 id, bool is_null)
7664 * so that state pruning has chances to take effect.
7680 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, bool is_null)
7686 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
7689 bpf_for_each_spilled_reg(i, state, reg)
7694 mark_ptr_or_null_reg(state, reg, id, is_null);
7703 struct bpf_func_state *state = vstate->frame[vstate->curframe];
7704 struct bpf_reg_state *regs = state->regs;
7711 * No one could have freed the reference state before
7714 WARN_ON_ONCE(release_reference_state(state, id));
7797 struct bpf_func_state *state;
7802 state = vstate->frame[i];
7804 reg = &state->regs[j];
7810 bpf_for_each_spilled_reg(j, state, reg)
8331 struct bpf_func_state *state = cur->frame[cur->curframe];
8333 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
8366 /* mark branch target for state pruning */
8391 verbose(env, "insn state internal bug\n");
8852 /* If in the old state two registers had the same id, then they need to have
8853 * the same id in the new state as well. But that id could be different from
8854 * the old state, so we need to track the mapping from old to new ids.
8856 * regs with old id 5 must also have new id 9 for the new state to be safe. But
8892 /* since the register is unused, clear its state
8917 /* all regs in this state in all frames were already marked */
8927 * the verifier states are added to state lists at given insn and
8928 * pushed into state stack for future exploration.
8930 * stored in the state lists have their final liveness state already,
8937 * when the verifier reaches exit insn the register r0 in the state list of
8946 * Hence when the verifier completes the search of state list in is_state_visited()
8951 * to simplify state merging.
8964 if (sl->state.branches) {
8967 if (sl->state.insn_idx != insn || sl->state.curframe != cur->curframe) {
8971 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) {
8975 clean_verifier_state(env, &sl->state);
8988 /* explored state didn't use this */
9006 /* explored state can't have used this */
9115 * slots in the current stack, since explored(safe) state
9123 /* explored state didn't use this */
9138 /* if old state was safe with misc data in the stack
9147 /* Ex: old explored (safe) state has STACK_SPILL in
9191 * execution popped from the state stack. If it sees an old state that has
9192 * more strict register state and more strict stack state then this execution
9194 * concluded that more strict state leads to valid finish.
9196 * Therefore two states are equivalent if register state is more conservative
9197 * and explored stack state is more conservative than the current one.
9203 * In other words if current stack state (one being explored) has more
9205 * the verifier can stop exploring and conclude that current state is valid too
9207 * Similarly with registers. If explored state has register type as invalid
9208 * whereas register type in current state is meaningful, it means that
9209 * the current state will reach 'bpf_exit' instruction safely
9241 /* Verification state from speculative execution simulation
9297 * straight-line code between a state and its parent. When we arrive at an
9298 * equivalent state (jump target or such) we didn't arrive by the straight-line
9299 * code, so read marks in the state must propagate to the parent regardless
9300 * of the state's write marks. That's what 'parent == state->parent' comparison
9307 struct bpf_func_state *state, *parent;
9318 state = vstate->frame[frame];
9320 state_reg = state->regs;
9333 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) {
9335 state_reg = &state->stack[i].spilled_ptr;
9345 /* find precise scalars in the previous equivalent state and
9346 * propagate them into the current state
9351 struct bpf_func_state *state;
9354 state = old->frame[old->curframe];
9355 state_reg = state->regs;
9369 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
9370 if (state->stack[i].slot_type[0] != STACK_SPILL) {
9373 state_reg = &state->stack[i].spilled_ptr;
9418 * be doing state search here
9425 * Do not add new state for future pruning if the verifier hasn't seen
9442 if (sl->state.insn_idx != insn_idx) {
9445 if (sl->state.branches) {
9446 if (states_maybe_looping(&sl->state, cur) && states_equal(env, &sl->state, cur)) {
9451 /* if the verifier is processing a loop, avoid adding new state
9469 if (states_equal(env, &sl->state, cur)) {
9471 /* reached equivalent register/stack state,
9477 * own state will get the read marks recorded, but
9479 * this state and will pop a new one.
9481 err = propagate_liveness(env, &sl->state, cur);
9483 /* if previous state reached the exit with precision and
9484 * current state is equivalent to it (except precision marks)
9486 * the current state.
9489 err = err ?: propagate_precision(env, &sl->state);
9496 /* when new state is not going to be added do not increase miss count.
9497 * Otherwise several loop iterations will remove the state
9505 /* heuristic to determine whether this state is beneficial
9506 * to keep checking from state equivalence point of view.
9511 /* the state is unlikely to be useful. Remove it to
9515 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
9516 u32 br = sl->state.branches;
9519 free_verifier_state(&sl->state, false);
9523 /* cannot free this state, since parentage chain may
9551 * Technically the current state is not proven to be safe yet,
9556 * When looping the sl->state.branches will be > 0 and this state
9568 /* add new state to the head of linked list */
9569 new = &new_sl->state;
9584 /* connect new state to parentage chain. Current frame needs all
9588 * the state of the call instruction (with WRITTEN set), and r0 comes
9591 /* clear write marks in current state: the writes we did are not writes
9593 * (There are no read marks in current state, because reads always mark
9594 * their parent and current state never has children yet. Only
9655 struct bpf_verifier_state *state = env->cur_state;
9686 /* found equivalent state, can prune the search */
9713 print_verifier_state(env, state->frame[state->curframe]);
9763 * the state of dst_reg will be updated by this func
9893 if (state->curframe) {
10084 * memory allocator would see inconsistent state.
11563 free_verifier_state(&sl->state, false);
11578 free_verifier_state(&sl->state, false);
11589 struct bpf_verifier_state *state;
11596 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
11597 if (!state) {
11600 state->curframe = 0;
11601 state->speculative = false;
11602 state->branches = 1;
11603 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
11604 if (!state->frame[0]) {
11605 kfree(state);
11608 env->cur_state = state;
11609 init_func_state(env, state->frame[0], BPF_MAIN_FUNC /* callsite */, 0 /* frameno */, subprog);
11611 regs = state->frame[state->curframe]->regs;