Lines Matching refs:cur

550 struct bpf_verifier_state *cur = env->cur_state;
552 return cur->frame[reg->frameno];
921 struct bpf_verifier_state *cur = env->cur_state;
929 if (cur) {
930 err = copy_verifier_state(cur, &head->st);
955 struct bpf_verifier_state *cur = env->cur_state;
970 err = copy_verifier_state(&elem->st, cur);
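
The matches at 921-970 are the verifier's branch stack: push_stack() saves a copy of the current state so the untaken side of a branch can be explored later, and pop_stack() copies a saved state back into cur. Below is a minimal user-space sketch of that push/pop discipline, assuming a flat state that memcpy() can clone; the real copy_verifier_state() has to deep-copy frames, stack slots and jump history, and the names here are simplified stand-ins.

#include <stdlib.h>
#include <string.h>

/* Minimal stand-ins for the kernel structures: the state is reduced to a
 * register array so the copy directions at lines 930 and 970 are easy to see. */
#define MAX_REGS 11

struct verifier_state {
        long regs[MAX_REGS];
};

struct stack_elem {
        struct verifier_state st;
        struct stack_elem *next;
};

struct verifier_env {
        struct verifier_state *cur_state;
        struct stack_elem *head;
};

/* Save a copy of the current state so another branch can be explored later;
 * the copy runs stack element <- cur, as at line 970. */
static int push_stack(struct verifier_env *env)
{
        struct stack_elem *elem = calloc(1, sizeof(*elem));

        if (!elem)
                return -1;
        memcpy(&elem->st, env->cur_state, sizeof(elem->st));
        elem->next = env->head;
        env->head = elem;
        return 0;
}

/* Resume a saved branch by copying it back into the current state;
 * the copy runs cur <- stack element, as at line 930. */
static int pop_stack(struct verifier_env *env)
{
        struct stack_elem *head = env->head;

        if (!head)
                return -1;
        if (env->cur_state)
                memcpy(env->cur_state, &head->st, sizeof(*env->cur_state));
        env->head = head->next;
        free(head);
        return 0;
}

int main(void)
{
        struct verifier_state st = { .regs = { 0 } };
        struct verifier_env env = { .cur_state = &st, .head = NULL };

        st.regs[1] = 42;
        push_stack(&env);       /* remember the state with r1 == 42 */
        st.regs[1] = 7;         /* keep verifying the fall-through path */
        pop_stack(&env);        /* backtrack: r1 is 42 again */
        return st.regs[1] == 42 ? 0 : 1;
}
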
1758 static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur)
1760 u32 cnt = cur->jmp_history_cnt;
1764 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
1770 cur->jmp_history = p;
1771 cur->jmp_history_cnt = cnt;
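
push_jmp_history() (1758-1771) grows the per-state jump history with krealloc() every time a jump is recorded; the history is later replayed when the verifier backtracks to compute precision marks. A user-space sketch of the same grow-by-one append, with realloc() standing in for krealloc() and the entry layout reduced to two instruction indices (the kernel's entry type is not shown in the matches above):

#include <stdlib.h>

/* Simplified stand-ins; only the fields needed for the append are modelled. */
struct jmp_history_entry {
        unsigned int prev_idx;
        unsigned int idx;
};

struct verifier_state {
        struct jmp_history_entry *jmp_history;
        unsigned int jmp_history_cnt;
};

/* Grow-by-one append mirroring the pattern at 1760-1771: on allocation
 * failure realloc() leaves the old buffer untouched and an error is
 * reported, otherwise the new entry is written and the counters updated. */
static int push_jmp_history(struct verifier_state *cur,
                            unsigned int prev_idx, unsigned int idx)
{
        unsigned int cnt = cur->jmp_history_cnt + 1;
        struct jmp_history_entry *p;

        p = realloc(cur->jmp_history, cnt * sizeof(*p));
        if (!p)
                return -1;
        p[cnt - 1].prev_idx = prev_idx;
        p[cnt - 1].idx = idx;
        cur->jmp_history = p;
        cur->jmp_history_cnt = cnt;
        return 0;
}
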
2290 struct bpf_func_state *cur; /* state of the current function */
2307 cur = env->cur_state->frame[env->cur_state->curframe];
2309 reg = &cur->regs[value_regno];
2347 if (state != cur && reg->type == PTR_TO_STACK) {
2418 struct bpf_func_state *cur; /* state of the current function */
2428 cur = env->cur_state->frame[env->cur_state->curframe];
2429 ptr_reg = &cur->regs[ptr_regno];
2433 value_reg = &cur->regs[value_regno];
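
The matches at 550-552, 2290-2347 and 2418-2433 all walk the same data structure: the verifier state owns one function state per call frame, curframe selects the frame currently being verified, and each frame carries its own register file. A trimmed-down model of that indexing follows; struct and field names are simplified stand-ins, not the kernel layouts.

/* Just enough structure to show the indexing used at 552, 2307-2309 and
 * 2428-2433. */
#define MAX_BPF_REG     11
#define MAX_CALL_FRAMES 8

struct reg_state {
        int frameno;    /* which frame a stack pointer in this register refers to */
        long value;
};

struct func_state {
        struct reg_state regs[MAX_BPF_REG];
};

struct verifier_state {
        struct func_state *frame[MAX_CALL_FRAMES];
        int curframe;
};

/* frame[curframe] is the function currently being verified, as fetched at
 * 2307 and 2428 before indexing its registers. */
static struct func_state *cur_func(struct verifier_state *cur)
{
        return cur->frame[cur->curframe];
}

/* A register's frameno can select a different (outer) frame, as at 552;
 * the check at 2347 suggests this matters when a stack pointer refers to
 * another frame's stack. */
static struct func_state *func_of_reg(struct verifier_state *cur,
                                      const struct reg_state *reg)
{
        return cur->frame[reg->frameno];
}
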
4142 struct bpf_verifier_state *cur = env->cur_state;
4170 if (cur->active_spin_lock) {
4174 cur->active_spin_lock = reg->id;
4176 if (!cur->active_spin_lock) {
4180 if (cur->active_spin_lock != reg->id) {
4184 cur->active_spin_lock = 0;
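
The group at 4142-4184 tracks bpf_spin_lock usage: the current state remembers at most one held lock by storing the lock register's id in cur->active_spin_lock, and the matching unlock must see the same id before the field is cleared back to 0. A self-contained sketch of that bookkeeping; the struct name and error returns are illustrative, only the id handling mirrors the fragments above.

#include <stdbool.h>

struct lock_state {
        unsigned int active_spin_lock;  /* 0 means no lock is held */
};

/* Model of the checks visible at 4170-4184. */
static int track_lock(struct lock_state *cur, unsigned int reg_id, bool is_lock)
{
        if (is_lock) {
                if (cur->active_spin_lock)      /* taking a second lock */
                        return -1;
                cur->active_spin_lock = reg_id;
        } else {
                if (!cur->active_spin_lock)     /* unlock without a held lock */
                        return -1;
                if (cur->active_spin_lock != reg_id)    /* unlock of a different lock */
                        return -1;
                cur->active_spin_lock = 0;
        }
        return 0;
}
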
8330 struct bpf_verifier_state *cur = env->cur_state;
8331 struct bpf_func_state *state = cur->frame[cur->curframe];
8843 /* check %cur's range satisfies %old's */
8844 static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur)
8846 return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value &&
8847 old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value &&
8848 old->u32_min_value <= cur->u32_min_value && old->u32_max_value >= cur->u32_max_value &&
8849 old->s32_min_value <= cur->s32_min_value && old->s32_max_value >= cur->s32_max_value;
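
range_within() (8843-8849) is shown in full: the cached (old) register's range must contain the candidate's (cur's) range on every axis, unsigned and signed, 64-bit and 32-bit, before the cached state may prune the candidate. The same check as a stand-alone program, with bpf_reg_state trimmed down to the eight bounds being compared:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Trimmed-down bpf_reg_state: only the bounds that range_within() compares. */
struct reg_bounds {
        uint64_t umin_value, umax_value;
        int64_t  smin_value, smax_value;
        uint32_t u32_min_value, u32_max_value;
        int32_t  s32_min_value, s32_max_value;
};

/* Same containment test as at 8846-8849: old must be at least as wide as cur
 * on every axis, otherwise old cannot stand in for cur. */
static bool range_within(const struct reg_bounds *old, const struct reg_bounds *cur)
{
        return old->umin_value <= cur->umin_value &&
               old->umax_value >= cur->umax_value &&
               old->smin_value <= cur->smin_value &&
               old->smax_value >= cur->smax_value &&
               old->u32_min_value <= cur->u32_min_value &&
               old->u32_max_value >= cur->u32_max_value &&
               old->s32_min_value <= cur->s32_min_value &&
               old->s32_max_value >= cur->s32_max_value;
}

int main(void)
{
        struct reg_bounds old = { 0, 100, 0, 100, 0, 100, 0, 100 };
        struct reg_bounds cur = { 10, 20, 10, 20, 10, 20, 10, 20 };

        assert(range_within(&old, &cur));       /* [10,20] fits inside [0,100] */
        cur.umax_value = 200;
        assert(!range_within(&old, &cur));      /* 200 escapes old's unsigned range */
        return 0;
}
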
8870 idmap[i].cur = cur_id;
8874 return idmap[i].cur == cur_id;
8957 static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur)
8967 if (sl->state.insn_idx != insn || sl->state.curframe != cur->curframe) {
8970 for (i = 0; i <= cur->curframe; i++) {
8971 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) {
9109 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur,
9134 if (i >= cur->allocated_stack) {
9143 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) {
9146 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) {
9160 if (!regsafe(env, &old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) {
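
stacksafe() (9109-9160) compares the two states' stacks byte by byte: the cached state must not rely on stack the candidate never allocated (9134), the condition at 9143 lets an old STACK_MISC byte cover a candidate STACK_ZERO byte, otherwise the slot types must match exactly (9146), and spilled registers are additionally run through regsafe() (9160). A hedged sketch of just the per-byte type check, assuming the usual stack slot-type values:

#include <stdbool.h>

/* Stack slot tags mirroring the kernel's names, treated as a plain enum
 * here for illustration. */
enum slot_type {
        STACK_INVALID,  /* nothing was written to this byte */
        STACK_SPILL,    /* byte belongs to a spilled register */
        STACK_MISC,     /* scalar data of unknown content */
        STACK_ZERO,     /* byte known to be zero */
};

/* Per-byte compatibility in the spirit of 9143-9146: an old STACK_MISC byte
 * also covers a candidate STACK_ZERO byte (a known zero is just a special
 * case of "some scalar"); otherwise the types must match exactly. */
static bool slot_byte_compatible(enum slot_type old, enum slot_type cur)
{
        if (old == STACK_MISC && cur == STACK_ZERO)
                return true;
        return old == cur;
}
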
9177 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
9179 if (old->acquired_refs != cur->acquired_refs) {
9182 return !memcmp(old->refs, cur->refs, sizeof(*old->refs) * old->acquired_refs);
9211 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur)
9217 if (!regsafe(env, &old->regs[i], &cur->regs[i], env->idmap_scratch)) {
9222 if (!stacksafe(env, old, cur, env->idmap_scratch)) {
9226 if (!refsafe(old, cur)) {
9233 static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
9237 if (old->curframe != cur->curframe) {
9244 if (old->speculative && !cur->speculative) {
9248 if (old->active_spin_lock != cur->active_spin_lock) {
9256 if (old->frame[i]->callsite != cur->frame[i]->callsite) {
9259 if (!func_states_equal(env, old->frame[i], cur->frame[i])) {
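
states_equal() (9233-9261) does the cheap whole-state checks first: frame depth, speculative flag, spin-lock id and each frame's callsite, then hands every frame pair to func_states_equal(), which runs regsafe() on the registers, stacksafe() on the stack and refsafe() on the acquired references. A reduced sketch of that composition follows, in which the per-frame comparison keeps only the refsafe() part (9177-9182); the register and stack checks are omitted to keep the sketch short.

#include <stdbool.h>
#include <string.h>

/* Reduced models: only the fields touched by the matches at 9177-9261. */
struct ref_state {
        int id;         /* identity of the acquired object */
        int insn_idx;   /* instruction that acquired it */
};

struct func_state {
        int callsite;   /* call instruction that entered this frame */
        struct ref_state *refs;
        int acquired_refs;
};

#define MAX_CALL_FRAMES 8

struct verifier_state {
        struct func_state *frame[MAX_CALL_FRAMES];
        int curframe;
        bool speculative;
        unsigned int active_spin_lock;
};

/* Same shape as refsafe() at 9177-9182: the reference counts must match and
 * the arrays must be byte-identical. */
static bool refsafe(const struct func_state *old, const struct func_state *cur)
{
        if (old->acquired_refs != cur->acquired_refs)
                return false;
        return !memcmp(old->refs, cur->refs,
                       sizeof(*old->refs) * old->acquired_refs);
}

/* Skeleton of states_equal() at 9233-9261: whole-state checks, then a
 * per-frame comparison (here reduced to refsafe()). */
static bool states_equal(const struct verifier_state *old,
                         const struct verifier_state *cur)
{
        int i;

        if (old->curframe != cur->curframe)
                return false;
        /* a state explored under speculation must never prune a
         * non-speculative path */
        if (old->speculative && !cur->speculative)
                return false;
        if (old->active_spin_lock != cur->active_spin_lock)
                return false;
        for (i = 0; i <= old->curframe; i++) {
                if (old->frame[i]->callsite != cur->frame[i]->callsite)
                        return false;
                if (!refsafe(old->frame[i], cur->frame[i]))
                        return false;
        }
        return true;
}
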
9388 static bool states_maybe_looping(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
9391 int i, fr = cur->curframe;
9398 fcur = cur->frame[fr];
9411 struct bpf_verifier_state *cur = env->cur_state, *new;
9415 cur->last_insn_idx = env->prev_insn_idx;
9438 clean_live_states(env, insn_idx, cur);
9446 if (states_maybe_looping(&sl->state, cur) && states_equal(env, &sl->state, cur)) {
9469 if (states_equal(env, &sl->state, cur)) {
9481 err = propagate_liveness(env, &sl->state, cur);
9488 err = err ?: push_jmp_history(env, cur);
9543 return push_jmp_history(env, cur);
9547 return push_jmp_history(env, cur);
9570 err = copy_verifier_state(new, cur);
9579 cur->parent = new;
9580 cur->first_insn_idx = insn_idx;
9581 clear_jmp_history(cur);
9597 for (j = 0; j <= cur->curframe; j++) {
9598 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
9599 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
9602 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
9607 for (j = 0; j <= cur->curframe; j++) {
9608 struct bpf_func_state *frame = cur->frame[j];
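
The final group (9570-9608) is is_state_visited() installing the just-stored state as the new parent: cur is copied into new (9570) and linked via cur->parent (9579), then every tracked register in every frame of cur gets its parent pointer aimed at the matching register in new and its own liveness reset to REG_LIVE_NONE (9597-9602), so that later reads in cur can propagate read marks back into the stored state; the loop at 9607 does the same for stack slots. A sketch of the register half of that rewiring, with the structures cut down to the liveness fields:

/* Reduced model of the liveness plumbing at 9597-9602: each register keeps a
 * pointer to its "parent" (the same register in the stored parent state) and
 * starts with no liveness marks of its own. */
#define REG_LIVE_NONE   0
#define BPF_REG_6       6
#define BPF_REG_FP      10
#define MAX_BPF_REG     11
#define MAX_CALL_FRAMES 8

struct reg_state {
        struct reg_state *parent;
        int live;
};

struct func_state {
        struct reg_state regs[MAX_BPF_REG];
};

struct verifier_state {
        struct func_state *frame[MAX_CALL_FRAMES];
        int curframe;
};

static void reparent_regs(struct verifier_state *cur, struct verifier_state *new)
{
        int i, j;

        for (j = 0; j <= cur->curframe; j++) {
                /* in caller frames only the callee-saved registers R6-R9 stay
                 * live across the call, so only those need a parent link; the
                 * innermost frame links R0-R9 */
                for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
                        cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
                        cur->frame[j]->regs[i].live = REG_LIVE_NONE;
                }
        }
}
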