Lines Matching defs:offset

76 	int *offset;
190 /* BPF JMP offset is relative to the next instruction */
193 * Whereas arm64 branch instructions encode the offset
195 * instruction offset.
197 return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
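
The comment fragments and the return statement above (source lines 190-197) belong to the helper that converts a BPF jump displacement into an arm64 branch displacement. A minimal standalone sketch of that arithmetic, assuming offset_table[i] holds the arm64 instruction index at which BPF instruction i starts, and assuming the index is first stepped to the next instruction as the first comment implies (that increment is not among the matched lines):

/* Sketch only, not the kernel function itself. */
static int bpf2a64_offset_sketch(int bpf_insn, int off, const int *offset_table)
{
	/* A BPF JMP offset is relative to the *next* BPF instruction ... */
	bpf_insn++;

	/*
	 * ... whereas an arm64 branch encodes the offset from the branch
	 * instruction itself, which sits one slot before that "next"
	 * instruction, hence the "- 1".
	 */
	return offset_table[bpf_insn + off] - (offset_table[bpf_insn] - 1);
}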
224 * Post-index, Pre-index, Unsigned offset.
226 * For BPF ldr/str, the "unsigned offset" type is sufficient.
228 * "Unsigned offset" type LDR(immediate) format:
237 * "Unsigned offset" type STR(immediate) format:
245 * The offset is calculated from imm12 and scale in the following way:
247 * offset = (u64)imm12 << scale
249 static bool is_lsi_offset(int offset, int scale)
251 if (offset < 0)
254 if (offset > (0xFFF << scale))
257 if (offset & ((1 << scale) - 1))
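
Source lines 224-257 are the addressing-mode check for LDR/STR (immediate): only the "unsigned offset" form is used, and such an instruction can encode exactly offset = (u64)imm12 << scale. A self-contained sketch of the check, reconstructed from the matched lines:

#include <stdbool.h>

/* Encodable offsets must be non-negative, fit in imm12 after descaling,
 * and be aligned to the access size implied by the scale.
 */
static bool is_lsi_offset_sketch(int offset, int scale)
{
	if (offset < 0)
		return false;			/* unsigned offset form only */

	if (offset > (0xFFF << scale))
		return false;			/* would not fit in imm12 */

	if (offset & ((1 << scale) - 1))
		return false;			/* not aligned to the access size */

	return true;
}

For a 64-bit access the scale is 3, so offsets 0, 8, ..., 32760 pass the check; anything else presumably has to be materialized in a scratch register and addressed another way.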
285 /* Tail call offset to jump into */
697 off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
701 regs->pc = (unsigned long)&ex->fixup - offset;
710 off_t offset;
729 offset = pc - (long)&ex->insn;
730 if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
732 ex->insn = offset;
735 * Since the extable follows the program, the fixup offset is always
742 offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
743 if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
746 ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
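
Source lines 697-746 show both halves of the exception-table offset scheme: when the table entry is built, ex->insn records the (negative, int-sized) distance back to the faulting pc, and the fixup field records the signed distance from its own address to the instruction following the faulting load/store; at fault time the handler reverses that computation to find the resume address. A standalone round trip of the arithmetic, with made-up addresses and plain integers in place of struct exception_table_entry and the FIELD_PREP()/FIELD_GET() packing:

#include <stdio.h>
#include <stdint.h>

#define AARCH64_INSN_SIZE 4	/* every arm64 instruction is 4 bytes */

int main(void)
{
	/* Illustrative addresses only. */
	uintptr_t pc	= 0x10001000UL;	/* faulting load/store in the JITed image */
	uintptr_t fixup	= 0x10200000UL;	/* address of the ex->fixup field */

	/* Encoded while building the extable entry. */
	long offset = (long)fixup - (long)(pc + AARCH64_INSN_SIZE);

	/* Recovered in the exception handler: resume after the faulting insn. */
	uintptr_t resume = fixup - offset;

	printf("resume at %#lx, expected %#lx\n",
	       (unsigned long)resume, (unsigned long)(pc + AARCH64_INSN_SIZE));
	return 0;
}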
1371 * offset to FP, converts it to a positive number, and aligns down to 8 bytes.
1376 int offset = 0;
1401 off < offset)
1402 offset = insn->off;
1416 src == BPF_REG_FP && off < offset)
1417 offset = off;
1429 if (offset < 0) {
1434 offset = -offset;
1436 offset = ALIGN_DOWN(offset, 8);
1439 return offset;
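
Source lines 1371-1439 scan the program for loads and stores that use the frame pointer as base register, track the lowest (most negative) displacement they use, and turn it into a positive, 8-byte-aligned size. A simplified standalone sketch of that scan; struct insn_sketch is a stand-in for struct bpf_insn, not the real layout:

#include <stdbool.h>

#define ALIGN_DOWN_8(x)	((x) & ~7)

struct insn_sketch {
	bool fp_based;	/* load/store that uses the frame pointer as base */
	int off;	/* signed displacement from FP */
};

static int find_fpb_offset_sketch(const struct insn_sketch *insns, int len)
{
	int offset = 0;

	for (int i = 0; i < len; i++) {
		/* remember the lowest (most negative) FP-relative offset */
		if (insns[i].fp_based && insns[i].off < offset)
			offset = insns[i].off;
	}

	if (offset < 0) {
		offset = -offset;		/* convert to a positive size */
		offset = ALIGN_DOWN_8(offset);	/* align down to 8 bytes */
	}

	return offset;
}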
1448 * - offset[0] - offset of the end of prologue,
1450 * - offset[1] - offset of the end of 1st instruction,
1453 * - offset[3] - offset of the end of 3rd instruction,
1461 ctx->offset[i] = ctx->idx;
1466 ctx->offset[i] = ctx->idx;
1473 * offset is allocated with prog->len + 1 so fill in
1474 * the last element with the offset after the last
1478 ctx->offset[i] = ctx->idx;
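
Source lines 1448-1478 describe the ctx->offset[] table filled during the first (fake) pass: entry i holds the arm64 instruction index where BPF instruction i begins (equivalently, where instruction i-1 ends), and because the table has prog->len + 1 entries, the final slot records the end of the whole body. A simplified sketch of that bookkeeping; emit_one_insn_sketch() is a hypothetical stand-in for the real per-instruction emitter and only reports how many arm64 instructions it produced:

/* Hypothetical emitter: pretend every BPF instruction becomes two arm64 ones. */
static int emit_one_insn_sketch(int i)
{
	(void)i;
	return 2;
}

static void fill_offset_table_sketch(int *offset, int prog_len, int prologue_insns)
{
	int idx = prologue_insns;	/* arm64 instructions already emitted */

	for (int i = 0; i < prog_len; i++) {
		offset[i] = idx;	/* end of prologue / previous insn, start of insn i */
		idx += emit_one_insn_sketch(i);
	}

	/* offset[] is allocated with prog_len + 1 slots; the extra slot holds
	 * the offset just past the last instruction (end of program).
	 */
	offset[prog_len] = idx;
}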
1553 if (jit_data->ctx.offset) {
1564 ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
1565 if (ctx.offset == NULL) {
1573 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
1575 * BPF line info needs ctx->offset[i] to be the offset of
1664 /* offset[prog->len] is the size of program */
1666 ctx.offset[i] *= AARCH64_INSN_SIZE;
1667 bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
1669 kvfree(ctx.offset);
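
Source lines 1553-1669 cover the table's lifecycle in the JIT entry point: it is allocated with prog->len + 1 entries, filled in instruction units during the fake pass, and only at the end scaled to byte offsets; because BPF line info needs entry i to describe the end of instruction i-1, the table is handed over with a one-element shift. A short sketch of that final scaling, with the bpf_prog_fill_jited_linfo() call reduced to a comment:

#define AARCH64_INSN_SIZE 4

static void scale_offsets_sketch(int *offset, int prog_len)
{
	/* offset[prog_len] is the size of the program, still in instructions here */
	for (int i = 0; i <= prog_len; i++)
		offset[i] *= AARCH64_INSN_SIZE;	/* instruction index -> byte offset */

	/* The kernel then passes offset + 1 to bpf_prog_fill_jited_linfo(),
	 * so line-info entry i sees the end offset of BPF instruction i - 1.
	 */
}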
1760 int offset = &ctx->image[ctx->idx] - branch;
1761 *branch = cpu_to_le32(A64_CBZ(1, A64_R(0), offset));
1979 int offset = &ctx->image[ctx->idx] - branches[i];
1980 *branches[i] = cpu_to_le32(A64_CBNZ(1, A64_R(10), offset));
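
Source lines 1760-1761 and 1979-1980 are the trampoline's branch back-patching: a slot in the image is remembered, the code that may be skipped is emitted, and once the landing point is known a CBZ/CBNZ is written into the remembered slot, with the displacement obtained directly from u32 pointer subtraction (which yields a count of instructions). A standalone sketch of the pattern; encode_cbz_sketch() only approximates the real A64_CBZ()/cpu_to_le32() pair, and the NOPs stand in for the guarded code:

#include <stdint.h>

/* Rough CBZ Xt, #imm19 encoding for illustration; not authoritative. */
static uint32_t encode_cbz_sketch(unsigned int rt, int imm19)
{
	return 0xB4000000u | (((uint32_t)imm19 & 0x7FFFF) << 5) | (rt & 0x1F);
}

static void emit_guarded_block_sketch(uint32_t *image, int *idx)
{
	uint32_t *branch = &image[*idx];	/* remember the branch slot */
	image[(*idx)++] = 0xD503201Fu;		/* placeholder, patched below */

	image[(*idx)++] = 0xD503201Fu;		/* guarded body: NOPs here */
	image[(*idx)++] = 0xD503201Fu;

	/* u32 pointer subtraction gives the distance in instructions. */
	int offset = &image[*idx] - branch;
	*branch = encode_cbz_sketch(0, offset);	/* skip the body if x0 == 0 */
}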
2077 long offset;
2083 offset = (long)target - (long)ip;
2084 return offset < -SZ_128M || offset >= SZ_128M;
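
Source lines 2077-2084 decide whether a direct branch can reach its target: an arm64 B/BL immediate is a signed 26-bit word offset, i.e. a reach of +/-128 MB, so anything outside that window has to go through a trampoline. The matched lines are nearly the whole function; a self-contained version of the same check, assuming (as the comment near the poke code suggests) that a NULL target stands for a NOP:

#include <stdbool.h>

#define SZ_128M (128L * 1024 * 1024)

static bool is_long_jump_sketch(void *ip, void *target)
{
	long offset;

	if (!target)		/* NULL target: the poke becomes a NOP */
		return false;

	offset = (long)target - (long)ip;
	return offset < -SZ_128M || offset >= SZ_128M;
}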
2178 unsigned long offset = ~0UL;
2185 if (!__bpf_address_lookup((unsigned long)ip, &size, &offset, namebuf))
2192 image = ip - offset;
2193 /* zero offset means we're poking bpf prog entry */
2194 poking_bpf_entry = (offset == 0UL);
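
Source lines 2178-2194 resolve the poke address back to the JITed BPF image it lives in: __bpf_address_lookup() fills in the offset of ip within the image, ip - offset recovers the image start, and a zero offset means the poke targets the program entry rather than a call site inside the body. A small sketch of that classification; struct bpf_lookup_sketch and the lookup callback are stand-ins for the real kallsyms-style helper, and the later uses of image and size are omitted:

#include <stdbool.h>

struct bpf_lookup_sketch {
	bool found;		/* ip resolved to a JITed BPF image */
	unsigned long offset;	/* offset of ip within that image */
};

static bool poking_bpf_entry_sketch(unsigned long ip,
				    struct bpf_lookup_sketch (*lookup)(unsigned long))
{
	struct bpf_lookup_sketch sym = lookup(ip);

	if (!sym.found)
		return false;			/* not a BPF program at all */

	unsigned long image = ip - sym.offset;	/* start of the JITed image */
	(void)image;

	/* zero offset means we're poking the bpf prog entry */
	return sym.offset == 0UL;
}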