/kernel/linux/linux-5.10/samples/bpf/bpf_insn.h
    28  #define BPF_ALU64_IMM(OP, DST, IMM) \
    34          .imm = IMM })
    36  #define BPF_ALU32_IMM(OP, DST, IMM) \
    42          .imm = IMM })
    64  #define BPF_MOV64_IMM(DST, IMM) \
    70          .imm = IMM })
    72  #define BPF_MOV32_IMM(DST, IMM) \
    78          .imm = IMM })
    81  #define BPF_LD_IMM64(DST, IMM) \
    82          BPF_LD_IMM64_RAW(DST, 0, IMM)
        [more matches not shown]
/kernel/linux/linux-6.6/samples/bpf/bpf_insn.h
    28  #define BPF_ALU64_IMM(OP, DST, IMM) \
    34          .imm = IMM })
    36  #define BPF_ALU32_IMM(OP, DST, IMM) \
    42          .imm = IMM })
    64  #define BPF_MOV64_IMM(DST, IMM) \
    70          .imm = IMM })
    72  #define BPF_MOV32_IMM(DST, IMM) \
    78          .imm = IMM })
    81  #define BPF_LD_IMM64(DST, IMM) \
    82          BPF_LD_IMM64_RAW(DST, 0, IMM)
        [more matches not shown]
/kernel/linux/linux-5.10/tools/include/linux/filter.h
    52  #define BPF_ALU64_IMM(OP, DST, IMM) \
    58          .imm = IMM })
    60  #define BPF_ALU32_IMM(OP, DST, IMM) \
    66          .imm = IMM })
    98  #define BPF_MOV64_IMM(DST, IMM) \
   104          .imm = IMM })
   106  #define BPF_MOV32_IMM(DST, IMM) \
   112          .imm = IMM })
   116  #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
   122          .imm = IMM })
        [more matches not shown]
/kernel/linux/linux-6.6/tools/include/linux/filter.h
    52  #define BPF_ALU64_IMM(OP, DST, IMM) \
    58          .imm = IMM })
    60  #define BPF_ALU32_IMM(OP, DST, IMM) \
    66          .imm = IMM })
    98  #define BPF_MOV64_IMM(DST, IMM) \
   104          .imm = IMM })
   106  #define BPF_MOV32_IMM(DST, IMM) \
   112          .imm = IMM })
   116  #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
   122          .imm = IMM })
        [more matches not shown]
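Note: the fragments above are the first and last lines of multi-line macros; each *_IMM macro builds one struct bpf_insn whose .imm field carries the 32-bit constant. A minimal sketch of the expansion follows (approximate, not the kernel's verbatim text, and renamed to make that clear):

        #include <linux/bpf.h>          /* uapi: struct bpf_insn, BPF_ALU64, BPF_OP, BPF_K */

        /* Approximate shape of BPF_ALU64_IMM: ALU op on dst_reg with an immediate. */
        #define SKETCH_BPF_ALU64_IMM(OP, DST, IMM)                  \
                ((struct bpf_insn) {                                \
                        .code    = BPF_ALU64 | BPF_OP(OP) | BPF_K,  \
                        .dst_reg = DST,                             \
                        .src_reg = 0,                               \
                        .off     = 0,                               \
                        .imm     = IMM })

        /* Example: encode "r0 += 42" as a single eBPF instruction. */
        static struct bpf_insn add42_insn(void)
        {
                return SKETCH_BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 42);
        }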
/kernel/linux/linux-5.10/arch/m68k/lib/udivsi3.S
    65  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    94      cmpl    IMM (0x10000), d1   /* divisor >= 2 ^ 16 ? */
   108  L4: lsrl    IMM (1), d1         /* shift divisor */
   109      lsrl    IMM (1), d0         /* shift dividend */
   110      cmpl    IMM (0x10000), d1   /* still divisor >= 2 ^ 16 ? */
   113      andl    IMM (0xffff), d0    /* mask out divisor, ignore remainder */
   129  L5: subql   IMM (1), d0         /* adjust quotient */
   138      link    a6,IMM (-12)
   143      moveq   IMM (31),d4
   149      bset    IMM (
        [more matches not shown]

/kernel/linux/linux-5.10/arch/m68k/lib/umodsi3.S
    65  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    94      addql   IMM (8), sp
   100      addql   IMM (8), sp

/kernel/linux/linux-5.10/arch/m68k/lib/modsi3.S
    67  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    96      addql   IMM (8), sp
   102      addql   IMM (8), sp

/kernel/linux/linux-5.10/arch/m68k/lib/divsi3.S
    67  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    93      moveq   IMM (1), d2         /* sign of result stored in d2 (=1 or =-1) */
   114      addql   IMM (8), sp

/kernel/linux/linux-5.10/arch/m68k/lib/mulsi3.S
    65  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
/kernel/linux/linux-6.6/arch/m68k/lib/udivsi3.S
    65  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    94      cmpl    IMM (0x10000), d1   /* divisor >= 2 ^ 16 ? */
   108  L4: lsrl    IMM (1), d1         /* shift divisor */
   109      lsrl    IMM (1), d0         /* shift dividend */
   110      cmpl    IMM (0x10000), d1   /* still divisor >= 2 ^ 16 ? */
   113      andl    IMM (0xffff), d0    /* mask out divisor, ignore remainder */
   129  L5: subql   IMM (1), d0         /* adjust quotient */
   138      link    a6,IMM (-12)
   143      moveq   IMM (31),d4
   149      bset    IMM (
        [more matches not shown]

/kernel/linux/linux-6.6/arch/m68k/lib/divsi3.S
    67  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    93      moveq   IMM (1), d2         /* sign of result stored in d2 (=1 or =-1) */
   114      addql   IMM (8), sp

/kernel/linux/linux-6.6/arch/m68k/lib/modsi3.S
    67  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    96      addql   IMM (8), sp
   102      addql   IMM (8), sp

/kernel/linux/linux-6.6/arch/m68k/lib/umodsi3.S
    65  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
    94      addql   IMM (8), sp
   100      addql   IMM (8), sp

/kernel/linux/linux-6.6/arch/m68k/lib/mulsi3.S
    65  #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)   [define]
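Note: in these m68k helpers IMM() is not a value at all; it pastes the assembler's immediate-operand prefix onto a constant so the same source assembles regardless of which prefix the toolchain expects. A sketch of the preprocessor mechanics (the '#' value is an assumption; the toolchain normally supplies __IMMEDIATE_PREFIX__, and these files mirror the CONCAT helpers shown here):

        #ifndef __IMMEDIATE_PREFIX__
        #define __IMMEDIATE_PREFIX__ #    /* assumed: '#' is the m68k immediate prefix */
        #endif
        #define CONCAT2(a, b)   a ## b
        #define CONCAT1(a, b)   CONCAT2(a, b)
        #define IMM(x)          CONCAT1(__IMMEDIATE_PREFIX__, x)

        /* After preprocessing, "addql IMM (8), sp" becomes "addql #8, sp".
         * The paste does not form a single C token, which is acceptable here
         * because these .S files are run through the preprocessor in
         * assembler-with-cpp mode, not compiled as C. */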
/kernel/linux/linux-5.10/include/linux/filter.h
   111  #define BPF_ALU64_IMM(OP, DST, IMM) \
   117          .imm = IMM })
   119  #define BPF_ALU32_IMM(OP, DST, IMM) \
   125          .imm = IMM })
   157  #define BPF_MOV64_IMM(DST, IMM) \
   163          .imm = IMM })
   165  #define BPF_MOV32_IMM(DST, IMM) \
   171          .imm = IMM })
   188  #define BPF_LD_IMM64(DST, IMM) \
   189          BPF_LD_IMM64_RAW(DST, 0, IMM)
        [more matches not shown]
/kernel/linux/linux-6.6/include/linux/filter.h
   120  #define BPF_ALU64_IMM(OP, DST, IMM) \
   126          .imm = IMM })
   128  #define BPF_ALU32_IMM(OP, DST, IMM) \
   134          .imm = IMM })
   166  #define BPF_MOV64_IMM(DST, IMM) \
   172          .imm = IMM })
   174  #define BPF_MOV32_IMM(DST, IMM) \
   180          .imm = IMM })
   197  #define BPF_LD_IMM64(DST, IMM) \
   198          BPF_LD_IMM64_RAW(DST, 0, IMM)
        [more matches not shown]
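Note: BPF_LD_IMM64 is the one macro in this group whose immediate does not fit in a single instruction: .imm is only 32 bits wide, so BPF_LD_IMM64_RAW splits the 64-bit constant across two consecutive instruction slots. A standalone sketch of that split (field values assumed from the uapi encoding, not copied from filter.h):

        #include <linux/bpf.h>          /* uapi: struct bpf_insn, BPF_LD, BPF_DW, BPF_IMM */

        /* Sketch: fill two instruction slots with a 64-bit immediate load into dst. */
        static void ld_imm64_sketch(struct bpf_insn out[2], __u8 dst, __u64 imm)
        {
                out[0] = (struct bpf_insn) {
                        .code    = BPF_LD | BPF_DW | BPF_IMM,   /* ld dst, imm64 (first slot) */
                        .dst_reg = dst,
                        .imm     = (__u32)imm,                  /* low 32 bits                */
                };
                out[1] = (struct bpf_insn) {
                        .code    = 0,                           /* second slot: pseudo insn   */
                        .imm     = (__u32)(imm >> 32),          /* high 32 bits               */
                };
        }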
/kernel/linux/linux-5.10/arch/powerpc/math-emu/mtfsfi.c
    10  mtfsfi(unsigned int crfD, unsigned int IMM)                              [in mtfsfi(), argument]
    18  __FPU_FPSCR |= (IMM & 0xf) << ((7 - crfD) << 2);                         [in mtfsfi()]
    21  printk("%s: %d %x: %08lx\n", __func__, crfD, IMM, __FPU_FPSCR);          [in mtfsfi()]
/kernel/linux/linux-6.6/arch/powerpc/math-emu/mtfsfi.c
    10  mtfsfi(unsigned int crfD, unsigned int IMM)                              [in mtfsfi(), argument]
    18  __FPU_FPSCR |= (IMM & 0xf) << ((7 - crfD) << 2);                         [in mtfsfi()]
    21  printk("%s: %d %x: %08lx\n", __func__, crfD, IMM, __FPU_FPSCR);          [in mtfsfi()]
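Note: mtfsfi emulation writes a 4-bit immediate into one of the eight 4-bit FPSCR fields; crfD counts fields from the most-significant end, hence the (7 - crfD) << 2 shift. The excerpt shows only the OR step; a full read-modify-write of the field looks roughly like the sketch below (plain variables, not the real __FPU_FPSCR, and the clearing step is an assumption about the surrounding code):

        /* Sketch: place a 4-bit immediate into FPSCR field crfD (0..7). */
        static unsigned long fpscr_set_field(unsigned long fpscr,
                                             unsigned int crfD, unsigned int imm)
        {
                unsigned int shift = (7 - crfD) << 2;   /* field 0 occupies the top nibble */

                fpscr &= ~(0xfUL << shift);             /* clear the old 4-bit field       */
                fpscr |= (unsigned long)(imm & 0xf) << shift;
                return fpscr;
        }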
/kernel/linux/linux-6.6/kernel/bpf/core.c
    65  #define IMM insn->imm                             [macro]
  1642  INSN_3(LD, IMM, DW)
  1725  DST = DST OP IMM; \                               [in ___bpf_prog_run()]
  1728  DST = (u32) DST OP (u32) IMM; \                   [in ___bpf_prog_run()]
  1739  DST = DST OP IMM; \                               [in ___bpf_prog_run()]
  1742  DST = (u32) DST OP (u32) IMM; \                   [in ___bpf_prog_run()]
  1774  DST = (u32) IMM;                                  [in ___bpf_prog_run()]
  1793  DST = IMM;                                        [in ___bpf_prog_run()]
  1803  DST = (u64) (u32) (((s32) DST) >> IMM);           [in ___bpf_prog_run()]
  1809  (*(s64 *) &DST) >>= IMM;                          [in ___bpf_prog_run()]
        [more matches not shown]
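Note: inside the interpreter, IMM, DST and SRC are shorthands for fields of the instruction currently being executed (#define IMM insn->imm), so a case such as DST = DST OP IMM reads almost like the eBPF semantics themselves. A stripped-down, user-space sketch of the idea (not the kernel's computed-goto dispatch, and the struct layout here is simplified):

        #include <stdint.h>

        /* Simplified instruction layout for the sketch (not the uapi struct). */
        struct ebpf_insn { uint8_t code, dst_reg, src_reg; int16_t off; int32_t imm; };

        /* The interpreter's shorthands: each expands relative to the current insn. */
        #define DST regs[insn->dst_reg]
        #define IMM insn->imm

        /* One ALU64 case, BPF_ALU64 | BPF_ADD | BPF_K: dst += imm, full 64-bit width. */
        static void alu64_add_imm(uint64_t regs[11], const struct ebpf_insn *insn)
        {
                DST = DST + IMM;
        }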
/kernel/linux/linux-5.10/arch/sparc/net/bpf_jit_comp_32.c
   264  #define emit_cmpi(R1, IMM) \
   265          *prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
   270  #define emit_btsti(R1, IMM) \
   271          *prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
   276  #define emit_subi(R1, IMM, R3) \
   277          *prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))
   282  #define emit_addi(R1, IMM, R3) \
   283          *prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))
   288  #define emit_andi(R1, IMM, R3) \
   289          *prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | R
        [more matches not shown]
/kernel/linux/linux-6.6/arch/sparc/net/bpf_jit_comp_32.c
   264  #define emit_cmpi(R1, IMM) \
   265          *prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
   270  #define emit_btsti(R1, IMM) \
   271          *prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
   276  #define emit_subi(R1, IMM, R3) \
   277          *prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))
   282  #define emit_addi(R1, IMM, R3) \
   283          *prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))
   288  #define emit_andi(R1, IMM, R3) \
   289          *prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | R
        [more matches not shown]
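Note: each emit_*i() macro assembles one 32-bit SPARC instruction word by OR-ing an opcode with field encoders, and the IMM argument lands in the signed 13-bit immediate field selected by the IMMED bit. A sketch of what those helpers plausibly do (bit positions follow the SPARC V8 format-3 layout; the real definitions live elsewhere in bpf_jit_comp_32.c and may differ in detail):

        /* Assumed field encoders, SPARC V8 format-3 arithmetic instructions. */
        #define RD(r)     (((r) & 0x1f) << 25)   /* destination register, bits 29..25  */
        #define RS1(r)    (((r) & 0x1f) << 14)   /* first source register, bits 18..14 */
        #define IMMED     (1 << 13)              /* i bit: operand 2 is simm13, not rs2 */
        #define S13(imm)  ((imm) & 0x1fff)       /* signed 13-bit immediate, bits 12..0 */

        /* With an opcode ADD, emit_addi(R1, IMM, R3) then stores the single word
         *   ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3)
         * into the JIT output buffer via *prog++. */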
/kernel/linux/linux-6.6/arch/microblaze/kernel/hw_exception_handler.S
   163  .macro bsrli, rD, rA, IMM
   164  .if (\IMM) == 2
   166  .elseif (\IMM) == 10
   168  .elseif (\IMM) == 12
   171  .elseif (\IMM) == 14
   174  .elseif (\IMM) == 20
   176  .elseif (\IMM) == 24
   179  .elseif (\IMM) == 28
   184  .error "BSRLI shift macros \IMM"
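Note: bsrli here is an assembler macro, not an instruction the handler can always rely on; the .if/.elseif ladder appears to pick a hand-written shift sequence for each shift amount it supports and rejects anything else via .error. The net effect of any accepted expansion is simply a logical right shift by a constant:

        /* C equivalent of the result of "bsrli rD, rA, IMM" for a supported IMM. */
        static inline unsigned int bsrli(unsigned int rA, unsigned int imm)
        {
                return rA >> imm;       /* logical shift: the operand is unsigned */
        }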
/kernel/linux/linux-5.10/kernel/bpf/core.c
    60  #define IMM insn->imm                             [macro]
  1345  INSN_3(LD, IMM, DW)
  1430  DST = DST OP IMM; \                               [in ___bpf_prog_run()]
  1433  DST = (u32) DST OP (u32) IMM; \                   [in ___bpf_prog_run()]
  1444  DST = DST OP IMM; \                               [in ___bpf_prog_run()]
  1447  DST = (u32) DST OP (u32) IMM; \                   [in ___bpf_prog_run()]
  1469  DST = (u32) IMM;                                  [in ___bpf_prog_run()]
  1475  DST = IMM;                                        [in ___bpf_prog_run()]
  1485  DST = (u64) (u32) (((s32) DST) >> IMM);           [in ___bpf_prog_run()]
  1491  (*(s64 *) &DST) >>= IMM;                          [in ___bpf_prog_run()]
        [more matches not shown]
/kernel/linux/linux-5.10/arch/microblaze/kernel/hw_exception_handler.S
   170  .macro bsrli, rD, rA, IMM
   171  .if (\IMM) == 2
   173  .elseif (\IMM) == 10
   175  .elseif (\IMM) == 12
   178  .elseif (\IMM) == 14
   181  .elseif (\IMM) == 20
   183  .elseif (\IMM) == 24
   186  .elseif (\IMM) == 28
   191  .error "BSRLI shift macros \IMM"
/kernel/linux/linux-5.10/drivers/crypto/caam/caamalg_desc.c
   420  append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,           [in cnstr_shdsc_aead_decap()]
   428  append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,         [in cnstr_shdsc_aead_decap()]
   672  append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,       [in cnstr_shdsc_gcm_encap()]
   905  append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);   [in cnstr_shdsc_rfc4106_encap()]
  1014  append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);   [in cnstr_shdsc_rfc4106_decap()]
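Note: in the CAAM hits, IMM is different in kind from all of the above; it is an operand-type selector in the driver's descriptor-construction API, marking the last argument as immediate data appended after the MATH command rather than a register. Echoing the rfc4106 call from line 905 above with that reading spelled out (the interpretation is inferred from the call shape and is a fragment of caamalg_desc.c, not a standalone program):

        /* Annotated fragment: compute VARSEQINLEN = REG3 - ivsize inside the
         * shared descriptor, where IMM says "the subtrahend is the immediate
         * word ivsize appended to the MATH command". */
        append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);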