/kernel/linux/linux-5.10/arch/x86/crypto/
nh-avx2-x86_64.S
     24  #define T0 %ymm8
     42  vpaddd \k0, T3, T0
     48  vpshufd $0x10, T0, T4
     49  vpshufd $0x32, T0, T0
     56  vpmuludq T4, T0, T0
     60  vpaddq T0, PASS0_SUMS, PASS0_SUMS
    142  vpunpcklqdq PASS1_SUMS, PASS0_SUMS, T0  // T0 ...    [more hits not shown]
poly1305-x86_64-cryptogams.pl
    420  my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    887  vmovdqu 16*2($inp),$T0
    891  vpsrldq \$6,$T0,$T2          # splat input
    893  vpunpckhqdq $T1,$T0,$T4      # 4
    894  vpunpcklqdq $T1,$T0,$T0      # 0:1
    898  vpsrlq \$26,$T0,$T1
    899  vpand $MASK,$T0,$T0          # 0
    980  vpmuludq $T0, ...    [more hits not shown]
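
The poly1305-x86_64 hits above are the input-splat phase: each 16-byte
message block is cut into five 26-bit limbs (the vpsrlq/vpand lines) so
that limb products stay within 64 bits. A minimal C model of that split,
assuming a little-endian block and the usual 2^128 pad bit; the names are
illustrative, not the kernel's:

    #include <stdint.h>

    static void poly1305_split_limbs(const uint8_t in[16], uint32_t h[5])
    {
        uint64_t lo = 0, hi = 0;

        for (int i = 0; i < 8; i++) {
            lo |= (uint64_t)in[i] << (8 * i);          /* little-endian load */
            hi |= (uint64_t)in[8 + i] << (8 * i);
        }
        h[0] = lo & 0x3ffffff;                         /* bits   0..25  */
        h[1] = (lo >> 26) & 0x3ffffff;                 /* bits  26..51  */
        h[2] = ((lo >> 52) | (hi << 12)) & 0x3ffffff;  /* bits  52..77  */
        h[3] = (hi >> 14) & 0x3ffffff;                 /* bits  78..103 */
        h[4] = (uint32_t)(hi >> 40) | (1u << 24);      /* bits 104..127 + pad */
    }
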
aegis128-aesni-asm.S
     19  #define T0 %xmm6
     51  * T0
     54  movdqa STATE4, T0
     59  aesenc T0, STATE3
     70  * T0
    121  movq (%r8), T0
    122  pxor T0, MSG
    134  * T0 - message block
    144  movq T0, %r10
    150  psrldq $8, T0    [more hits not shown]
nh-sse2-x86_64.S
     20  #define T0 %xmm8
    112  movdqa PASS0_SUMS, T0
    114  punpcklqdq PASS1_SUMS, T0    // => (PASS0_SUM_A PASS1_SUM_A)
    118  paddq PASS0_SUMS, T0
    120  movdqu T0, 0x00(HASH)
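
Both NH files above implement the same hash, once with SSE2 and once with
AVX2: 32-bit message words are added to key words mod 2^32 (vpaddd),
adjacent results are multiplied 32x32 -> 64 (vpmuludq), and the products
accumulate mod 2^64 (vpaddq) into per-pass sums that the punpcklqdq lines
combine at the end. A scalar C sketch of one NH pass under those rules
(the kernel runs several such passes over the same data with shifted keys):

    #include <stdint.h>
    #include <stddef.h>

    static uint64_t nh_pass(const uint32_t *msg, const uint32_t *key,
                            size_t words)       /* words must be even */
    {
        uint64_t sum = 0;

        for (size_t i = 0; i < words; i += 2) {
            uint32_t a = msg[i]     + key[i];   /* wraps mod 2^32 */
            uint32_t b = msg[i + 1] + key[i + 1];
            sum += (uint64_t)a * b;             /* wraps mod 2^64 */
        }
        return sum;
    }
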
/kernel/linux/linux-6.6/arch/x86/crypto/ |
nh-avx2-x86_64.S
     25  #define T0 %ymm8
     43  vpaddd \k0, T3, T0
     49  vpshufd $0x10, T0, T4
     50  vpshufd $0x32, T0, T0
     57  vpmuludq T4, T0, T0
     61  vpaddq T0, PASS0_SUMS, PASS0_SUMS
    143  vpunpcklqdq PASS1_SUMS, PASS0_SUMS, T0  // T0 ...    [more hits not shown]
poly1305-x86_64-cryptogams.pl
    419  my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    886  vmovdqu 16*2($inp),$T0
    890  vpsrldq \$6,$T0,$T2          # splat input
    892  vpunpckhqdq $T1,$T0,$T4      # 4
    893  vpunpcklqdq $T1,$T0,$T0      # 0:1
    897  vpsrlq \$26,$T0,$T1
    898  vpand $MASK,$T0,$T0          # 0
    979  vpmuludq $T0, ...    [more hits not shown]
aegis128-aesni-asm.S
     20  #define T0 %xmm6
     52  * T0
     55  movdqa STATE4, T0
     60  aesenc T0, STATE3
     71  * T0
    122  movq (%r8), T0
    123  pxor T0, MSG
    135  * T0 - message block
    145  movq T0, %r10
    151  psrldq $8, T0    [more hits not shown]
nh-sse2-x86_64.S
     21  #define T0 %xmm8
    113  movdqa PASS0_SUMS, T0
    115  punpcklqdq PASS1_SUMS, T0    // => (PASS0_SUM_A PASS1_SUM_A)
    119  paddq PASS0_SUMS, T0
    121  movdqu T0, 0x00(HASH)
/kernel/linux/linux-6.6/arch/arm64/crypto/ |
sm4-ce-gcm-core.S
     39  #define PMUL_128x128(r0, r1, m0, m1, T0, T1) \
     40  ext T0.16b, m1.16b, m1.16b, #8; \
     42  pmull T1.1q, m0.1d, T0.1d; \
     43  pmull2 T0.1q, m0.2d, T0.2d; \
     45  eor T0.16b, T0.16b, T1.16b; \
     46  ext T1.16b, RZERO.16b, T0.16b, #8; \
     47  ext T0.16b, T0...    [more hits not shown]
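
PMUL_128x128 assembles a 128x128 -> 256-bit product in GF(2)[x] from the
64x64 carryless multiplies that pmull (low doubleword lanes) and pmull2
(high lanes) provide, folding the partial products together with ext/eor.
A slow, purely illustrative C model of the 64x64 building block:

    #include <stdint.h>

    /* carryless (polynomial) multiply: XOR together shifted copies of a */
    static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
    {
        uint64_t h = 0, l = 0;

        for (int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                l ^= a << i;
                if (i)
                    h ^= a >> (64 - i);   /* product bits above bit 63 */
            }
        }
        *hi = h;
        *lo = l;
    }
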
sha512-armv8.pl
    109  my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
    110  $T0=@X[$i+3] if ($i<11);
    142  eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
    148  eor $t0,$t0,$T0,ror#$Sigma1[1]    // Sigma1(e)
    149  ror $T0,$a,#$Sigma0[0]
    156  eor $t1,$T0,$t1,ror#$Sigma0[1]    // Sigma0(a)
    168  ror $T0,$a,#$Sigma0[0]
    175  eor $T0,$T0,$a,ror#$Sigma0[1]
    182  eor $t1,$T0,...    [more hits not shown]
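
The sha512-armv8 hits chain ror/eor to evaluate the big Sigma functions of
the compression round. In C, with the FIPS 180-4 rotation amounts, the
same functions are simply:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)   /* 0 < n < 64 */
    {
        return (x >> n) | (x << (64 - n));
    }

    static inline uint64_t Sigma0(uint64_t a)   /* "Sigma0(a)" above */
    {
        return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
    }

    static inline uint64_t Sigma1(uint64_t e)   /* "Sigma1(e)" above */
    {
        return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
    }
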
poly1305-armv8.pl
    267  my ($T0,$T1,$MASK) = map("v$_",(29..31));
    705  ushr $T0.2d,$ACC3,#26
    709  add $ACC4,$ACC4,$T0.2d    // h3 -> h4
    713  ushr $T0.2d,$ACC4,#26
    720  add $ACC0,$ACC0,$T0.2d
    721  shl $T0.2d,$T0.2d,#2
    724  add $ACC0,$ACC0,$T0.2d    // h4 -> h0
    729  shrn $T0.2s,$ACC0,#26
    734  add $H1,$H1,$T0...    [more hits not shown]
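
These poly1305-armv8 hits are the carry chain between 26-bit limbs: excess
bits of one limb carry into the next, and the carry out of h4 wraps back
into h0 multiplied by 5 (the shl #2 plus two adds), because
2^130 mod (2^130 - 5) = 5. A scalar sketch of the same chain, with
illustrative names:

    #include <stdint.h>

    static void poly1305_carry(uint64_t h[5])
    {
        uint64_t c;

        c = h[3] >> 26; h[3] &= 0x3ffffff; h[4] += c;   /* h3 -> h4 */
        c = h[4] >> 26; h[4] &= 0x3ffffff;
        h[0] += c + (c << 2);                           /* h4 -> h0: c * 5 */
        c = h[0] >> 26; h[0] &= 0x3ffffff; h[1] += c;   /* h0 -> h1 */
    }
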
/kernel/linux/linux-5.10/arch/sparc/crypto/ |
aes_asm.S
      7  #define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \
      8  AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
     10  AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
     11  AES_EROUND23(KEY_BASE + 6, T0, T1, I1)
     13  #define ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
     14  AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
     18  AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
     19  AES_EROUND23(KEY_BASE + 6, T0, T1, I1) \
     23  #define ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \
     24  AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \    [more hits not shown]
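
The pattern in these macros is double buffering: the first round reads the
live state I0/I1 and writes the temporaries T0/T1, the second reads T0/T1
and writes back into I0/I1, so no register moves are needed between rounds.
A C sketch of that shape; aes_block and aes_round() are hypothetical
stand-ins for the register pair and the AES_EROUND01/AES_EROUND23
instruction pair:

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } aes_block;

    /* stand-in only: a real round would run SubBytes/ShiftRows/etc. */
    static aes_block aes_round(const uint64_t *rk, aes_block in)
    {
        in.hi ^= rk[0];
        in.lo ^= rk[1];
        return in;
    }

    static aes_block encrypt_two_rounds(const uint64_t *key_base,
                                        aes_block i)
    {
        aes_block t = aes_round(key_base + 0, i);   /* round N:   I -> T */
        return aes_round(key_base + 4, t);          /* round N+1: T -> I */
    }
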
/kernel/linux/linux-6.6/arch/sparc/crypto/ |
aes_asm.S
      7  #define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \
      8  AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
     10  AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
     11  AES_EROUND23(KEY_BASE + 6, T0, T1, I1)
     13  #define ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
     14  AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
     18  AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
     19  AES_EROUND23(KEY_BASE + 6, T0, T1, I1) \
     23  #define ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \
     24  AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \    [more hits not shown]
/kernel/linux/linux-5.10/arch/mips/kvm/ |
entry.c
     31  #define T0 8
     38  #define T0 12
    in kvm_mips_build_enter_guest():
    305  UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
    306  UASM_i_MTC0(&p, T0, C0_EPC);
    348  uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
    350  uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
    352  uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
    354  uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
    372  UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
    373  UASM_i_LW(&p, T0, offseto...    [more hits not shown]
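
entry.c defines T0 twice because uasm takes raw GPR numbers, and the number
that names $t0 is ABI-dependent: under o32, $t0-$t7 are GPRs 8-15, while
under n32/n64, GPRs 8-11 become $a4-$a7 and $t0 shifts to GPR 12. A sketch
of the same idea (the kernel keys this off its own ABI macros, not this
exact CONFIG_64BIT test):

    #include <stdio.h>

    #ifdef CONFIG_64BIT
    #define T0 12   /* $t0 under the n32/n64 register convention */
    #else
    #define T0 8    /* $t0 under the o32 register convention */
    #endif

    int main(void)
    {
        /* callers hand this number straight to the uasm emitters,
         * e.g. uasm_i_mfc0(&p, T0, C0_GUESTCTL1) */
        printf("T0 is GPR %d\n", T0);
        return 0;
    }
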
/kernel/linux/linux-6.6/arch/mips/kvm/ |
entry.c
     31  #define T0 8
     38  #define T0 12
    in kvm_mips_build_enter_guest():
    299  UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
    300  UASM_i_MTC0(&p, T0, C0_EPC);
    341  uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
    343  uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
    345  uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
    347  uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
    in kvm_mips_build_exit():
    595  uasm_i_mfhi(&p, T0);
    596  UASM_i_SW(&p, T0, offseto...    [more hits not shown]
/kernel/linux/linux-5.10/arch/arm64/crypto/ |
sha512-armv8.pl
    109  my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
    110  $T0=@X[$i+3] if ($i<11);
    142  eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
    148  eor $t0,$t0,$T0,ror#$Sigma1[1]    // Sigma1(e)
    149  ror $T0,$a,#$Sigma0[0]
    156  eor $t1,$T0,$t1,ror#$Sigma0[1]    // Sigma0(a)
    168  ror $T0,$a,#$Sigma0[0]
    175  eor $T0,$T0,$a,ror#$Sigma0[1]
    182  eor $t1,$T0,...    [more hits not shown]
poly1305-armv8.pl
    267  my ($T0,$T1,$MASK) = map("v$_",(29..31));
    705  ushr $T0.2d,$ACC3,#26
    709  add $ACC4,$ACC4,$T0.2d    // h3 -> h4
    713  ushr $T0.2d,$ACC4,#26
    720  add $ACC0,$ACC0,$T0.2d
    721  shl $T0.2d,$T0.2d,#2
    724  add $ACC0,$ACC0,$T0.2d    // h4 -> h0
    729  shrn $T0.2s,$ACC0,#26
    734  add $H1,$H1,$T0...    [more hits not shown]
/kernel/linux/linux-5.10/arch/mips/crypto/ |
chacha-core.S
     28  #define T0 $s1
    130  lw T0, (x*4)(STATE); \
    137  addu X ## x, T0; \
    147  lw T0, (x*4)(STATE); \
    153  addu X ## x, T0; \
    322  lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)
    325  ins T0, $at, 1, 6
    331  addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)
    339  jr T0
    402  lui T0, ...    [more hits not shown]
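
The lui/ins/addiu/jr group builds an entry address inside an aligned jump
table from the remaining byte count and branches into it, instead of
looping byte by byte. A loose portable analogue dispatches through a
function table; the tail_copy_* handlers below are hypothetical:

    #include <stddef.h>

    typedef void (*tail_fn)(unsigned char *dst, const unsigned char *src);

    static void tail_copy_0(unsigned char *d, const unsigned char *s)
    { (void)d; (void)s; }
    static void tail_copy_1(unsigned char *d, const unsigned char *s)
    { d[0] = s[0]; }
    static void tail_copy_2(unsigned char *d, const unsigned char *s)
    { d[0] = s[0]; d[1] = s[1]; }

    static const tail_fn tail_jmptbl[] = {
        tail_copy_0, tail_copy_1, tail_copy_2, /* ... one per tail length */
    };

    static void handle_tail(unsigned char *dst, const unsigned char *src,
                            size_t n)   /* n must index within the table */
    {
        tail_jmptbl[n](dst, src);       /* the asm's "jr T0", portably */
    }
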
/kernel/linux/linux-6.6/arch/mips/crypto/ |
chacha-core.S
     28  #define T0 $s1
    130  lw T0, (x*4)(STATE); \
    137  addu X ## x, T0; \
    147  lw T0, (x*4)(STATE); \
    153  addu X ## x, T0; \
    322  lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)
    325  ins T0, $at, 1, 6
    331  addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)
    339  jr T0
    402  lui T0, ...    [more hits not shown]
/kernel/linux/linux-5.10/arch/mips/mm/ |
page.c
     44  #define T0 8
    in build_copy_page():
    478  build_copy_load(&buf, T0, off);
    486  build_copy_store(&buf, T0, off);
    500  build_copy_load(&buf, T0, off);
    508  build_copy_store(&buf, T0, off);
    526  build_copy_load(&buf, T0, off);
    531  build_copy_store(&buf, T0, off);
    544  build_copy_load(&buf, T0, off);
    549  build_copy_store(&buf, T0, off);
    568  build_copy_load(&buf, T0, of...    [more hits not shown]
/kernel/linux/linux-6.6/arch/mips/mm/ |
page.c
     43  #define T0 8
    in build_copy_page():
    479  build_copy_load(&buf, T0, off);
    487  build_copy_store(&buf, T0, off);
    501  build_copy_load(&buf, T0, off);
    509  build_copy_store(&buf, T0, off);
    527  build_copy_load(&buf, T0, off);
    532  build_copy_store(&buf, T0, off);
    545  build_copy_load(&buf, T0, off);
    550  build_copy_store(&buf, T0, off);
    569  build_copy_load(&buf, T0, of...    [more hits not shown]
/kernel/linux/linux-5.10/arch/arm/crypto/ |
sha256-armv4.pl
    292  my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
    312  &vext_8 ($T0,@X[0],@X[1],4);    # X[1..4]
    320  &vshr_u32 ($T2,$T0,$sigma0[0]);
    326  &vshr_u32 ($T1,$T0,$sigma0[2]);
    329  &vsli_32 ($T2,$T0,32-$sigma0[0]);
    332  &vshr_u32 ($T3,$T0,$sigma0[1]);
    338  &vsli_32 ($T3,$T0,32-$sigma0[1]);
    386  &vld1_32 ("{$T0}","[$Ktbl,:128]!");
    398  &vadd_i32 ($T0,$T0,...    [more hits not shown]
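
NEON has no vector rotate, so the vshr/vsli pairs above build one lane by
lane: the right shift produces the low bits and the shift-left-insert
fills in the high bits. The message-schedule function they compute, in
scalar C with the FIPS 180-4 amounts:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
        return (x >> n) | (x << (32 - n));   /* what vshr + vsli emulate */
    }

    static inline uint32_t sigma0(uint32_t x)
    {
        return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
    }
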
poly1305-armv4.pl
    497  my ($T0,$T1,$MASK) = map("q$_",(15,4,0));
    633  vshr.u64 $T0,$D3,#26
    637  vadd.i64 $D4,$D4,$T0    @ h3 -> h4
    642  vshrn.u64 $T0#lo,$D4,#26
    650  vadd.i32 $D0#lo,$D0#lo,$T0#lo
    651  vshl.u32 $T0#lo,$T0#lo,#2
    654  vadd.i32 $D0#lo,$D0#lo,$T0#lo    @ h4 -> h0
    658  vshr.u32 $T0#lo,$D0#lo,#26
    662  vadd.i32 $D1#lo,$D1#lo,$T0#l...    [more hits not shown]
/kernel/linux/linux-6.6/arch/arm/crypto/ |
sha256-armv4.pl
    292  my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
    312  &vext_8 ($T0,@X[0],@X[1],4);    # X[1..4]
    320  &vshr_u32 ($T2,$T0,$sigma0[0]);
    326  &vshr_u32 ($T1,$T0,$sigma0[2]);
    329  &vsli_32 ($T2,$T0,32-$sigma0[0]);
    332  &vshr_u32 ($T3,$T0,$sigma0[1]);
    338  &vsli_32 ($T3,$T0,32-$sigma0[1]);
    386  &vld1_32 ("{$T0}","[$Ktbl,:128]!");
    398  &vadd_i32 ($T0,$T0,...    [more hits not shown]
poly1305-armv4.pl
    497  my ($T0,$T1,$MASK) = map("q$_",(15,4,0));
    633  vshr.u64 $T0,$D3,#26
    637  vadd.i64 $D4,$D4,$T0    @ h3 -> h4
    642  vshrn.u64 $T0#lo,$D4,#26
    650  vadd.i32 $D0#lo,$D0#lo,$T0#lo
    651  vshl.u32 $T0#lo,$T0#lo,#2
    654  vadd.i32 $D0#lo,$D0#lo,$T0#lo    @ h4 -> h0
    658  vshr.u32 $T0#lo,$D0#lo,#26
    662  vadd.i32 $D1#lo,$D1#lo,$T0#l...    [more hits not shown]