/kernel/linux/linux-5.10/arch/x86/crypto/
poly1305-x86_64-cryptogams.pl
     420  my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
     893  vpunpckhqdq $T1,$T0,$T4            # 4
     897  vpsrlq \$40,$T4,$T4                # 4
     905  vpor 32(%rcx),$T4,$T4              # padbit, yes, always
     986  vpmuludq $T4,$D4,$D4               # d4 = h4*r0
     989  vpmuludq 0x20(%rsp),$T4,$H0        # h4*s1
    1012  vpmuludq $T4,$H4,$H0               # h4*s2
    1023  vpmuludq $T4, [all...]
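The vpunpckhqdq/vpsrlq/vpor sequence above is the vectorized form of splitting a 16-byte message block into five 26-bit limbs and folding the 2^128 pad bit into the top limb. A minimal scalar sketch of that split (names hypothetical, little-endian host assumed):

    #include <stdint.h>
    #include <string.h>

    static void poly1305_block_to_limbs(const uint8_t in[16], uint32_t h[5],
                                        uint32_t padbit)
    {
        uint64_t lo, hi;

        memcpy(&lo, in, 8);                             /* little-endian host assumed */
        memcpy(&hi, in + 8, 8);

        h[0] = lo & 0x3ffffff;                          /* bits   0..25  */
        h[1] = (lo >> 26) & 0x3ffffff;                  /* bits  26..51  */
        h[2] = ((lo >> 52) | (hi << 12)) & 0x3ffffff;   /* bits  52..77  */
        h[3] = (hi >> 14) & 0x3ffffff;                  /* bits  78..103 */
        h[4] = (uint32_t)(hi >> 40) | (padbit << 24);   /* bits 104..129 */
    }

The vpsrlq \$40 and vpor 32(%rcx) matches correspond to the final limb's shift and pad-bit OR; the vpmuludq lines then form the h*r products limb by limb.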
aesni-intel_avx-x86_64.S
     605  .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
     621  \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
     667  \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
     895  .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
     916  vpslld $25, \GH, \T4               # packed left shift << 25
     919  vpxor \T4, \T2, \T2
     930  vpsrld $7, \GH, \T4                # packed right shift >> 7
     932  vpxor \T4, \T2, \T2
     941  .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
     950  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T [all...]
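GHASH_MUL_AVX multiplies in GF(2^128) with PCLMULQDQ, then reduces with the vpslld/vpsrld shift-and-xor phases shown above. As a reference for what it computes, here is the bitwise multiply from NIST SP 800-38D (Algorithm 1), in GHASH's reflected bit order; names are hypothetical:

    #include <stdint.h>
    #include <string.h>

    static void gf128_mul(uint8_t Z[16], const uint8_t X[16], const uint8_t Y[16])
    {
        uint8_t V[16];
        int i, j, lsb;

        memcpy(V, Y, 16);
        memset(Z, 0, 16);

        for (i = 0; i < 128; i++) {
            if (X[i / 8] & (0x80 >> (i % 8)))   /* bit i of X (MSB-first) */
                for (j = 0; j < 16; j++)
                    Z[j] ^= V[j];

            lsb = V[15] & 1;                    /* V = V * x, then reduce */
            for (j = 15; j > 0; j--)
                V[j] = (V[j] >> 1) | (V[j - 1] << 7);
            V[0] >>= 1;
            if (lsb)
                V[0] ^= 0xe1;                   /* x^128 + x^7 + x^2 + x + 1 */
        }
    }

The assembly's first and second reduction phases fold the high 128 bits of the carry-less product back in with exactly these <<31/<<30/<<25 and >>1/>>2/>>7 shift amounts.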
nh-avx2-x86_64.S
      30  #define T4 %ymm12                  [define]
      48  vpshufd $0x10, T0, T4
      56  vpmuludq T4, T0, T0
     147  vinserti128 $0x1, T2_XMM, T0, T4   // T4 = (0A 1A 2A 3A)
     152  vpaddq T5, T4, T4
     154  vpaddq T4, T0, T0
nh-sse2-x86_64.S
      24  #define T4 %xmm12                  [define]
      50  pshufd $0x10, \k0, T4
      58  pmuludq T4, \k0
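Both NH files above compute the same primitive: add 32-bit key words to message words (wrapping), multiply the pairs into 64-bit products (the pmuludq/vpmuludq matches), and accumulate mod 2^64. A single-pass scalar sketch with textbook UMAC-style adjacent pairing; the kernel's four-pass variant interleaves words differently across vector lanes, so treat the pairing here as illustrative:

    #include <stdint.h>
    #include <stddef.h>

    static uint64_t nh_pass(const uint32_t *key, const uint32_t *msg,
                            size_t n_words)     /* n_words must be even */
    {
        uint64_t sum = 0;
        size_t i;

        for (i = 0; i < n_words; i += 2) {
            uint32_t a = msg[i] + key[i];       /* wraps mod 2^32 */
            uint32_t b = msg[i + 1] + key[i + 1];

            sum += (uint64_t)a * b;             /* wraps mod 2^64 */
        }
        return sum;
    }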
/kernel/linux/linux-6.6/arch/x86/crypto/
poly1305-x86_64-cryptogams.pl
     419  my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
     892  vpunpckhqdq $T1,$T0,$T4            # 4
     896  vpsrlq \$40,$T4,$T4                # 4
     904  vpor 32(%rcx),$T4,$T4              # padbit, yes, always
     985  vpmuludq $T4,$D4,$D4               # d4 = h4*r0
     988  vpmuludq 0x20(%rsp),$T4,$H0        # h4*s1
    1011  vpmuludq $T4,$H4,$H0               # h4*s2
    1022  vpmuludq $T4, [all...]
aesni-intel_avx-x86_64.S
     571  .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
     587  \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
     635  \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
     863  .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
     884  vpslld $25, \GH, \T4               # packed left shift << 25
     887  vpxor \T4, \T2, \T2
     898  vpsrld $7, \GH, \T4                # packed right shift >> 7
     900  vpxor \T4, \T2, \T2
     909  .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
     918  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T [all...]
nh-avx2-x86_64.S
      31  #define T4 %ymm12                  [define]
      49  vpshufd $0x10, T0, T4
      57  vpmuludq T4, T0, T0
     148  vinserti128 $0x1, T2_XMM, T0, T4   // T4 = (0A 1A 2A 3A)
     153  vpaddq T5, T4, T4
     155  vpaddq T4, T0, T0
nh-sse2-x86_64.S
      25  #define T4 %xmm12                  [define]
      51  pshufd $0x10, \k0, T4
      59  pmuludq T4, \k0
/kernel/linux/linux-6.6/arch/arm64/crypto/
sm4-ce-gcm-core.S
      53  r4, r5, m4, m5, T4, T5, \
      57  ext T4.16b, m5.16b, m5.16b, #8;    \
      65  pmull T5.1q, m4.1d, T4.1d;         \
      69  pmull2 T4.1q, m4.2d, T4.2d;        \
      77  eor T4.16b, T4.16b, T5.16b;        \
      81  ext T5.16b, RZERO.16b, T4.16b, #8; \
      85  ext T4.16b, T4 [all...]
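The pmull/pmull2/eor/ext pattern above is the usual Karatsuba decomposition of a 128x128 carry-less multiply into three 64x64 PMULLs; the reduction modulo the GHASH polynomial then follows, as in the gf128_mul reference earlier. A scalar model, with clmul64() standing in for the PMULL instruction (all names hypothetical):

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;

    /* Carry-less 64x64 -> 128 multiply; stands in for PMULL/PMULL2. */
    static u128 clmul64(uint64_t a, uint64_t b)
    {
        u128 r = { 0, 0 };
        int i;

        for (i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                r.lo ^= a << i;
                if (i)
                    r.hi ^= a >> (64 - i);
            }
        }
        return r;
    }

    /* (aH:aL) x (bH:bL): three multiplies instead of four (Karatsuba). */
    static void clmul128(uint64_t aH, uint64_t aL, uint64_t bH, uint64_t bL,
                         u128 *hi, u128 *lo)
    {
        u128 ll  = clmul64(aL, bL);             /* pmull            */
        u128 hh  = clmul64(aH, bH);             /* pmull2           */
        u128 mid = clmul64(aH ^ aL, bH ^ bL);   /* cross terms      */

        mid.lo ^= ll.lo ^ hh.lo;                /* the eor chain    */
        mid.hi ^= ll.hi ^ hh.hi;

        lo->lo = ll.lo;                         /* the ext with     */
        lo->hi = ll.hi ^ mid.lo;                /* RZERO aligns mid */
        hi->lo = hh.lo ^ mid.hi;                /* across the       */
        hi->hi = hh.hi;                         /* 64-bit boundary  */
    }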
sha512-armv8.pl
     463  my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
     511  &ushr_32 ($T4,$T7,$sigma1[0]);
     517  &sli_32  ($T4,$T7,32-$sigma1[0]);
     532  &eor_8   ($T5,$T5,$T4);
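The ushr/sli pairs above build a 32-bit rotate (shift right, then shift-left-and-insert the wrapped bits), and the eor lines combine rotates into SHA-2's small sigma functions; the 32-bit ops suggest this is the script's SHA-256 code path, and the sha256-armv4.pl entries below use the same vshr/vsli idiom. A scalar equivalent for SHA-256's sigma1 (constants per FIPS 180-4, names hypothetical):

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, unsigned n)   /* n in 1..31 */
    {
        return (x >> n) | (x << (32 - n));          /* the ushr + sli pair */
    }

    static uint32_t sha256_sigma1(uint32_t x)
    {
        return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
    }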
/kernel/linux/linux-5.10/arch/arm/crypto/
sha256-armv4.pl
     292  my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
     341  &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[0]);
     347  &vsli_32  ($T4,&Dhi(@X[3]),32-$sigma1[0]);
     356  &veor     ($T5,$T5,$T4);
     359  &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[1]);
     362  &vsli_32  ($T4,&Dhi(@X[3]),32-$sigma1[1]);
     365  &veor     ($T5,$T5,$T4);               # sigma1(X[14..15])
     371  &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[0]);
     374  &vsli_32  ($T4,&Dlo(@X[0]),32-$sigma1[0]);
     380  &veor     ($T5,$T5,$T4);
    [all...]
/kernel/linux/linux-6.6/arch/arm/crypto/
sha256-armv4.pl
     292  my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
     341  &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[0]);
     347  &vsli_32  ($T4,&Dhi(@X[3]),32-$sigma1[0]);
     356  &veor     ($T5,$T5,$T4);
     359  &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[1]);
     362  &vsli_32  ($T4,&Dhi(@X[3]),32-$sigma1[1]);
     365  &veor     ($T5,$T5,$T4);               # sigma1(X[14..15])
     371  &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[0]);
     374  &vsli_32  ($T4,&Dlo(@X[0]),32-$sigma1[0]);
     380  &veor     ($T5,$T5,$T4);
    [all...]
/kernel/linux/linux-5.10/crypto/
anubis.c
     320  static const u32 T4[256] = {       [variable]
     495  K0 = T4[(kappa[N - 1] >> 24)       ];    in anubis_setkey()
     496  K1 = T4[(kappa[N - 1] >> 16) & 0xff];    in anubis_setkey()
     497  K2 = T4[(kappa[N - 1] >>  8) & 0xff];    in anubis_setkey()
     498  K3 = T4[(kappa[N - 1]      ) & 0xff];    in anubis_setkey()
     500  K0 = T4[(kappa[i] >> 24)       ] ^       in anubis_setkey()
     505  K1 = T4[(kappa[i] >> 16) & 0xff] ^       in anubis_setkey()
     510  K2 = T4[(kappa[i] >>  8) & 0xff] ^       in anubis_setkey()
     515  K3 = T4[(kappa[i]      ) & 0xff] ^       in anubis_setkey()
     562  T0[T4[( [all...]                         in anubis_setkey()
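Anubis (and Khazad below) are classic T-table ciphers: each word is split into bytes, each byte indexes a 256-entry table that precomputes the S-box composed with the diffusion layer, and the lookups are combined with XOR, exactly as in the matches above. A generic sketch of one such step (table contents and names hypothetical):

    #include <stdint.h>

    static uint32_t ttable_round(uint32_t w, const uint32_t T[4][256])
    {
        return T[0][(w >> 24) & 0xff] ^
               T[1][(w >> 16) & 0xff] ^
               T[2][(w >>  8) & 0xff] ^
               T[3][ w        & 0xff];
    }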
khazad.c
     394  static const u64 T4[256] = {       [variable]
     775  T4[(int)(K1 >> 24) & 0xff] ^                                in khazad_setkey()
     791  T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^                 in khazad_setkey()
     817  T4[(int)(state >> 24) & 0xff] ^                             in khazad_crypt()
     828  (T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^   in khazad_crypt()
/kernel/linux/linux-6.6/crypto/
anubis.c
     320  static const u32 T4[256] = {       [variable]
     495  K0 = T4[(kappa[N - 1] >> 24)       ];    in anubis_setkey()
     496  K1 = T4[(kappa[N - 1] >> 16) & 0xff];    in anubis_setkey()
     497  K2 = T4[(kappa[N - 1] >>  8) & 0xff];    in anubis_setkey()
     498  K3 = T4[(kappa[N - 1]      ) & 0xff];    in anubis_setkey()
     500  K0 = T4[(kappa[i] >> 24)       ] ^       in anubis_setkey()
     505  K1 = T4[(kappa[i] >> 16) & 0xff] ^       in anubis_setkey()
     510  K2 = T4[(kappa[i] >>  8) & 0xff] ^       in anubis_setkey()
     515  K3 = T4[(kappa[i]      ) & 0xff] ^       in anubis_setkey()
     562  T0[T4[( [all...]                         in anubis_setkey()
khazad.c
     394  static const u64 T4[256] = {       [variable]
     775  T4[(int)(K1 >> 24) & 0xff] ^                                in khazad_setkey()
     791  T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^                 in khazad_setkey()
     817  T4[(int)(state >> 24) & 0xff] ^                             in khazad_crypt()
     828  (T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^   in khazad_crypt()
/kernel/linux/linux-5.10/tools/perf/arch/riscv/util/
unwind-libdw.c
      50  dwarf_regs[29] = REG(T4);          in libdw__arch_set_initial_registers()
/kernel/linux/linux-6.6/tools/perf/arch/riscv/util/
unwind-libdw.c
      51  dwarf_regs[29] = REG(T4);          in libdw__arch_set_initial_registers()
/kernel/linux/linux-5.10/arch/arm64/crypto/
sha512-armv8.pl
     463  my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
     511  &ushr_32 ($T4,$T7,$sigma1[0]);
     517  &sli_32  ($T4,$T7,32-$sigma1[0]);
     532  &eor_8   ($T5,$T5,$T4);
/kernel/linux/linux-5.10/drivers/hid/
hid-alps.c
      73  T4,                                [enumerator]
     457  case T4:                           in alps_post_reset()
     691  case T4:                           in alps_input_configured()
     803  data->dev_type = T4;               in alps_probe()
/kernel/linux/linux-6.6/drivers/hid/
hid-alps.c
      73  T4,                                [enumerator]
     457  case T4:                           in alps_post_reset()
     691  case T4:                           in alps_input_configured()
     803  data->dev_type = T4;               in alps_probe()
/kernel/linux/linux-5.10/drivers/pinctrl/aspeed/
pinctrl-aspeed-g5.c
     862  #define T4 111                     [macro]
     864  SIG_EXPR_LIST_DECL_SINGLE(T4, VPIG7, VPI24, VPI24_DESC, T4_DESC);
     865  SIG_EXPR_LIST_DECL_SINGLE(T4, PWM7, PWM7, T4_DESC, COND2);
     866  PIN_DECL_2(T4, GPION7, VPIG7, PWM7);
     867  FUNC_GROUP_DECL(PWM7, T4);
     930  U3, W3, AA3, Y3, T4, U5, U4, AB3, Y4, AA4, W4, V4, W5, AA5,
    2094  ASPEED_PINCTRL_PIN(T4),
    2537  ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V2, T4, SCU8C, 29),
    2538  ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V2, T4, SCU8C, 29),
/kernel/linux/linux-6.6/drivers/pinctrl/aspeed/
pinctrl-aspeed-g5.c
     862  #define T4 111                     [macro]
     864  SIG_EXPR_LIST_DECL_SINGLE(T4, VPIG7, VPI24, VPI24_DESC, T4_DESC);
     865  SIG_EXPR_LIST_DECL_SINGLE(T4, PWM7, PWM7, T4_DESC, COND2);
     866  PIN_DECL_2(T4, GPION7, VPIG7, PWM7);
     867  FUNC_GROUP_DECL(PWM7, T4);
     930  U3, W3, AA3, Y3, T4, U5, U4, AB3, Y4, AA4, W4, V4, W5, AA5,
    2094  ASPEED_PINCTRL_PIN(T4),
    2537  ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V2, T4, SCU8C, 29),
    2538  ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V2, T4, SCU8C, 29),
/kernel/linux/linux-5.10/lib/crypto/
des.c
     607  #define T4(x) pt[2 * (x) + 3]      [macro]
     609  #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
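DES_PC2 applies the PC-2 key-schedule permutation by table lookup: each input chunk selects a precomputed word of scattered output bits, and the four lookups are OR'd together (their bit positions are disjoint). A generic sketch of the idea; the kernel's actual pt[] layout, indexing stride, and the T1..T3 definitions are not shown in this listing, so this shape is an assumption:

    #include <stdint.h>

    /* Hypothetical layout: one 256-entry slice per input byte position. */
    static uint32_t apply_perm(const uint32_t tab[4][256],
                               uint8_t a, uint8_t b, uint8_t c, uint8_t d)
    {
        return tab[0][a] | tab[1][b] | tab[2][c] | tab[3][d];
    }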
/kernel/linux/linux-6.6/lib/crypto/
des.c
     607  #define T4(x) pt[2 * (x) + 3]      [macro]
     609  #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))