
Searched refs:rem_4bit (Results 1 - 25 of 26) sorted by relevance


/third_party/node/deps/openssl/openssl/crypto/modes/asm/
ghash-s390x.pl
80 $rem_4bit="%r14";
118 larl $rem_4bit,rem_4bit
152 larl $rem_4bit,rem_4bit
201 xg $Zhi,0($rem0,$rem_4bit)
214 xg $Zhi,0($rem1,$rem_4bit)
226 xg $Zhi,0($rem0,$rem_4bit)
236 xg $Zhi,0($rem1,$rem_4bit)
238 lg $tmp,0($xi,$rem_4bit)
252 rem_4bit: global() label
[all...]
ghash-sparcv9.pl
72 $rem_4bit="%l4";
96 rem_4bit: label
101 .type rem_4bit,#object
102 .size rem_4bit,(.-rem_4bit)
115 add %o7,rem_4bit-1b,$rem_4bit
131 ldx [$rem_4bit+$remi],$rem
153 ldx [$rem_4bit+$remi],$rem
168 ldx [$rem_4bit
[all...]
ghash-parisc.pl
72 $rem_4bit="%r28";
117 blr %r0,$rem_4bit
120 andcm $rem_4bit,$rem,$rem_4bit
122 ldo L\$rem_4bit-L\$pic_gmult($rem_4bit),$rem_4bit
156 ldd $rem($rem_4bit),$rem
172 ldd $rem($rem_4bit),$rem
185 ldd $rem($rem_4bit),
[all...]
ghash-armv4.pl
116 $rem_4bit=$inp; # used in gcm_gmult_4bit
160 .type rem_4bit,%object
162 rem_4bit: label
167 .size rem_4bit,.-rem_4bit
172 adr $rem_4bit,rem_4bit
174 sub $rem_4bit,pc,#8+32 @ &rem_4bit
186 adr r12,rem_4bit
[all...]
ghash-alpha.pl
52 $rem_4bit="AT"; # $28
76 s8addq $remp,$rem_4bit,$remp
104 s8addq $remp,$rem_4bit,$remp
122 s8addq $remp,$rem_4bit,$remp
146 s8addq $remp,$rem_4bit,$remp
163 s8addq $remp,$rem_4bit,$remp
188 s8addq $remp,$rem_4bit,$remp
206 s8addq $remp,$rem_4bit,$remp
229 s8addq $remp,$rem_4bit,$remp
242 s8addq $remp,$rem_4bit,
455 rem_4bit: global() label
[all...]
ghash-x86.pl
355 &static_label("rem_4bit");
359 $S=12; # shift factor for rem_4bit
367 # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
370 # Reference to rem_4bit is scheduled so late that I had to >>4
371 rem_4bit elements. This resulted in 20-45% improvement
375 my $rem_4bit = "eax";
404 &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
414 &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8)); # last rem_4bit[rem]
421 &shl ($inp,4); # compensate for rem_4bit[
[all...]
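
The ghash-x86.pl comments above describe pre-shifting the rem_4bit elements right by 4 so the reduction XOR can be scheduled one iteration later, with a final shl 4 compensating at the last lookup. Below is a minimal C sketch of the two identities that make this legal, assuming only that every rem_4bit entry sits in the top 16 bits of the word with its low four bits clear (visible in the gcm128.c constants further down); this is an illustration, not the perlasm itself.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* one rem_4bit entry; all entries keep their low 4 bits clear */
    const uint64_t T = (uint64_t)0x1C20 << 48;
    const uint64_t Z = 0x0123456789abcdefULL; /* arbitrary Z.hi value */

    /* right shift distributes over XOR, so the table XOR may be
     * deferred past a 4-bit shift by storing the entries >>4 ...   */
    assert(((Z ^ T) >> 4) == ((Z >> 4) ^ (T >> 4)));

    /* ... and the last, unshifted use recovers the original entry
     * with shl 4, losslessly, because (T & 0xf) == 0               */
    assert(((T >> 4) << 4) == T);
    return 0;
}
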
ghash-ia64.pl
60 # &rem_4bit[Zlo&0xf]. It works, because rem_4bit is aligned at 128
139 add rem_4bitp=rem_4bit#-gcm_gmult_4bit#,rem_4bitp
421 .type rem_4bit#,\@object
422 rem_4bit: label
427 .size rem_4bit#,128
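
The ghash-ia64.pl hit above notes that indexing &rem_4bit[Zlo&0xf] "works, because rem_4bit is aligned at 128": the table is 16 entries of 8 bytes (.size rem_4bit#,128), so the scaled index is at most 120 and fits entirely in the base address's zeroed low 7 bits, letting an OR/deposit of the index stand in for an add. A small C check of that property, a sketch under C11 alignment rules rather than the ia64 code itself:

#include <assert.h>
#include <stdint.h>

static _Alignas(128) uint64_t rem_4bit[16];

int main(void)
{
    uintptr_t base = (uintptr_t)rem_4bit; /* low 7 bits are zero */
    for (unsigned i = 0; i < 16; i++)
        /* i*8 <= 120 < 128, so OR and ADD agree for every index */
        assert((base | (i * 8)) == base + i * 8);
    return 0;
}
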
ghash-x86_64.pl
136 $rem_4bit = "%r11";
187 xor ($rem_4bit,$rem,8),$Zhi
202 xor ($rem_4bit,$rem,8),$Zhi
217 xor ($rem_4bit,$rem,8),$Zhi
229 xor ($rem_4bit,$rem,8),$Zhi
263 lea .Lrem_4bit(%rip),$rem_4bit
286 $rem_8bit=$rem_4bit;
/third_party/openssl/crypto/modes/asm/
ghash-s390x.pl
80 $rem_4bit="%r14";
118 larl $rem_4bit,rem_4bit
152 larl $rem_4bit,rem_4bit
201 xg $Zhi,0($rem0,$rem_4bit)
214 xg $Zhi,0($rem1,$rem_4bit)
226 xg $Zhi,0($rem0,$rem_4bit)
236 xg $Zhi,0($rem1,$rem_4bit)
238 lg $tmp,0($xi,$rem_4bit)
252 rem_4bit: global() label
[all...]
ghash-sparcv9.pl
72 $rem_4bit="%l4";
96 rem_4bit: label
101 .type rem_4bit,#object
102 .size rem_4bit,(.-rem_4bit)
115 add %o7,rem_4bit-1b,$rem_4bit
131 ldx [$rem_4bit+$remi],$rem
153 ldx [$rem_4bit+$remi],$rem
168 ldx [$rem_4bit
[all...]
ghash-parisc.pl
72 $rem_4bit="%r28";
117 blr %r0,$rem_4bit
120 andcm $rem_4bit,$rem,$rem_4bit
122 ldo L\$rem_4bit-L\$pic_gmult($rem_4bit),$rem_4bit
156 ldd $rem($rem_4bit),$rem
172 ldd $rem($rem_4bit),$rem
185 ldd $rem($rem_4bit),
[all...]
ghash-armv4.pl
116 $rem_4bit=$inp; # used in gcm_gmult_4bit
160 .type rem_4bit,%object
162 rem_4bit: label
167 .size rem_4bit,.-rem_4bit
172 adr $rem_4bit,rem_4bit
174 sub $rem_4bit,pc,#8+32 @ &rem_4bit
186 adr r12,rem_4bit
[all...]
ghash-alpha.pl
52 $rem_4bit="AT"; # $28
76 s8addq $remp,$rem_4bit,$remp
104 s8addq $remp,$rem_4bit,$remp
122 s8addq $remp,$rem_4bit,$remp
146 s8addq $remp,$rem_4bit,$remp
163 s8addq $remp,$rem_4bit,$remp
188 s8addq $remp,$rem_4bit,$remp
206 s8addq $remp,$rem_4bit,$remp
229 s8addq $remp,$rem_4bit,$remp
242 s8addq $remp,$rem_4bit,
455 rem_4bit: global() label
[all...]
ghash-x86.pl
355 &static_label("rem_4bit");
359 $S=12; # shift factor for rem_4bit
367 # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
370 # Reference to rem_4bit is scheduled so late that I had to >>4
371 rem_4bit elements. This resulted in 20-45% improvement
375 my $rem_4bit = "eax";
404 &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
414 &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8)); # last rem_4bit[rem]
421 &shl ($inp,4); # compensate for rem_4bit[
[all...]
ghash-ia64.pl
60 # &rem_4bit[Zlo&0xf]. It works, because rem_4bit is aligned at 128
139 add rem_4bitp=rem_4bit#-gcm_gmult_4bit#,rem_4bitp
421 .type rem_4bit#,\@object
422 rem_4bit: label
427 .size rem_4bit#,128
ghash-x86_64.pl
136 $rem_4bit = "%r11";
187 xor ($rem_4bit,$rem,8),$Zhi
202 xor ($rem_4bit,$rem,8),$Zhi
217 xor ($rem_4bit,$rem,8),$Zhi
229 xor ($rem_4bit,$rem,8),$Zhi
263 lea .Lrem_4bit(%rip),$rem_4bit
286 $rem_8bit=$rem_4bit;
/third_party/node/deps/openssl/config/archs/linux32-s390x/asm/crypto/modes/
ghash-s390x.S
13 larl %r14,rem_4bit
41 larl %r14,rem_4bit
129 sllg %r12,%r12,4 # correct last rem_4bit[rem]
141 rem_4bit: label
146 .type rem_4bit,@object
147 .size rem_4bit,(.-rem_4bit)
/third_party/node/deps/openssl/config/archs/linux32-s390x/asm_avx2/crypto/modes/
ghash-s390x.S
13 larl %r14,rem_4bit
41 larl %r14,rem_4bit
129 sllg %r12,%r12,4 # correct last rem_4bit[rem]
141 rem_4bit: label
146 .type rem_4bit,@object
147 .size rem_4bit,(.-rem_4bit)
/third_party/node/deps/openssl/config/archs/linux64-s390x/asm_avx2/crypto/modes/
ghash-s390x.S
13 larl %r14,rem_4bit
40 larl %r14,rem_4bit
128 sllg %r12,%r12,4 # correct last rem_4bit[rem]
140 rem_4bit: label
145 .type rem_4bit,@object
146 .size rem_4bit,(.-rem_4bit)
/third_party/node/deps/openssl/config/archs/linux64-s390x/asm/crypto/modes/
ghash-s390x.S
13 larl %r14,rem_4bit
40 larl %r14,rem_4bit
128 sllg %r12,%r12,4 # correct last rem_4bit[rem]
140 rem_4bit: label
145 .type rem_4bit,@object
146 .size rem_4bit,(.-rem_4bit)
/third_party/node/deps/openssl/config/archs/linux-armv4/asm/crypto/modes/
ghash-armv4.S
16 .type rem_4bit,%object
18 rem_4bit: label
23 .size rem_4bit,.-rem_4bit
28 adr r2,rem_4bit
30 sub r2,pc,#8+32 @ &rem_4bit
42 adr r12,rem_4bit
44 sub r12,pc,#8+48 @ &rem_4bit
49 ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ...
69 ldrh r8,[sp,r14] @ rem_4bit[re
[all...]
/third_party/node/deps/openssl/config/archs/linux-armv4/asm_avx2/crypto/modes/
ghash-armv4.S
16 .type rem_4bit,%object
18 rem_4bit: label
23 .size rem_4bit,.-rem_4bit
28 adr r2,rem_4bit
30 sub r2,pc,#8+32 @ &rem_4bit
42 adr r12,rem_4bit
44 sub r12,pc,#8+48 @ &rem_4bit
49 ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ...
69 ldrh r8,[sp,r14] @ rem_4bit[re
[all...]
/third_party/node/deps/openssl/openssl/crypto/modes/
gcm128.c
293 static const size_t rem_4bit[16] = { variable
319 Z.hi ^= rem_4bit[rem]; in gcm_gmult_4bit()
321 Z.hi ^= (u64)rem_4bit[rem] << 32; in gcm_gmult_4bit()
337 Z.hi ^= rem_4bit[rem]; in gcm_gmult_4bit()
339 Z.hi ^= (u64)rem_4bit[rem] << 32; in gcm_gmult_4bit()
399 Z.hi ^= rem_4bit[rem];
401 Z.hi ^= (u64)rem_4bit[rem] << 32;
418 Z.hi ^= rem_4bit[rem];
420 Z.hi ^= (u64)rem_4bit[rem] << 32;
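
The gcm128.c hits above show the 4-bit table-driven GHASH reduction: after each right shift of Z by 4, the dropped nibble rem is folded back in with Z.hi ^= rem_4bit[rem]. Below is a self-contained C sketch of what those 16 constants encode, derived from the single-bit reduction constant 0xE1 of GHASH's bit-reflected field; the helper names are mine, not OpenSSL's.

#include <stdint.h>
#include <stdio.h>

/* rem_4bit[r] packs, into the top 16 bits, the fold-back for the four
 * bits of r dropped by one shift-by-4: bit j of r contributes the
 * single-bit constant 0xE100 shifted right (3-j) places. */
static uint64_t rem_4bit[16];

static void build_rem_4bit(void)
{
    for (int r = 0; r < 16; r++) {
        uint64_t v = 0;
        for (int j = 0; j < 4; j++)
            if (r & (1 << j))
                v ^= (uint64_t)(0xE100 >> (3 - j)) << 48;
        rem_4bit[r] = v; /* r=1 -> 0x1C20<<48, r=8 -> 0xE100<<48 */
    }
}

typedef struct { uint64_t hi, lo; } u128;

/* One shift-and-reduce step, mirroring the gcm_gmult_4bit() hits:
 * shift the 128-bit Z right by 4 and XOR the table entry for the
 * nibble that fell off the low end. */
static u128 shift_reduce_4(u128 Z)
{
    uint64_t rem = Z.lo & 0xf;
    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
    Z.hi >>= 4;
    Z.hi ^= rem_4bit[rem];
    return Z;
}

int main(void)
{
    build_rem_4bit();
    /* spot-check against the constants in gcm128.c (PACK(0x1C20)) */
    printf("rem_4bit[1] = %#018llx\n", (unsigned long long)rem_4bit[1]);
    u128 Z = {0, 0xf};
    Z = shift_reduce_4(Z);
    printf("Z.hi = %#018llx\n", (unsigned long long)Z.hi);
    return 0;
}

On 32-bit builds gcm128.c keeps 32-bit table entries instead, which is why the hits also show the Z.hi ^= (u64)rem_4bit[rem] << 32 form.
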
/third_party/openssl/crypto/modes/
gcm128.c
293 static const size_t rem_4bit[16] = { variable
319 Z.hi ^= rem_4bit[rem]; in gcm_gmult_4bit()
321 Z.hi ^= (u64)rem_4bit[rem] << 32; in gcm_gmult_4bit()
337 Z.hi ^= rem_4bit[rem]; in gcm_gmult_4bit()
339 Z.hi ^= (u64)rem_4bit[rem] << 32; in gcm_gmult_4bit()
399 Z.hi ^= rem_4bit[rem];
401 Z.hi ^= (u64)rem_4bit[rem] << 32;
418 Z.hi ^= rem_4bit[rem];
420 Z.hi ^= (u64)rem_4bit[rem] << 32;
/third_party/node/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/modes/
ghash-x86_64.s
27 leaq L$rem_4bit(%rip),%r11
1800 L$rem_4bit:
