1#! /usr/bin/env perl 2# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. 3# 4# Licensed under the Apache License 2.0 (the "License"). You may not use 5# this file except in compliance with the License. You can obtain a copy 6# in the file LICENSE in the source distribution or at 7# https://www.openssl.org/source/license.html 8 9# ==================================================================== 10# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL 11# project. The module is, however, dual licensed under OpenSSL and 12# CRYPTOGAMS licenses depending on where you obtain it. For further 13# details see http://www.openssl.org/~appro/cryptogams/. 14# 15# Permission to use under GPLv2 terms is granted. 16# ==================================================================== 17# 18# SHA256/512 for ARMv8. 19# 20# Performance in cycles per processed byte and improvement coefficient 21# over code generated with "default" compiler: 22# 23# SHA256-hw SHA256(*) SHA512 24# Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) 25# Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) 26# Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) 27# Denver 2.01 10.5 (+26%) 6.70 (+8%) 28# X-Gene 20.0 (+100%) 12.8 (+300%(***)) 29# Mongoose 2.36 13.0 (+50%) 8.36 (+33%) 30# Kryo 1.92 17.4 (+30%) 11.2 (+8%) 31# ThunderX2 2.54 13.2 (+40%) 8.40 (+18%) 32# 33# (*) Software SHA256 results are of lesser relevance, presented 34# mostly for informational purposes. 35# (**) The result is a trade-off: it's possible to improve it by 36# 10% (or by 1 cycle per round), but at the cost of 20% loss 37# on Cortex-A53 (or by 4 cycles per round). 38# (***) Super-impressive coefficients over gcc-generated code are 39# indication of some compiler "pathology", most notably code 40# generated with -mgeneral-regs-only is significantly faster 41# and the gap is only 40-90%. 42# 43# October 2016. 44# 45# Originally it was reckoned that it makes no sense to implement NEON 46# version of SHA256 for 64-bit processors. This is because performance 47# improvement on most wide-spread Cortex-A5x processors was observed 48# to be marginal, same on Cortex-A53 and ~10% on A57. But then it was 49# observed that 32-bit NEON SHA256 performs significantly better than 50# 64-bit scalar version on *some* of the more recent processors. As 51# result 64-bit NEON version of SHA256 was added to provide best 52# all-round performance. For example it executes ~30% faster on X-Gene 53# and Mongoose. [For reference, NEON version of SHA512 is bound to 54# deliver much less improvement, likely *negative* on Cortex-A5x. 55# Which is why NEON support is limited to SHA256.] 56 57# $output is the last argument if it looks like a file (it has an extension) 58# $flavour is the first argument if it doesn't look like a file 59$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef; 60$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef; 61 62if ($flavour && $flavour ne "void") { 63 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; 64 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or 65 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or 66 die "can't locate arm-xlate.pl"; 67 68 open OUT,"| \"$^X\" $xlate $flavour \"$output\"" 69 or die "can't call $xlate: $!"; 70 *STDOUT=*OUT; 71} else { 72 $output and open STDOUT,">$output"; 73} 74 75if ($output =~ /512/) { 76 $BITS=512; 77 $SZ=8; 78 @Sigma0=(28,34,39); 79 @Sigma1=(14,18,41); 80 @sigma0=(1, 8, 7); 81 @sigma1=(19,61, 6); 82 $rounds=80; 83 $reg_t="x"; 84} else { 85 $BITS=256; 86 $SZ=4; 87 @Sigma0=( 2,13,22); 88 @Sigma1=( 6,11,25); 89 @sigma0=( 7,18, 3); 90 @sigma1=(17,19,10); 91 $rounds=64; 92 $reg_t="w"; 93} 94 95$func="sha${BITS}_block_data_order"; 96 97($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30)); 98 99@X=map("$reg_t$_",(3..15,0..2)); 100@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27)); 101($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28)); 102 103sub BODY_00_xx { 104my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_; 105my $j=($i+1)&15; 106my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]); 107 $T0=@X[$i+3] if ($i<11); 108 109$code.=<<___ if ($i<16); 110#ifndef __AARCH64EB__ 111 rev @X[$i],@X[$i] // $i 112#endif 113___ 114$code.=<<___ if ($i<13 && ($i&1)); 115 ldp @X[$i+1],@X[$i+2],[$inp],#2*$SZ 116___ 117$code.=<<___ if ($i==13); 118 ldp @X[14],@X[15],[$inp] 119___ 120$code.=<<___ if ($i>=14); 121 ldr @X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`] 122___ 123$code.=<<___ if ($i>0 && $i<16); 124 add $a,$a,$t1 // h+=Sigma0(a) 125___ 126$code.=<<___ if ($i>=11); 127 str @X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`] 128___ 129# While ARMv8 specifies merged rotate-n-logical operation such as 130# 'eor x,y,z,ror#n', it was found to negatively affect performance 131# on Apple A7. The reason seems to be that it requires even 'y' to 132# be available earlier. This means that such merged instruction is 133# not necessarily best choice on critical path... On the other hand 134# Cortex-A5x handles merged instructions much better than disjoint 135# rotate and logical... See (**) footnote above. 136$code.=<<___ if ($i<15); 137 ror $t0,$e,#$Sigma1[0] 138 add $h,$h,$t2 // h+=K[i] 139 eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]` 140 and $t1,$f,$e 141 bic $t2,$g,$e 142 add $h,$h,@X[$i&15] // h+=X[i] 143 orr $t1,$t1,$t2 // Ch(e,f,g) 144 eor $t2,$a,$b // a^b, b^c in next round 145 eor $t0,$t0,$T0,ror#$Sigma1[1] // Sigma1(e) 146 ror $T0,$a,#$Sigma0[0] 147 add $h,$h,$t1 // h+=Ch(e,f,g) 148 eor $t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]` 149 add $h,$h,$t0 // h+=Sigma1(e) 150 and $t3,$t3,$t2 // (b^c)&=(a^b) 151 add $d,$d,$h // d+=h 152 eor $t3,$t3,$b // Maj(a,b,c) 153 eor $t1,$T0,$t1,ror#$Sigma0[1] // Sigma0(a) 154 add $h,$h,$t3 // h+=Maj(a,b,c) 155 ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round 156 //add $h,$h,$t1 // h+=Sigma0(a) 157___ 158$code.=<<___ if ($i>=15); 159 ror $t0,$e,#$Sigma1[0] 160 add $h,$h,$t2 // h+=K[i] 161 ror $T1,@X[($j+1)&15],#$sigma0[0] 162 and $t1,$f,$e 163 ror $T2,@X[($j+14)&15],#$sigma1[0] 164 bic $t2,$g,$e 165 ror $T0,$a,#$Sigma0[0] 166 add $h,$h,@X[$i&15] // h+=X[i] 167 eor $t0,$t0,$e,ror#$Sigma1[1] 168 eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1] 169 orr $t1,$t1,$t2 // Ch(e,f,g) 170 eor $t2,$a,$b // a^b, b^c in next round 171 eor $t0,$t0,$e,ror#$Sigma1[2] // Sigma1(e) 172 eor $T0,$T0,$a,ror#$Sigma0[1] 173 add $h,$h,$t1 // h+=Ch(e,f,g) 174 and $t3,$t3,$t2 // (b^c)&=(a^b) 175 eor $T2,$T2,@X[($j+14)&15],ror#$sigma1[1] 176 eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1]) 177 add $h,$h,$t0 // h+=Sigma1(e) 178 eor $t3,$t3,$b // Maj(a,b,c) 179 eor $t1,$T0,$a,ror#$Sigma0[2] // Sigma0(a) 180 eor $T2,$T2,@X[($j+14)&15],lsr#$sigma1[2] // sigma1(X[i+14]) 181 add @X[$j],@X[$j],@X[($j+9)&15] 182 add $d,$d,$h // d+=h 183 add $h,$h,$t3 // h+=Maj(a,b,c) 184 ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round 185 add @X[$j],@X[$j],$T1 186 add $h,$h,$t1 // h+=Sigma0(a) 187 add @X[$j],@X[$j],$T2 188___ 189 ($t2,$t3)=($t3,$t2); 190} 191 192$code.=<<___; 193#ifndef __KERNEL__ 194# include "arm_arch.h" 195.extern OPENSSL_armcap_P 196.hidden OPENSSL_armcap_P 197#endif 198 199.text 200 201.globl $func 202.type $func,%function 203.align 6 204$func: 205#ifndef __KERNEL__ 206 adrp x16,OPENSSL_armcap_P 207 ldr w16,[x16,#:lo12:OPENSSL_armcap_P] 208___ 209$code.=<<___ if ($SZ==4); 210 tst w16,#ARMV8_SHA256 211 b.ne .Lv8_entry 212 tst w16,#ARMV7_NEON 213 b.ne .Lneon_entry 214___ 215$code.=<<___ if ($SZ==8); 216 tst w16,#ARMV8_SHA512 217 b.ne .Lv8_entry 218___ 219$code.=<<___; 220#endif 221 .inst 0xd503233f // paciasp 222 stp x29,x30,[sp,#-128]! 223 add x29,sp,#0 224 225 stp x19,x20,[sp,#16] 226 stp x21,x22,[sp,#32] 227 stp x23,x24,[sp,#48] 228 stp x25,x26,[sp,#64] 229 stp x27,x28,[sp,#80] 230 sub sp,sp,#4*$SZ 231 232 ldp $A,$B,[$ctx] // load context 233 ldp $C,$D,[$ctx,#2*$SZ] 234 ldp $E,$F,[$ctx,#4*$SZ] 235 add $num,$inp,$num,lsl#`log(16*$SZ)/log(2)` // end of input 236 ldp $G,$H,[$ctx,#6*$SZ] 237 adrp $Ktbl,.LK$BITS 238 add $Ktbl,$Ktbl,:lo12:.LK$BITS 239 stp $ctx,$num,[x29,#96] 240 241.Loop: 242 ldp @X[0],@X[1],[$inp],#2*$SZ 243 ldr $t2,[$Ktbl],#$SZ // *K++ 244 eor $t3,$B,$C // magic seed 245 str $inp,[x29,#112] 246___ 247for ($i=0;$i<16;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); } 248$code.=".Loop_16_xx:\n"; 249for (;$i<32;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); } 250$code.=<<___; 251 cbnz $t2,.Loop_16_xx 252 253 ldp $ctx,$num,[x29,#96] 254 ldr $inp,[x29,#112] 255 sub $Ktbl,$Ktbl,#`$SZ*($rounds+1)` // rewind 256 257 ldp @X[0],@X[1],[$ctx] 258 ldp @X[2],@X[3],[$ctx,#2*$SZ] 259 add $inp,$inp,#14*$SZ // advance input pointer 260 ldp @X[4],@X[5],[$ctx,#4*$SZ] 261 add $A,$A,@X[0] 262 ldp @X[6],@X[7],[$ctx,#6*$SZ] 263 add $B,$B,@X[1] 264 add $C,$C,@X[2] 265 add $D,$D,@X[3] 266 stp $A,$B,[$ctx] 267 add $E,$E,@X[4] 268 add $F,$F,@X[5] 269 stp $C,$D,[$ctx,#2*$SZ] 270 add $G,$G,@X[6] 271 add $H,$H,@X[7] 272 cmp $inp,$num 273 stp $E,$F,[$ctx,#4*$SZ] 274 stp $G,$H,[$ctx,#6*$SZ] 275 b.ne .Loop 276 277 ldp x19,x20,[x29,#16] 278 add sp,sp,#4*$SZ 279 ldp x21,x22,[x29,#32] 280 ldp x23,x24,[x29,#48] 281 ldp x25,x26,[x29,#64] 282 ldp x27,x28,[x29,#80] 283 ldp x29,x30,[sp],#128 284 .inst 0xd50323bf // autiasp 285 ret 286.size $func,.-$func 287 288.rodata 289 290.align 6 291.type .LK$BITS,%object 292.LK$BITS: 293___ 294$code.=<<___ if ($SZ==8); 295 .quad 0x428a2f98d728ae22,0x7137449123ef65cd 296 .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc 297 .quad 0x3956c25bf348b538,0x59f111f1b605d019 298 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 299 .quad 0xd807aa98a3030242,0x12835b0145706fbe 300 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 301 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 302 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 303 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 304 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 305 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 306 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 307 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 308 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 309 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 310 .quad 0x06ca6351e003826f,0x142929670a0e6e70 311 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 312 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df 313 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 314 .quad 0x81c2c92e47edaee6,0x92722c851482353b 315 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 316 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 317 .quad 0xd192e819d6ef5218,0xd69906245565a910 318 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 319 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 320 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 321 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb 322 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 323 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 324 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec 325 .quad 0x90befffa23631e28,0xa4506cebde82bde9 326 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b 327 .quad 0xca273eceea26619c,0xd186b8c721c0c207 328 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 329 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 330 .quad 0x113f9804bef90dae,0x1b710b35131c471b 331 .quad 0x28db77f523047d84,0x32caab7b40c72493 332 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c 333 .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a 334 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 335 .quad 0 // terminator 336___ 337$code.=<<___ if ($SZ==4); 338 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 339 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 340 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 341 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 342 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc 343 .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da 344 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 345 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 346 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 347 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 348 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 349 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 350 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 351 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 352 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 353 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 354 .long 0 //terminator 355___ 356$code.=<<___; 357.size .LK$BITS,.-.LK$BITS 358.asciz "SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>" 359.align 2 360 361.text 362___ 363 364if ($SZ==4) { 365my $Ktbl="x3"; 366 367my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2)); 368my @MSG=map("v$_.16b",(4..7)); 369my ($W0,$W1)=("v16.4s","v17.4s"); 370my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b"); 371 372$code.=<<___; 373#ifndef __KERNEL__ 374.type sha256_block_armv8,%function 375.align 6 376sha256_block_armv8: 377.Lv8_entry: 378 stp x29,x30,[sp,#-16]! 379 add x29,sp,#0 380 381 ld1.32 {$ABCD,$EFGH},[$ctx] 382 adrp $Ktbl,.LK256 383 add $Ktbl,$Ktbl,:lo12:.LK256 384 385.Loop_hw: 386 ld1 {@MSG[0]-@MSG[3]},[$inp],#64 387 sub $num,$num,#1 388 ld1.32 {$W0},[$Ktbl],#16 389 rev32 @MSG[0],@MSG[0] 390 rev32 @MSG[1],@MSG[1] 391 rev32 @MSG[2],@MSG[2] 392 rev32 @MSG[3],@MSG[3] 393 orr $ABCD_SAVE,$ABCD,$ABCD // offload 394 orr $EFGH_SAVE,$EFGH,$EFGH 395___ 396for($i=0;$i<12;$i++) { 397$code.=<<___; 398 ld1.32 {$W1},[$Ktbl],#16 399 add.i32 $W0,$W0,@MSG[0] 400 sha256su0 @MSG[0],@MSG[1] 401 orr $abcd,$ABCD,$ABCD 402 sha256h $ABCD,$EFGH,$W0 403 sha256h2 $EFGH,$abcd,$W0 404 sha256su1 @MSG[0],@MSG[2],@MSG[3] 405___ 406 ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG)); 407} 408$code.=<<___; 409 ld1.32 {$W1},[$Ktbl],#16 410 add.i32 $W0,$W0,@MSG[0] 411 orr $abcd,$ABCD,$ABCD 412 sha256h $ABCD,$EFGH,$W0 413 sha256h2 $EFGH,$abcd,$W0 414 415 ld1.32 {$W0},[$Ktbl],#16 416 add.i32 $W1,$W1,@MSG[1] 417 orr $abcd,$ABCD,$ABCD 418 sha256h $ABCD,$EFGH,$W1 419 sha256h2 $EFGH,$abcd,$W1 420 421 ld1.32 {$W1},[$Ktbl] 422 add.i32 $W0,$W0,@MSG[2] 423 sub $Ktbl,$Ktbl,#$rounds*$SZ-16 // rewind 424 orr $abcd,$ABCD,$ABCD 425 sha256h $ABCD,$EFGH,$W0 426 sha256h2 $EFGH,$abcd,$W0 427 428 add.i32 $W1,$W1,@MSG[3] 429 orr $abcd,$ABCD,$ABCD 430 sha256h $ABCD,$EFGH,$W1 431 sha256h2 $EFGH,$abcd,$W1 432 433 add.i32 $ABCD,$ABCD,$ABCD_SAVE 434 add.i32 $EFGH,$EFGH,$EFGH_SAVE 435 436 cbnz $num,.Loop_hw 437 438 st1.32 {$ABCD,$EFGH},[$ctx] 439 440 ldr x29,[sp],#16 441 ret 442.size sha256_block_armv8,.-sha256_block_armv8 443#endif 444___ 445} 446 447if ($SZ==4) { ######################################### NEON stuff # 448# You'll surely note a lot of similarities with sha256-armv4 module, 449# and of course it's not a coincidence. sha256-armv4 was used as 450# initial template, but was adapted for ARMv8 instruction set and 451# extensively re-tuned for all-round performance. 452 453my @V = ($A,$B,$C,$D,$E,$F,$G,$H) = map("w$_",(3..10)); 454my ($t0,$t1,$t2,$t3,$t4) = map("w$_",(11..15)); 455my $Ktbl="x16"; 456my $Xfer="x17"; 457my @X = map("q$_",(0..3)); 458my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19)); 459my $j=0; 460 461sub AUTOLOAD() # thunk [simplified] x86-style perlasm 462{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; 463 my $arg = pop; 464 $arg = "#$arg" if ($arg*1 eq $arg); 465 $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; 466} 467 468sub Dscalar { shift =~ m|[qv]([0-9]+)|?"d$1":""; } 469sub Dlo { shift =~ m|[qv]([0-9]+)|?"v$1.d[0]":""; } 470sub Dhi { shift =~ m|[qv]([0-9]+)|?"v$1.d[1]":""; } 471 472sub Xupdate() 473{ use integer; 474 my $body = shift; 475 my @insns = (&$body,&$body,&$body,&$body); 476 my ($a,$b,$c,$d,$e,$f,$g,$h); 477 478 &ext_8 ($T0,@X[0],@X[1],4); # X[1..4] 479 eval(shift(@insns)); 480 eval(shift(@insns)); 481 eval(shift(@insns)); 482 &ext_8 ($T3,@X[2],@X[3],4); # X[9..12] 483 eval(shift(@insns)); 484 eval(shift(@insns)); 485 &mov (&Dscalar($T7),&Dhi(@X[3])); # X[14..15] 486 eval(shift(@insns)); 487 eval(shift(@insns)); 488 &ushr_32 ($T2,$T0,$sigma0[0]); 489 eval(shift(@insns)); 490 &ushr_32 ($T1,$T0,$sigma0[2]); 491 eval(shift(@insns)); 492 &add_32 (@X[0],@X[0],$T3); # X[0..3] += X[9..12] 493 eval(shift(@insns)); 494 &sli_32 ($T2,$T0,32-$sigma0[0]); 495 eval(shift(@insns)); 496 eval(shift(@insns)); 497 &ushr_32 ($T3,$T0,$sigma0[1]); 498 eval(shift(@insns)); 499 eval(shift(@insns)); 500 &eor_8 ($T1,$T1,$T2); 501 eval(shift(@insns)); 502 eval(shift(@insns)); 503 &sli_32 ($T3,$T0,32-$sigma0[1]); 504 eval(shift(@insns)); 505 eval(shift(@insns)); 506 &ushr_32 ($T4,$T7,$sigma1[0]); 507 eval(shift(@insns)); 508 eval(shift(@insns)); 509 &eor_8 ($T1,$T1,$T3); # sigma0(X[1..4]) 510 eval(shift(@insns)); 511 eval(shift(@insns)); 512 &sli_32 ($T4,$T7,32-$sigma1[0]); 513 eval(shift(@insns)); 514 eval(shift(@insns)); 515 &ushr_32 ($T5,$T7,$sigma1[2]); 516 eval(shift(@insns)); 517 eval(shift(@insns)); 518 &ushr_32 ($T3,$T7,$sigma1[1]); 519 eval(shift(@insns)); 520 eval(shift(@insns)); 521 &add_32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4]) 522 eval(shift(@insns)); 523 eval(shift(@insns)); 524 &sli_u32 ($T3,$T7,32-$sigma1[1]); 525 eval(shift(@insns)); 526 eval(shift(@insns)); 527 &eor_8 ($T5,$T5,$T4); 528 eval(shift(@insns)); 529 eval(shift(@insns)); 530 eval(shift(@insns)); 531 &eor_8 ($T5,$T5,$T3); # sigma1(X[14..15]) 532 eval(shift(@insns)); 533 eval(shift(@insns)); 534 eval(shift(@insns)); 535 &add_32 (@X[0],@X[0],$T5); # X[0..1] += sigma1(X[14..15]) 536 eval(shift(@insns)); 537 eval(shift(@insns)); 538 eval(shift(@insns)); 539 &ushr_32 ($T6,@X[0],$sigma1[0]); 540 eval(shift(@insns)); 541 &ushr_32 ($T7,@X[0],$sigma1[2]); 542 eval(shift(@insns)); 543 eval(shift(@insns)); 544 &sli_32 ($T6,@X[0],32-$sigma1[0]); 545 eval(shift(@insns)); 546 &ushr_32 ($T5,@X[0],$sigma1[1]); 547 eval(shift(@insns)); 548 eval(shift(@insns)); 549 &eor_8 ($T7,$T7,$T6); 550 eval(shift(@insns)); 551 eval(shift(@insns)); 552 &sli_32 ($T5,@X[0],32-$sigma1[1]); 553 eval(shift(@insns)); 554 eval(shift(@insns)); 555 &ld1_32 ("{$T0}","[$Ktbl], #16"); 556 eval(shift(@insns)); 557 &eor_8 ($T7,$T7,$T5); # sigma1(X[16..17]) 558 eval(shift(@insns)); 559 eval(shift(@insns)); 560 &eor_8 ($T5,$T5,$T5); 561 eval(shift(@insns)); 562 eval(shift(@insns)); 563 &mov (&Dhi($T5), &Dlo($T7)); 564 eval(shift(@insns)); 565 eval(shift(@insns)); 566 eval(shift(@insns)); 567 &add_32 (@X[0],@X[0],$T5); # X[2..3] += sigma1(X[16..17]) 568 eval(shift(@insns)); 569 eval(shift(@insns)); 570 eval(shift(@insns)); 571 &add_32 ($T0,$T0,@X[0]); 572 while($#insns>=1) { eval(shift(@insns)); } 573 &st1_32 ("{$T0}","[$Xfer], #16"); 574 eval(shift(@insns)); 575 576 push(@X,shift(@X)); # "rotate" X[] 577} 578 579sub Xpreload() 580{ use integer; 581 my $body = shift; 582 my @insns = (&$body,&$body,&$body,&$body); 583 my ($a,$b,$c,$d,$e,$f,$g,$h); 584 585 eval(shift(@insns)); 586 eval(shift(@insns)); 587 &ld1_8 ("{@X[0]}","[$inp],#16"); 588 eval(shift(@insns)); 589 eval(shift(@insns)); 590 &ld1_32 ("{$T0}","[$Ktbl],#16"); 591 eval(shift(@insns)); 592 eval(shift(@insns)); 593 eval(shift(@insns)); 594 eval(shift(@insns)); 595 &rev32 (@X[0],@X[0]); 596 eval(shift(@insns)); 597 eval(shift(@insns)); 598 eval(shift(@insns)); 599 eval(shift(@insns)); 600 &add_32 ($T0,$T0,@X[0]); 601 foreach (@insns) { eval; } # remaining instructions 602 &st1_32 ("{$T0}","[$Xfer], #16"); 603 604 push(@X,shift(@X)); # "rotate" X[] 605} 606 607sub body_00_15 () { 608 ( 609 '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'. 610 '&add ($h,$h,$t1)', # h+=X[i]+K[i] 611 '&add ($a,$a,$t4);'. # h+=Sigma0(a) from the past 612 '&and ($t1,$f,$e)', 613 '&bic ($t4,$g,$e)', 614 '&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))', 615 '&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past 616 '&orr ($t1,$t1,$t4)', # Ch(e,f,g) 617 '&eor ($t0,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e) 618 '&eor ($t4,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))', 619 '&add ($h,$h,$t1)', # h+=Ch(e,f,g) 620 '&ror ($t0,$t0,"#$Sigma1[0]")', 621 '&eor ($t2,$a,$b)', # a^b, b^c in next round 622 '&eor ($t4,$t4,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a) 623 '&add ($h,$h,$t0)', # h+=Sigma1(e) 624 '&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'. 625 '&ldr ($t1,"[$Ktbl]") if ($j==15);'. 626 '&and ($t3,$t3,$t2)', # (b^c)&=(a^b) 627 '&ror ($t4,$t4,"#$Sigma0[0]")', 628 '&add ($d,$d,$h)', # d+=h 629 '&eor ($t3,$t3,$b)', # Maj(a,b,c) 630 '$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);' 631 ) 632} 633 634$code.=<<___; 635#ifdef __KERNEL__ 636.globl sha256_block_neon 637#endif 638.type sha256_block_neon,%function 639.align 4 640sha256_block_neon: 641.Lneon_entry: 642 stp x29, x30, [sp, #-16]! 643 mov x29, sp 644 sub sp,sp,#16*4 645 646 adrp $Ktbl,.LK256 647 add $Ktbl,$Ktbl,:lo12:.LK256 648 add $num,$inp,$num,lsl#6 // len to point at the end of inp 649 650 ld1.8 {@X[0]},[$inp], #16 651 ld1.8 {@X[1]},[$inp], #16 652 ld1.8 {@X[2]},[$inp], #16 653 ld1.8 {@X[3]},[$inp], #16 654 ld1.32 {$T0},[$Ktbl], #16 655 ld1.32 {$T1},[$Ktbl], #16 656 ld1.32 {$T2},[$Ktbl], #16 657 ld1.32 {$T3},[$Ktbl], #16 658 rev32 @X[0],@X[0] // yes, even on 659 rev32 @X[1],@X[1] // big-endian 660 rev32 @X[2],@X[2] 661 rev32 @X[3],@X[3] 662 mov $Xfer,sp 663 add.32 $T0,$T0,@X[0] 664 add.32 $T1,$T1,@X[1] 665 add.32 $T2,$T2,@X[2] 666 st1.32 {$T0-$T1},[$Xfer], #32 667 add.32 $T3,$T3,@X[3] 668 st1.32 {$T2-$T3},[$Xfer] 669 sub $Xfer,$Xfer,#32 670 671 ldp $A,$B,[$ctx] 672 ldp $C,$D,[$ctx,#8] 673 ldp $E,$F,[$ctx,#16] 674 ldp $G,$H,[$ctx,#24] 675 ldr $t1,[sp,#0] 676 mov $t2,wzr 677 eor $t3,$B,$C 678 mov $t4,wzr 679 b .L_00_48 680 681.align 4 682.L_00_48: 683___ 684 &Xupdate(\&body_00_15); 685 &Xupdate(\&body_00_15); 686 &Xupdate(\&body_00_15); 687 &Xupdate(\&body_00_15); 688$code.=<<___; 689 cmp $t1,#0 // check for K256 terminator 690 ldr $t1,[sp,#0] 691 sub $Xfer,$Xfer,#64 692 bne .L_00_48 693 694 sub $Ktbl,$Ktbl,#256 // rewind $Ktbl 695 cmp $inp,$num 696 mov $Xfer, #64 697 csel $Xfer, $Xfer, xzr, eq 698 sub $inp,$inp,$Xfer // avoid SEGV 699 mov $Xfer,sp 700___ 701 &Xpreload(\&body_00_15); 702 &Xpreload(\&body_00_15); 703 &Xpreload(\&body_00_15); 704 &Xpreload(\&body_00_15); 705$code.=<<___; 706 add $A,$A,$t4 // h+=Sigma0(a) from the past 707 ldp $t0,$t1,[$ctx,#0] 708 add $A,$A,$t2 // h+=Maj(a,b,c) from the past 709 ldp $t2,$t3,[$ctx,#8] 710 add $A,$A,$t0 // accumulate 711 add $B,$B,$t1 712 ldp $t0,$t1,[$ctx,#16] 713 add $C,$C,$t2 714 add $D,$D,$t3 715 ldp $t2,$t3,[$ctx,#24] 716 add $E,$E,$t0 717 add $F,$F,$t1 718 ldr $t1,[sp,#0] 719 stp $A,$B,[$ctx,#0] 720 add $G,$G,$t2 721 mov $t2,wzr 722 stp $C,$D,[$ctx,#8] 723 add $H,$H,$t3 724 stp $E,$F,[$ctx,#16] 725 eor $t3,$B,$C 726 stp $G,$H,[$ctx,#24] 727 mov $t4,wzr 728 mov $Xfer,sp 729 b.ne .L_00_48 730 731 ldr x29,[x29] 732 add sp,sp,#16*4+16 733 ret 734.size sha256_block_neon,.-sha256_block_neon 735___ 736} 737 738if ($SZ==8) { 739my $Ktbl="x3"; 740 741my @H = map("v$_.16b",(0..4)); 742my ($fg,$de,$m9_10)=map("v$_.16b",(5..7)); 743my @MSG=map("v$_.16b",(16..23)); 744my ($W0,$W1)=("v24.2d","v25.2d"); 745my ($AB,$CD,$EF,$GH)=map("v$_.16b",(26..29)); 746 747$code.=<<___; 748#ifndef __KERNEL__ 749.type sha512_block_armv8,%function 750.align 6 751sha512_block_armv8: 752.Lv8_entry: 753 stp x29,x30,[sp,#-16]! 754 add x29,sp,#0 755 756 ld1 {@MSG[0]-@MSG[3]},[$inp],#64 // load input 757 ld1 {@MSG[4]-@MSG[7]},[$inp],#64 758 759 ld1.64 {@H[0]-@H[3]},[$ctx] // load context 760 adrp $Ktbl,.LK512 761 add $Ktbl,$Ktbl,:lo12:.LK512 762 763 rev64 @MSG[0],@MSG[0] 764 rev64 @MSG[1],@MSG[1] 765 rev64 @MSG[2],@MSG[2] 766 rev64 @MSG[3],@MSG[3] 767 rev64 @MSG[4],@MSG[4] 768 rev64 @MSG[5],@MSG[5] 769 rev64 @MSG[6],@MSG[6] 770 rev64 @MSG[7],@MSG[7] 771 b .Loop_hw 772 773.align 4 774.Loop_hw: 775 ld1.64 {$W0},[$Ktbl],#16 776 subs $num,$num,#1 777 sub x4,$inp,#128 778 orr $AB,@H[0],@H[0] // offload 779 orr $CD,@H[1],@H[1] 780 orr $EF,@H[2],@H[2] 781 orr $GH,@H[3],@H[3] 782 csel $inp,$inp,x4,ne // conditional rewind 783___ 784for($i=0;$i<32;$i++) { 785$code.=<<___; 786 add.i64 $W0,$W0,@MSG[0] 787 ld1.64 {$W1},[$Ktbl],#16 788 ext $W0,$W0,$W0,#8 789 ext $fg,@H[2],@H[3],#8 790 ext $de,@H[1],@H[2],#8 791 add.i64 @H[3],@H[3],$W0 // "T1 + H + K512[i]" 792 sha512su0 @MSG[0],@MSG[1] 793 ext $m9_10,@MSG[4],@MSG[5],#8 794 sha512h @H[3],$fg,$de 795 sha512su1 @MSG[0],@MSG[7],$m9_10 796 add.i64 @H[4],@H[1],@H[3] // "D + T1" 797 sha512h2 @H[3],$H[1],@H[0] 798___ 799 ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG)); 800 @H = (@H[3],@H[0],@H[4],@H[2],@H[1]); 801} 802for(;$i<40;$i++) { 803$code.=<<___ if ($i<39); 804 ld1.64 {$W1},[$Ktbl],#16 805___ 806$code.=<<___ if ($i==39); 807 sub $Ktbl,$Ktbl,#$rounds*$SZ // rewind 808___ 809$code.=<<___; 810 add.i64 $W0,$W0,@MSG[0] 811 ld1 {@MSG[0]},[$inp],#16 // load next input 812 ext $W0,$W0,$W0,#8 813 ext $fg,@H[2],@H[3],#8 814 ext $de,@H[1],@H[2],#8 815 add.i64 @H[3],@H[3],$W0 // "T1 + H + K512[i]" 816 sha512h @H[3],$fg,$de 817 rev64 @MSG[0],@MSG[0] 818 add.i64 @H[4],@H[1],@H[3] // "D + T1" 819 sha512h2 @H[3],$H[1],@H[0] 820___ 821 ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG)); 822 @H = (@H[3],@H[0],@H[4],@H[2],@H[1]); 823} 824$code.=<<___; 825 add.i64 @H[0],@H[0],$AB // accumulate 826 add.i64 @H[1],@H[1],$CD 827 add.i64 @H[2],@H[2],$EF 828 add.i64 @H[3],@H[3],$GH 829 830 cbnz $num,.Loop_hw 831 832 st1.64 {@H[0]-@H[3]},[$ctx] // store context 833 834 ldr x29,[sp],#16 835 ret 836.size sha512_block_armv8,.-sha512_block_armv8 837#endif 838___ 839} 840 841{ my %opcode = ( 842 "sha256h" => 0x5e004000, "sha256h2" => 0x5e005000, 843 "sha256su0" => 0x5e282800, "sha256su1" => 0x5e006000 ); 844 845 sub unsha256 { 846 my ($mnemonic,$arg)=@_; 847 848 $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o 849 && 850 sprintf ".inst\t0x%08x\t//%s %s", 851 $opcode{$mnemonic}|$1|($2<<5)|($3<<16), 852 $mnemonic,$arg; 853 } 854} 855 856{ my %opcode = ( 857 "sha512h" => 0xce608000, "sha512h2" => 0xce608400, 858 "sha512su0" => 0xcec08000, "sha512su1" => 0xce608800 ); 859 860 sub unsha512 { 861 my ($mnemonic,$arg)=@_; 862 863 $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o 864 && 865 sprintf ".inst\t0x%08x\t//%s %s", 866 $opcode{$mnemonic}|$1|($2<<5)|($3<<16), 867 $mnemonic,$arg; 868 } 869} 870 871open SELF,$0; 872while(<SELF>) { 873 next if (/^#!/); 874 last if (!s/^#/\/\// and !/^$/); 875 print; 876} 877close SELF; 878 879foreach(split("\n",$code)) { 880 881 s/\`([^\`]*)\`/eval($1)/ge; 882 883 s/\b(sha512\w+)\s+([qv].*)/unsha512($1,$2)/ge or 884 s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge; 885 886 s/\bq([0-9]+)\b/v$1.16b/g; # old->new registers 887 888 s/\.[ui]?8(\s)/$1/; 889 s/\.\w?64\b// and s/\.16b/\.2d/g or 890 s/\.\w?32\b// and s/\.16b/\.4s/g; 891 m/\bext\b/ and s/\.2d/\.16b/g or 892 m/(ld|st)1[^\[]+\[0\]/ and s/\.4s/\.s/g; 893 894 print $_,"\n"; 895} 896 897close STDOUT or die "error closing STDOUT: $!"; 898