#! /usr/bin/env perl
# Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
##
######################################################################
# ARMv8 NEON adaptation by <appro@openssl.org>
#
# The reason for undertaking this effort is that there is at least one
# popular SoC based on Cortex-A53 that doesn't have crypto extensions.
#
#                   CBC enc     ECB enc/dec(*)   [bit-sliced enc/dec]
# Cortex-A53        21.5        18.1/20.6        [17.5/19.8         ]
# Cortex-A57        36.0(**)    20.4/24.9(**)    [14.4/16.6         ]
# X-Gene            45.9(**)    45.8/57.7(**)    [33.1/37.6(**)     ]
# Denver(***)       16.6(**)    15.1/17.8(**)    [8.80/9.93         ]
# Apple A7(***)     22.7(**)    10.9/14.3        [8.45/10.0         ]
# Mongoose(***)     26.3(**)    21.0/25.0(**)    [13.3/16.8         ]
# ThunderX2(***)    39.4(**)    33.8/48.6(**)
#
# (*)   ECB denotes approximate result for parallelizable modes
#       such as CBC decrypt, CTR, etc.;
# (**)  these results are worse than scalar compiler-generated
#       code, but it's constant-time and therefore preferred;
# (***) presented for reference/comparison purposes;

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;
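# Typical perlasm invocation, for reference (the flavour and the output
# file name below are illustrative, not prescriptive):
#
#   perl vpaes-armv8.pl linux64 vpaes-armv8.S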
$code.=<<___;
.rodata

.type	_vpaes_consts,%object
.align	7	// totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:	// mc_forward
	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:// mc_backward
	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:		// sr
	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
	.quad	0x0F060D040B020900, 0x070E050C030A0108
	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
.Lk_inv:	// inv, inva
	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:	// input transform (lo, hi)
	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:	// sbou, sbot
	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:	// sb1u, sb1t
	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:	// sb2u, sb2t
	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
.Lk_dipt:	// decryption input transform
	.quad	0x0F505B040B545F00, 0x154A411E114E451A
	.quad	0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo:	// decryption sbox final output
	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9:	// decryption sbox output *9*u, *9*t
	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:	// decryption sbox output *D*u, *D*t
	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:	// decryption sbox output *B*u, *B*t
	.quad	0xD022649296B44200, 0x602646F6B0F2D404
	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:	// decryption sbox output *E*u, *E*t
	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
.Lk_dksd:	// decryption key schedule: invskew x*D
	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:	// decryption key schedule: invskew x*B
	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:	// decryption key schedule: invskew x*E + 0x63
	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:	// decryption key schedule: invskew x*9
	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

.Lk_rcon:	// rcon
	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_opt:	// output transform
	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:	// deskew tables: inverts the sbox's "skew"
	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
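//
// (Note: everything above is emitted into .rodata; the code below
// reaches it PC-relatively via adrp/add :lo12: pairs, which keeps the
// module position-independent.)
//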
.asciz	"Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
.size	_vpaes_consts,.-_vpaes_consts
.align	6

.text

___

{
my ($inp,$out,$key) = map("x$_",(0..2));

my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));

$code.=<<___;
//
// _aes_preheat
//
// Fills register %r10 -> .aes_consts (so you can -fPIC)
// and %xmm9-%xmm15 as specified below.
//
.type	_vpaes_encrypt_preheat,%function
.align	4
_vpaes_encrypt_preheat:
	adrp	x10, .Lk_inv
	add	x10, x10, :lo12:.Lk_inv
	movi	v17.16b, #0x0f
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x10],#64	// .Lk_ipt, .Lk_sbo
	ld1	{v24.2d-v27.2d}, [x10]		// .Lk_sb1, .Lk_sb2
	ret
.size	_vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat

//
// _aes_encrypt_core
//
// AES-encrypt %xmm0.
//
// Inputs:
//	%xmm0 = input
//	%xmm9-%xmm15 as in _vpaes_preheat
//	(%rdx) = scheduled keys
//
// Output in %xmm0
// Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
// Preserves %xmm6 - %xmm8 so you get some local vectors
//
//
.type	_vpaes_encrypt_core,%function
.align	4
_vpaes_encrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds
	adrp	x11, .Lk_mc_forward+16
	add	x11, x11, :lo12:.Lk_mc_forward+16
						// vmovdqa	.Lk_ipt(%rip), %xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9), %xmm5		# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1, %xmm2, %xmm1
						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0, %xmm3, %xmm2
	eor	v0.16b, v1.16b, v16.16b		// vpxor	%xmm5, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2, %xmm0, %xmm0
	b	.Lenc_entry

.align 4
.Lenc_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {$sb1t}, v2.16b		// vpshufb	%xmm2, %xmm13, %xmm4	# 4 = sb1u
	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
	tbl	v0.16b, {$sb1u}, v3.16b		// vpshufb	%xmm3, %xmm12, %xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	tbl	v5.16b, {$sb2t}, v2.16b		// vpshufb	%xmm2, %xmm15, %xmm5	# 4 = sb2u
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	tbl	v2.16b, {$sb2u}, v3.16b		// vpshufb	%xmm3, %xmm14, %xmm2	# 2 = sb2t
	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1, %xmm0, %xmm3	# 0 = B
	eor	v2.16b, v2.16b, v5.16b		// vpxor	%xmm5, %xmm2, %xmm2	# 2 = 2A
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb	%xmm4, %xmm0, %xmm0	# 3 = D
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3	# 0 = 2A+B
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm4	# 0 = 2B+C
	eor	v0.16b, v0.16b, v3.16b		// vpxor	%xmm3, %xmm0, %xmm0	# 3 = 2A+B+D
	and	x11, x11, #~(1<<6)		// and	\$0x30, %r11		# ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = 2A+3B+C+D
	sub	w8, w8, #1			// nr--

.Lenc_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm0, %xmm9, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	tbl	v5.16b, {$invhi}, v1.16b	// vpshufb	%xmm1, %xmm11, %xmm5	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v3.16b, v3.16b, v5.16b		// vpxor	%xmm5, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v5.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm5
	cbnz	w8, .Lenc_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
	tbl	v0.16b, {$sbot}, v3.16b		// vpshufb	%xmm3, %xmm0, %xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1, %xmm0, %xmm0
	ret
.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
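//
// void vpaes_encrypt(const unsigned char *in, unsigned char *out,
//                    const AES_KEY *key);
//
// (Reference C prototype, following the AES_encrypt convention; shown
// here for orientation only.)
//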
.globl	vpaes_encrypt
.type	vpaes_encrypt,%function
.align	4
vpaes_encrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_encrypt_preheat
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_encrypt,.-vpaes_encrypt
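// v14-v15 input, v0-v1 output (same convention as _vpaes_decrypt_2x below)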
.type	_vpaes_encrypt_2x,%function
.align	4
_vpaes_encrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds
	adrp	x11, .Lk_mc_forward+16
	add	x11, x11, :lo12:.Lk_mc_forward+16
						// vmovdqa	.Lk_ipt(%rip), %xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9), %xmm5		# round0 key
	and	v1.16b,  v14.16b, v17.16b	// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b,  v14.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	and	v9.16b,  v15.16b, v17.16b
	ushr	v8.16b,  v15.16b, #4
	tbl	v1.16b,  {$iptlo}, v1.16b	// vpshufb	%xmm1, %xmm2, %xmm1
	tbl	v9.16b,  {$iptlo}, v9.16b
						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b,  {$ipthi}, v0.16b	// vpshufb	%xmm0, %xmm3, %xmm2
	tbl	v10.16b, {$ipthi}, v8.16b
	eor	v0.16b,  v1.16b,  v16.16b	// vpxor	%xmm5, %xmm1, %xmm0
	eor	v8.16b,  v9.16b,  v16.16b
	eor	v0.16b,  v0.16b,  v2.16b	// vpxor	%xmm2, %xmm0, %xmm0
	eor	v8.16b,  v8.16b,  v10.16b
	b	.Lenc_2x_entry

.align 4
.Lenc_2x_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b,  {$sb1t}, v2.16b	// vpshufb	%xmm2, %xmm13, %xmm4	# 4 = sb1u
	tbl	v12.16b, {$sb1t}, v10.16b
	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
	tbl	v0.16b,  {$sb1u}, v3.16b	// vpshufb	%xmm3, %xmm12, %xmm0	# 0 = sb1t
	tbl	v8.16b,  {$sb1u}, v11.16b
	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	tbl	v5.16b,  {$sb2t}, v2.16b	// vpshufb	%xmm2, %xmm15, %xmm5	# 4 = sb2u
	tbl	v13.16b, {$sb2t}, v10.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	eor	v8.16b,  v8.16b,  v12.16b
	tbl	v2.16b,  {$sb2u}, v3.16b	// vpshufb	%xmm3, %xmm14, %xmm2	# 2 = sb2t
	tbl	v10.16b, {$sb2u}, v11.16b
	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
	tbl	v3.16b,  {v0.16b}, v1.16b	// vpshufb	%xmm1, %xmm0, %xmm3	# 0 = B
	tbl	v11.16b, {v8.16b}, v1.16b
	eor	v2.16b,  v2.16b,  v5.16b	// vpxor	%xmm5, %xmm2, %xmm2	# 2 = 2A
	eor	v10.16b, v10.16b, v13.16b
	tbl	v0.16b,  {v0.16b}, v4.16b	// vpshufb	%xmm4, %xmm0, %xmm0	# 3 = D
	tbl	v8.16b,  {v8.16b}, v4.16b
	eor	v3.16b,  v3.16b,  v2.16b	// vpxor	%xmm2, %xmm3, %xmm3	# 0 = 2A+B
	eor	v11.16b, v11.16b, v10.16b
	tbl	v4.16b,  {v3.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm4	# 0 = 2B+C
	tbl	v12.16b, {v11.16b},v1.16b
	eor	v0.16b,  v0.16b,  v3.16b	// vpxor	%xmm3, %xmm0, %xmm0	# 3 = 2A+B+D
	eor	v8.16b,  v8.16b,  v11.16b
	and	x11, x11, #~(1<<6)		// and	\$0x30, %r11		# ... mod 4
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4, %xmm0, %xmm0	# 0 = 2A+3B+C+D
	eor	v8.16b,  v8.16b,  v12.16b
	sub	w8, w8, #1			// nr--

.Lenc_2x_entry:
	// top of round
	and	v1.16b,  v0.16b, v17.16b	// vpand	%xmm0, %xmm9, %xmm1	# 0 = k
	ushr	v0.16b,  v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	and	v9.16b,  v8.16b, v17.16b
	ushr	v8.16b,  v8.16b, #4
	tbl	v5.16b,  {$invhi},v1.16b	// vpshufb	%xmm1, %xmm11, %xmm5	# 2 = a/k
	tbl	v13.16b, {$invhi},v9.16b
	eor	v1.16b,  v1.16b,  v0.16b	// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	eor	v9.16b,  v9.16b,  v8.16b
	tbl	v3.16b,  {$invlo},v0.16b	// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b,  {$invlo},v1.16b	// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b,  v3.16b,  v5.16b	// vpxor	%xmm5, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v13.16b
	eor	v4.16b,  v4.16b,  v5.16b	// vpxor	%xmm5, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v13.16b
	tbl	v2.16b,  {$invlo},v3.16b	// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b,  {$invlo},v4.16b	// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b,  v2.16b,  v1.16b	// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b,  v3.16b,  v0.16b	// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm5
	cbnz	w8, .Lenc_2x_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b,  {$sbou}, v2.16b	// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
	tbl	v0.16b,  {$sbot}, v3.16b	// vpshufb	%xmm3, %xmm0, %xmm0	# 0 = sb1t
	tbl	v8.16b,  {$sbot}, v11.16b
	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	eor	v8.16b,  v8.16b,  v12.16b
	tbl	v0.16b,  {v0.16b},v1.16b	// vpshufb	%xmm1, %xmm0, %xmm0
	tbl	v1.16b,  {v8.16b},v1.16b
	ret
.size	_vpaes_encrypt_2x,.-_vpaes_encrypt_2x

.type	_vpaes_decrypt_preheat,%function
.align	4
_vpaes_decrypt_preheat:
	adrp	x10, .Lk_inv
	add	x10, x10, :lo12:.Lk_inv
	movi	v17.16b, #0x0f
	adrp	x11, .Lk_dipt
	add	x11, x11, :lo12:.Lk_dipt
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x11],#64	// .Lk_dipt, .Lk_dsbo
	ld1	{v24.2d-v27.2d}, [x11],#64	// .Lk_dsb9, .Lk_dsbd
	ld1	{v28.2d-v31.2d}, [x11]		// .Lk_dsbb, .Lk_dsbe
	ret
.size	_vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat

//
// Decryption core
//
// Same API as encryption core.
//
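// (Note: x11 below ends up selecting the .Lk_sr permutation that
// matches the round count, so the final ShiftRows-style shuffle comes
// out right for 128-, 192- and 256-bit schedules alike.)
//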
.type	_vpaes_decrypt_core,%function
.align	4
_vpaes_decrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds

						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov	%rax, %r11; shl	\$4, %r11
	eor	x11, x11, #0x30			// xor	\$0x30, %r11
	adrp	x10, .Lk_sr
	add	x10, x10, :lo12:.Lk_sr
	and	x11, x11, #0x30			// and	\$0x30, %r11
	add	x11, x11, x10
	adrp	x10, .Lk_mc_forward+48
	add	x10, x10, :lo12:.Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm4	# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1, %xmm2, %xmm2
	ld1	{v5.2d}, [x10]			// vmovdqa	.Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0, %xmm1, %xmm0
	eor	v2.16b, v2.16b, v16.16b		// vpxor	%xmm4, %xmm2, %xmm2
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2, %xmm0, %xmm0
	b	.Ldec_entry

.align 4
.Ldec_loop:
//
//  Inverse mix columns
//
						// vmovdqa	-0x20(%r10),%xmm4	# 4 : sb9u
						// vmovdqa	-0x10(%r10),%xmm1	# 0 : sb9t
	tbl	v4.16b, {$sb9u}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sb9u
	tbl	v1.16b, {$sb9t}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb9t
	eor	v0.16b, v4.16b, v16.16b		// vpxor	%xmm4, %xmm0, %xmm0
						// vmovdqa	0x00(%r10),%xmm4	# 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x10(%r10),%xmm1	# 0 : sbdt

	tbl	v4.16b, {$sbdu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbdu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v1.16b, {$sbdt}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbdt
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
						// vmovdqa	0x20(%r10), %xmm4	# 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x30(%r10), %xmm1	# 0 : sbbt

	tbl	v4.16b, {$sbbu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbbu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v1.16b, {$sbbt}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbbt
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
						// vmovdqa	0x40(%r10), %xmm4	# 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x50(%r10), %xmm1	# 0 : sbet

	tbl	v4.16b, {$sbeu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbeu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v1.16b, {$sbet}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbet
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr \$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	sub	w8, w8, #1			// sub	\$1,%rax		# nr--

.Ldec_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb	%xmm1, %xmm11, %xmm2	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm0
	cbnz	w8, .Ldec_loop

	// middle of last round
						// vmovdqa	0x60(%r10), %xmm4	# 3 : sbou
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
						// vmovdqa	0x70(%r10), %xmm1	# 0 : sbot
	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
	tbl	v1.16b, {$sbot}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm0, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v0.16b, v1.16b, v4.16b		// vpxor	%xmm4, %xmm1, %xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v2.16b	// vpshufb	%xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
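//
// void vpaes_decrypt(const unsigned char *in, unsigned char *out,
//                    const AES_KEY *key);
//
// (Reference C prototype, following the AES_decrypt convention; shown
// here for orientation only.)
//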
.globl	vpaes_decrypt
.type	vpaes_decrypt,%function
.align	4
vpaes_decrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_decrypt_preheat
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_decrypt,.-vpaes_decrypt

// v14-v15 input, v0-v1 output
.type	_vpaes_decrypt_2x,%function
.align	4
_vpaes_decrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds

						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov	%rax, %r11; shl	\$4, %r11
	eor	x11, x11, #0x30			// xor	\$0x30, %r11
	adrp	x10, .Lk_sr
	add	x10, x10, :lo12:.Lk_sr
	and	x11, x11, #0x30			// and	\$0x30, %r11
	add	x11, x11, x10
	adrp	x10, .Lk_mc_forward+48
	add	x10, x10, :lo12:.Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm4	# round0 key
	and	v1.16b,  v14.16b, v17.16b	// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b,  v14.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	and	v9.16b,  v15.16b, v17.16b
	ushr	v8.16b,  v15.16b, #4
	tbl	v2.16b,  {$iptlo},v1.16b	// vpshufb	%xmm1, %xmm2, %xmm2
	tbl	v10.16b, {$iptlo},v9.16b
	ld1	{v5.2d}, [x10]			// vmovdqa	.Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b,  {$ipthi},v0.16b	// vpshufb	%xmm0, %xmm1, %xmm0
	tbl	v8.16b,  {$ipthi},v8.16b
	eor	v2.16b,  v2.16b,  v16.16b	// vpxor	%xmm4, %xmm2, %xmm2
	eor	v10.16b, v10.16b, v16.16b
	eor	v0.16b,  v0.16b,  v2.16b	// vpxor	%xmm2, %xmm0, %xmm0
	eor	v8.16b,  v8.16b,  v10.16b
	b	.Ldec_2x_entry

.align 4
.Ldec_2x_loop:
//
//  Inverse mix columns
//
						// vmovdqa	-0x20(%r10),%xmm4	# 4 : sb9u
						// vmovdqa	-0x10(%r10),%xmm1	# 0 : sb9t
	tbl	v4.16b,  {$sb9u}, v2.16b	// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sb9u
	tbl	v12.16b, {$sb9u}, v10.16b
	tbl	v1.16b,  {$sb9t}, v3.16b	// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb9t
	tbl	v9.16b,  {$sb9t}, v11.16b
	eor	v0.16b,  v4.16b,  v16.16b	// vpxor	%xmm4, %xmm0, %xmm0
	eor	v8.16b,  v12.16b, v16.16b
						// vmovdqa	0x00(%r10),%xmm4	# 4 : sbdu
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b	// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x10(%r10),%xmm1	# 0 : sbdt

	tbl	v4.16b,  {$sbdu}, v2.16b	// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbdu
	tbl	v12.16b, {$sbdu}, v10.16b
	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v8.16b,  {v8.16b},v5.16b
	tbl	v1.16b,  {$sbdt}, v3.16b	// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbdt
	tbl	v9.16b,  {$sbdt}, v11.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	eor	v8.16b,  v8.16b,  v12.16b
						// vmovdqa	0x20(%r10), %xmm4	# 4 : sbbu
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b
						// vmovdqa	0x30(%r10), %xmm1	# 0 : sbbt

	tbl	v4.16b,  {$sbbu}, v2.16b	// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbbu
	tbl	v12.16b, {$sbbu}, v10.16b
	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v8.16b,  {v8.16b},v5.16b
	tbl	v1.16b,  {$sbbt}, v3.16b	// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbbt
	tbl	v9.16b,  {$sbbt}, v11.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	eor	v8.16b,  v8.16b,  v12.16b
						// vmovdqa	0x40(%r10), %xmm4	# 4 : sbeu
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b
						// vmovdqa	0x50(%r10), %xmm1	# 0 : sbet

	tbl	v4.16b,  {$sbeu}, v2.16b	// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbeu
	tbl	v12.16b, {$sbeu}, v10.16b
	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v8.16b,  {v8.16b},v5.16b
	tbl	v1.16b,  {$sbet}, v3.16b	// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbet
	tbl	v9.16b,  {$sbet}, v11.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	eor	v8.16b,  v8.16b,  v12.16b
	ext	v5.16b,  v5.16b,  v5.16b, #12	// vpalignr \$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b
	sub	w8, w8, #1			// sub	\$1,%rax		# nr--

.Ldec_2x_entry:
	// top of round
	and	v1.16b,  v0.16b, v17.16b	// vpand	%xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b,  v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	and	v9.16b,  v8.16b, v17.16b
	ushr	v8.16b,  v8.16b, #4
	tbl	v2.16b,  {$invhi},v1.16b	// vpshufb	%xmm1, %xmm11, %xmm2	# 2 = a/k
	tbl	v10.16b, {$invhi},v9.16b
	eor	v1.16b,  v1.16b,  v0.16b	// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	eor	v9.16b,  v9.16b,  v8.16b
	tbl	v3.16b,  {$invlo},v0.16b	// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b,  {$invlo},v1.16b	// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b,  v3.16b,  v2.16b	// vpxor	%xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v10.16b
	eor	v4.16b,  v4.16b,  v2.16b	// vpxor	%xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v10.16b
	tbl	v2.16b,  {$invlo},v3.16b	// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b,  {$invlo},v4.16b	// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b,  v2.16b,  v1.16b	// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b,  v3.16b,  v0.16b	// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm0
	cbnz	w8, .Ldec_2x_loop

	// middle of last round
						// vmovdqa	0x60(%r10), %xmm4	# 3 : sbou
	tbl	v4.16b,  {$sbou}, v2.16b	// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
						// vmovdqa	0x70(%r10), %xmm1	# 0 : sbot
	tbl	v1.16b,  {$sbot}, v3.16b	// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb1t
	tbl	v9.16b,  {$sbot}, v11.16b
	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm0, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b,  v1.16b,  v4.16b	// vpxor	%xmm4, %xmm1, %xmm0	# 0 = A
	eor	v8.16b,  v9.16b,  v12.16b
	tbl	v0.16b,  {v0.16b},v2.16b	// vpshufb	%xmm2, %xmm0, %xmm0
	tbl	v1.16b,  {v8.16b},v2.16b
	ret
.size	_vpaes_decrypt_2x,.-_vpaes_decrypt_2x
___
}
{
my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));

$code.=<<___;
////////////////////////////////////////////////////////
//                                                     //
//                  AES key schedule                   //
//                                                     //
////////////////////////////////////////////////////////
.type	_vpaes_key_preheat,%function
.align	4
_vpaes_key_preheat:
	adrp	x10, .Lk_inv
	add	x10, x10, :lo12:.Lk_inv
	movi	v16.16b, #0x5b			// .Lk_s63
	adrp	x11, .Lk_sb1
	add	x11, x11, :lo12:.Lk_sb1
	movi	v17.16b, #0x0f			// .Lk_s0F
	ld1	{v18.2d-v21.2d}, [x10]		// .Lk_inv, .Lk_ipt
	adrp	x10, .Lk_dksd
	add	x10, x10, :lo12:.Lk_dksd
	ld1	{v22.2d-v23.2d}, [x11]		// .Lk_sb1
	adrp	x11, .Lk_mc_forward
	add	x11, x11, :lo12:.Lk_mc_forward
	ld1	{v24.2d-v27.2d}, [x10],#64	// .Lk_dksd, .Lk_dksb
	ld1	{v28.2d-v31.2d}, [x10],#64	// .Lk_dkse, .Lk_dks9
	ld1	{v8.2d}, [x10]			// .Lk_rcon
	ld1	{v9.2d}, [x11]			// .Lk_mc_forward[0]
	ret
.size	_vpaes_key_preheat,.-_vpaes_key_preheat

.type	_vpaes_schedule_core,%function
.align	4
_vpaes_schedule_core:
	.inst	0xd503233f		// paciasp
	stp	x29, x30, [sp,#-16]!
	add	x29,sp,#0

	bl	_vpaes_key_preheat	// load the tables

	ld1	{v0.16b}, [$inp],#16	// vmovdqu	(%rdi), %xmm0	# load key (unaligned)

	// input transform
	mov	v3.16b, v0.16b		// vmovdqa	%xmm0, %xmm3
	bl	_vpaes_schedule_transform
	mov	v7.16b, v0.16b		// vmovdqa	%xmm0, %xmm7

	adrp	x10, .Lk_sr		// lea	.Lk_sr(%rip),%r10
	add	x10, x10, :lo12:.Lk_sr
	add	x8, x8, x10
	cbnz	$dir, .Lschedule_am_decrypting

	// encrypting, output zeroth round key after transform
	st1	{v0.2d}, [$out]		// vmovdqu	%xmm0, (%rdx)
	b	.Lschedule_go

.Lschedule_am_decrypting:
	// decrypting, output zeroth round key after shiftrows
	ld1	{v1.2d}, [x8]		// vmovdqa	(%r8,%r10), %xmm1
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb  %xmm1, %xmm3, %xmm3
	st1	{v3.2d}, [$out]		// vmovdqu	%xmm3, (%rdx)
	eor	x8, x8, #0x30		// xor	\$0x30, %r8

.Lschedule_go:
	cmp	$bits, #192		// cmp	\$192, %esi
	b.hi	.Lschedule_256
	b.eq	.Lschedule_192
	// 128: fall through

//
// .schedule_128
//
// 128-bit specific part of key schedule.
//
// This schedule is really simple, because all its parts
// are accomplished by the subroutines.
//
.Lschedule_128:
	mov	$inp, #10		// mov	\$10, %esi

.Loop_schedule_128:
	sub	$inp, $inp, #1		// dec	%esi
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle	// write output
	b	.Loop_schedule_128

//
// .aes_schedule_192
//
// 192-bit specific part of key schedule.
//
// The main body of this schedule is the same as the 128-bit
// schedule, but with more smearing. The long, high side is
// stored in %xmm7 as before, and the short, low side is in
// the high bits of %xmm6.
//
// This schedule is somewhat nastier, however, because each
// round produces 192 bits of key material, or 1.5 round keys.
// Therefore, on each cycle we do 2 rounds and produce 3 round
// keys.
//
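// (Bookkeeping: the counter starts at 4; with the zeroth key stored
// up front, three full trips at three keys each, plus two keys on the
// final trip and one from .Lschedule_mangle_last, give the 13 round
// keys AES-192 needs.)
//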
.align	4
.Lschedule_192:
	sub	$inp, $inp, #8
	ld1	{v0.16b}, [$inp]		// vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	v6.16b, v0.16b			// vmovdqa	%xmm0, %xmm6		# save short part
	eor	v4.16b, v4.16b, v4.16b		// vpxor	%xmm4, %xmm4, %xmm4	# clear 4
	ins	v6.d[0], v4.d[0]		// vmovhlps	%xmm4, %xmm6, %xmm6	# clobber low side with zeros
	mov	$inp, #4			// mov	\$4, %esi

.Loop_schedule_192:
	sub	$inp, $inp, #1			// dec	%esi
	bl	_vpaes_schedule_round
	ext	v0.16b, v6.16b, v0.16b, #8	// vpalignr	\$8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle		// save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle		// save key n+1
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// save key n+2
	bl	_vpaes_schedule_192_smear
	b	.Loop_schedule_192

//
// .aes_schedule_256
//
// 256-bit specific part of key schedule.
//
// The structure here is very similar to the 128-bit
// schedule, but with an additional "low side" in
// %xmm6. The low side's rounds are the same as the
// high side's, except no rcon and no rotation.
//
.align	4
.Lschedule_256:
	ld1	{v0.16b}, [$inp]		// vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	$inp, #7			// mov	\$7, %esi

.Loop_schedule_256:
	sub	$inp, $inp, #1			// dec	%esi
	bl	_vpaes_schedule_mangle		// output low result
	mov	v6.16b, v0.16b			// vmovdqa	%xmm0, %xmm6		# save cur_lo in xmm6

	// high round
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	// low round. swap xmm7 and xmm6
	dup	v0.4s, v0.s[3]			// vpshufd	\$0xFF, %xmm0, %xmm0
	movi	v4.16b, #0
	mov	v5.16b, v7.16b			// vmovdqa	%xmm7, %xmm5
	mov	v7.16b, v6.16b			// vmovdqa	%xmm6, %xmm7
	bl	_vpaes_schedule_low_round
	mov	v7.16b, v5.16b			// vmovdqa	%xmm5, %xmm7

	b	.Loop_schedule_256

//
// .aes_schedule_mangle_last
//
// Mangler for last round of key schedule
// Mangles %xmm0
//	when encrypting, outputs out(%xmm0) ^ 63
//	when decrypting, outputs unskew(%xmm0)
//
// Always called right before return... jumps to cleanup and exits
//
.align	4
.Lschedule_mangle_last:
	// schedule last round key from xmm0
	adrp	x11, .Lk_deskew		// lea	.Lk_deskew(%rip),%r11	# prepare to deskew
	add	x11, x11, :lo12:.Lk_deskew
	cbnz	$dir, .Lschedule_mangle_last_dec

	// encrypting
	ld1	{v1.2d}, [x8]		// vmovdqa	(%r8,%r10),%xmm1
	adrp	x11, .Lk_opt		// lea	.Lk_opt(%rip), %r11	# prepare to output transform
	add	x11, x11, :lo12:.Lk_opt
	add	$out, $out, #32		// add	\$32, %rdx
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1, %xmm0, %xmm0	# output permute

.Lschedule_mangle_last_dec:
	ld1	{v20.2d-v21.2d}, [x11]	// reload constants
	sub	$out, $out, #16		// add	\$-16, %rdx
	eor	v0.16b, v0.16b, v16.16b	// vpxor	.Lk_s63(%rip), %xmm0, %xmm0
	bl	_vpaes_schedule_transform	// output transform
	st1	{v0.2d}, [$out]		// vmovdqu	%xmm0, (%rdx)		# save last key

	// cleanup
	eor	v0.16b, v0.16b, v0.16b	// vpxor	%xmm0, %xmm0, %xmm0
	eor	v1.16b, v1.16b, v1.16b	// vpxor	%xmm1, %xmm1, %xmm1
	eor	v2.16b, v2.16b, v2.16b	// vpxor	%xmm2, %xmm2, %xmm2
	eor	v3.16b, v3.16b, v3.16b	// vpxor	%xmm3, %xmm3, %xmm3
	eor	v4.16b, v4.16b, v4.16b	// vpxor	%xmm4, %xmm4, %xmm4
	eor	v5.16b, v5.16b, v5.16b	// vpxor	%xmm5, %xmm5, %xmm5
	eor	v6.16b, v6.16b, v6.16b	// vpxor	%xmm6, %xmm6, %xmm6
	eor	v7.16b, v7.16b, v7.16b	// vpxor	%xmm7, %xmm7, %xmm7
	ldp	x29, x30, [sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	_vpaes_schedule_core,.-_vpaes_schedule_core

//
// .aes_schedule_192_smear
//
// Smear the short, low side in the 192-bit key schedule.
//
// Inputs:
//	%xmm7: high side, b a x y
//	%xmm6: low side, d c 0 0
//	%xmm13: 0
//
// Outputs:
//	%xmm6: b+c+d b+c 0 0
//	%xmm0: b+c+d b+c b a
//
.type	_vpaes_schedule_192_smear,%function
.align	4
_vpaes_schedule_192_smear:
	movi	v1.16b, #0
	dup	v0.4s, v7.s[3]
	ins	v1.s[3], v6.s[2]	// vpshufd	\$0x80, %xmm6, %xmm1	# d c 0 0 -> c 0 0 0
	ins	v0.s[0], v7.s[2]	// vpshufd	\$0xFE, %xmm7, %xmm0	# b a _ _ -> b b b a
	eor	v6.16b, v6.16b, v1.16b	// vpxor	%xmm1, %xmm6, %xmm6	# -> c+d c 0 0
	eor	v1.16b, v1.16b, v1.16b	// vpxor	%xmm1, %xmm1, %xmm1
	eor	v6.16b, v6.16b, v0.16b	// vpxor	%xmm0, %xmm6, %xmm6	# -> b+c+d b+c b a
	mov	v0.16b, v6.16b		// vmovdqa	%xmm6, %xmm0
	ins	v6.d[0], v1.d[0]	// vmovhlps	%xmm1, %xmm6, %xmm6	# clobber low side with zeros
	ret
.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

//
// .aes_schedule_round
//
// Runs one main round of the key schedule on %xmm0, %xmm7
//
// Specifically, runs subbytes on the high dword of %xmm0
// then rotates it by one byte and xors into the low dword of
// %xmm7.
//
// Adds rcon from low byte of %xmm8, then rotates %xmm8 for
// next rcon.
//
// Smears the dwords of %xmm7 by xoring the low into the
// second low, result into third, result into highest.
//
// Returns results in %xmm7 = %xmm0.
// Clobbers %xmm1-%xmm4, %r11.
//
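// (AArch64 note: the two 15-byte ext instructions at the top of the
// routine below play the role of x86 vpalignr, one pulling the current
// rcon byte out of v8 and the other rotating v8 into position for the
// next round.)
//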
.type	_vpaes_schedule_round,%function
.align	4
_vpaes_schedule_round:
	// extract rcon from xmm8
	movi	v4.16b, #0			// vpxor	%xmm4, %xmm4, %xmm4
	ext	v1.16b, $rcon, v4.16b, #15	// vpalignr	\$15, %xmm8, %xmm4, %xmm1
	ext	$rcon, $rcon, $rcon, #15	// vpalignr	\$15, %xmm8, %xmm8, %xmm8
	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1, %xmm7, %xmm7

	// rotate
	dup	v0.4s, v0.s[3]			// vpshufd	\$0xFF, %xmm0, %xmm0
	ext	v0.16b, v0.16b, v0.16b, #1	// vpalignr	\$1, %xmm0, %xmm0, %xmm0

	// fall through...

	// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	// smear xmm7
	ext	v1.16b, v4.16b, v7.16b, #12	// vpslldq	\$4, %xmm7, %xmm1
	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1, %xmm7, %xmm7
	ext	v4.16b, v4.16b, v7.16b, #8	// vpslldq	\$8, %xmm7, %xmm4

	// subbytes
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	eor	v7.16b, v7.16b, v4.16b		// vpxor	%xmm4, %xmm7, %xmm7
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb	%xmm1, %xmm11, %xmm2	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v7.16b, v7.16b, v16.16b		// vpxor	.Lk_s63(%rip), %xmm7, %xmm7
	tbl	v3.16b, {$invlo}, v3.16b	// vpshufb	%xmm3, %xmm10, %xmm3	# 2 = 1/iak
	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v4.16b	// vpshufb	%xmm4, %xmm10, %xmm2	# 3 = 1/jak
	eor	v3.16b, v3.16b, v1.16b		// vpxor	%xmm1, %xmm3, %xmm3	# 2 = io
	eor	v2.16b, v2.16b, v0.16b		// vpxor	%xmm0, %xmm2, %xmm2	# 3 = jo
	tbl	v4.16b, {v23.16b}, v3.16b	// vpshufb	%xmm3, %xmm13, %xmm4	# 4 = sbou
	tbl	v1.16b, {v22.16b}, v2.16b	// vpshufb	%xmm2, %xmm12, %xmm1	# 0 = sb1t
	eor	v1.16b, v1.16b, v4.16b		// vpxor	%xmm4, %xmm1, %xmm1	# 0 = sbox output

	// add in smeared stuff
	eor	v0.16b, v1.16b, v7.16b		// vpxor	%xmm7, %xmm1, %xmm0
	eor	v7.16b, v1.16b, v7.16b		// vmovdqa	%xmm0, %xmm7
	ret
.size	_vpaes_schedule_round,.-_vpaes_schedule_round

//
// .aes_schedule_transform
//
// Linear-transform %xmm0 according to tables at (%r11)
//
// Requires that %xmm9 = 0x0F0F... as in preheat
// Output in %xmm0
// Clobbers %xmm1, %xmm2
//
.type	_vpaes_schedule_transform,%function
.align	4
_vpaes_schedule_transform:
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
						// vmovdqa	(%r11), %xmm2	# lo
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1, %xmm2, %xmm2
						// vmovdqa	16(%r11), %xmm1 # hi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform
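//
// (The and/ushr + two tbl + eor sequence above is the recurring vpaes
// building block: split each byte into nibbles, look both halves up,
// then combine, i.e. out = lo_tbl[x & 0xf] ^ hi_tbl[x >> 4].)
//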
//
// .aes_schedule_mangle
//
// Mangle xmm0 from (basis-transformed) standard version
// to our version.
//
// On encrypt,
//	xor with 0x63
//	multiply by circulant 0,1,1,1
//	apply shiftrows transform
//
// On decrypt,
//	xor with 0x63
//	multiply by "inverse mixcolumns" circulant E,B,D,9
//	deskew
//	apply shiftrows transform
//
//
// Writes out to (%rdx), and increments or decrements it
// Keeps track of round number mod 4 in %r8
// Preserves xmm0
// Clobbers xmm1-xmm5
//
.type	_vpaes_schedule_mangle,%function
.align	4
_vpaes_schedule_mangle:
	mov	v4.16b, v0.16b			// vmovdqa	%xmm0, %xmm4	# save xmm0 for later
						// vmovdqa	.Lk_mc_forward(%rip),%xmm5
	cbnz	$dir, .Lschedule_mangle_dec

	// encrypting
	eor	v4.16b, v0.16b, v16.16b		// vpxor	.Lk_s63(%rip), %xmm0, %xmm4
	add	$out, $out, #16			// add	\$16, %rdx
	tbl	v4.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5, %xmm4, %xmm4
	tbl	v1.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5, %xmm4, %xmm1
	tbl	v3.16b, {v1.16b}, v9.16b	// vpshufb	%xmm5, %xmm1, %xmm3
	eor	v4.16b, v4.16b, v1.16b		// vpxor	%xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10), %xmm1
	eor	v3.16b, v3.16b, v4.16b		// vpxor	%xmm4, %xmm3, %xmm3

	b	.Lschedule_mangle_both
.align	4
.Lschedule_mangle_dec:
	// inverse mix columns
						// lea	.Lk_dksd(%rip),%r11
	ushr	v1.16b, v4.16b, #4		// vpsrlb	\$4, %xmm4, %xmm1	# 1 = hi
	and	v4.16b, v4.16b, v17.16b		// vpand	%xmm9, %xmm4, %xmm4	# 4 = lo

						// vmovdqa	0x00(%r11), %xmm2
	tbl	v2.16b, {v24.16b}, v4.16b	// vpshufb	%xmm4, %xmm2, %xmm2
						// vmovdqa	0x10(%r11), %xmm3
	tbl	v3.16b, {v25.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5, %xmm3, %xmm3

						// vmovdqa	0x20(%r11), %xmm2
	tbl	v2.16b, {v26.16b}, v4.16b	// vpshufb	%xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3, %xmm2, %xmm2
						// vmovdqa	0x30(%r11), %xmm3
	tbl	v3.16b, {v27.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5, %xmm3, %xmm3

						// vmovdqa	0x40(%r11), %xmm2
	tbl	v2.16b, {v28.16b}, v4.16b	// vpshufb	%xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3, %xmm2, %xmm2
						// vmovdqa	0x50(%r11), %xmm3
	tbl	v3.16b, {v29.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3

						// vmovdqa	0x60(%r11), %xmm2
	tbl	v2.16b, {v30.16b}, v4.16b	// vpshufb	%xmm4, %xmm2, %xmm2
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5, %xmm3, %xmm3
						// vmovdqa	0x70(%r11), %xmm4
	tbl	v4.16b, {v31.16b}, v1.16b	// vpshufb	%xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10), %xmm1
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3, %xmm2, %xmm2
	eor	v3.16b, v4.16b, v2.16b		// vpxor	%xmm2, %xmm4, %xmm3

	sub	$out, $out, #16			// add	\$-16, %rdx

.Lschedule_mangle_both:
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm3
	add	x8, x8, #64-16			// add	\$-16, %r8
	and	x8, x8, #~(1<<6)		// and	\$0x30, %r8
	st1	{v3.2d}, [$out]			// vmovdqu	%xmm3, (%rdx)
	ret
.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
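//
// int vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
//                           AES_KEY *key);
// int vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
//                           AES_KEY *key);
//
// (Reference C prototypes. Note that the rounds word stored at
// [key+240] is bits/32+5, i.e. 9/11/13 rather than the usual 10/12/14;
// the *_core routines above count their middle rounds with it and
// handle the last round separately.)
//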
.globl	vpaes_set_encrypt_key
.type	vpaes_set_encrypt_key,%function
.align	4
vpaes_set_encrypt_key:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, $bits, #5		// shr	\$5,%eax
	add	w9, w9, #5		// \$5,%eax
	str	w9, [$out,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;

	mov	$dir, #0		// mov	\$0,%ecx
	mov	x8, #0x30		// mov	\$0x30,%r8d
	bl	_vpaes_schedule_core
	eor	x0, x0, x0

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key

.globl	vpaes_set_decrypt_key
.type	vpaes_set_decrypt_key,%function
.align	4
vpaes_set_decrypt_key:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, $bits, #5		// shr	\$5,%eax
	add	w9, w9, #5		// \$5,%eax
	str	w9, [$out,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
	lsl	w9, w9, #4		// shl	\$4,%eax
	add	$out, $out, #16		// lea	16(%rdx,%rax),%rdx
	add	$out, $out, x9

	mov	$dir, #1		// mov	\$1,%ecx
	lsr	w8, $bits, #1		// shr	\$1,%r8d
	and	x8, x8, #32		// and	\$32,%r8d
	eor	x8, x8, #32		// xor	\$32,%r8d	# nbits==192?0:32
	bl	_vpaes_schedule_core

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
___
}
{
my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));

$code.=<<___;
.globl	vpaes_cbc_encrypt
.type	vpaes_cbc_encrypt,%function
.align	4
vpaes_cbc_encrypt:
	cbz	$len, .Lcbc_abort
	cmp	w5, #0			// check direction
	b.eq	vpaes_cbc_decrypt

	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	mov	x17, $len		// reassign
	mov	x2,  $key		// reassign

	ld1	{v0.16b}, [$ivec]	// load ivec
	bl	_vpaes_encrypt_preheat
	b	.Lcbc_enc_loop

.align	4
.Lcbc_enc_loop:
	ld1	{v7.16b}, [$inp],#16	// load input
	eor	v7.16b, v7.16b, v0.16b	// xor with ivec
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16	// save output
	subs	x17, x17, #16
	b.hi	.Lcbc_enc_loop

	st1	{v0.16b}, [$ivec]	// write ivec

	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
.Lcbc_abort:
	ret
.size	vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
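//
// void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
//                        size_t length, const AES_KEY *key,
//                        unsigned char *ivec, int enc);
//
// (Reference C prototype, following the AES_cbc_encrypt convention;
// enc selects the direction. The decryption path below first peels
// off a single block when length is an odd multiple of 16, then runs
// the remaining pairs through _vpaes_decrypt_2x.)
//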
.type	vpaes_cbc_decrypt,%function
.align	4
vpaes_cbc_decrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len		// reassign
	mov	x2,  $key		// reassign
	ld1	{v6.16b}, [$ivec]	// load ivec
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lcbc_dec_loop2x

	ld1	{v7.16b}, [$inp], #16	// load input
	bl	_vpaes_decrypt_core
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	orr	v6.16b, v7.16b, v7.16b	// next ivec value
	st1	{v0.16b}, [$out], #16
	subs	x17, x17, #16
	b.ls	.Lcbc_dec_done

.align	4
.Lcbc_dec_loop2x:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	eor	v1.16b, v1.16b, v14.16b
	orr	v6.16b, v15.16b, v15.16b
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lcbc_dec_loop2x

.Lcbc_dec_done:
	st1	{v6.16b}, [$ivec]

	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
___
if (1) {
$code.=<<___;
.globl	vpaes_ecb_encrypt
.type	vpaes_ecb_encrypt,%function
.align	4
vpaes_ecb_encrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_encrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_enc_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_enc_done

.align	4
.Lecb_enc_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_encrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_enc_loop

.Lecb_enc_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_ecb_encrypt,.-vpaes_ecb_encrypt

.globl	vpaes_ecb_decrypt
.type	vpaes_ecb_decrypt,%function
.align	4
vpaes_ecb_decrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_dec_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_dec_done

.align	4
.Lecb_dec_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_dec_loop

.Lecb_dec_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
___
} }
print $code;

close STDOUT or die "error closing STDOUT: $!";