Lines Matching refs:TMP3
199 .macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
201 movdqu (%r12), \TMP3
203 pshufb \TMP2, \TMP3
207 movdqa \TMP3, \TMP2
208 psllq $1, \TMP3
213 por \TMP2, \TMP3
220 pxor \TMP2, \TMP3
221 movdqu \TMP3, HashKey(%arg2)
223 movdqa \TMP3, \TMP5
224 pshufd $78, \TMP3, \TMP1
225 pxor \TMP3, \TMP1
228 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
236 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
243 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
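Note: the PRECOMPUTE matches above are the hash-subkey setup: the subkey at (%r12) is byte-swapped, shifted left by one bit and reduced modulo the field polynomial (the psllq $1 / por / pxor lines are the TMP3 half of that), stored as HashKey, and then raised to the second, third and fourth power by the three GHASH_MUL calls so the 4-blocks-at-a-time macros further down can use H, H^2, H^3 and H^4. A minimal C sketch of that precomputation, assuming a hypothetical gf128_t type and gf128_mul() helper (neither name comes from the source):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } gf128_t;   /* one 128-bit GHASH block */
    gf128_t gf128_mul(gf128_t a, gf128_t b);        /* hypothetical GF(2^128) multiply */

    /* Precompute the hash-key powers used by the 4-way parallel GHASH path. */
    static void precompute_hashkey_powers(gf128_t h, gf128_t hk[4])
    {
            hk[0] = h;                      /* H   */
            hk[1] = gf128_mul(hk[0], h);    /* H^2 */
            hk[2] = gf128_mul(hk[1], h);    /* H^3 */
            hk[3] = gf128_mul(hk[2], h);    /* H^4 */
    }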
514 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
517 pshufd $78, \HK, \TMP3
519 pxor \HK, \TMP3 # TMP3 = b1+b0
522 pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
525 movdqa \TMP2, \TMP3
526 pslldq $8, \TMP3 # left shift TMP3 2 DWs
528 pxor \TMP3, \GH
534 movdqa \GH, \TMP3
535 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
539 pslld $30, \TMP3 # packed left shift << 30
541 pxor \TMP3, \TMP2 # xor the shifted versions
550 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
553 movdqa \GH,\TMP3
556 psrld $2,\TMP3 # packed right shift >> 2
558 pxor \TMP3,\TMP2 # xor the shifted versions
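Note: the pclmulqdq line above, together with its comments (TMP3 = b1+b0, TMP2 = (a0+a1)*(b1+b0)), shows that GHASH_MUL uses the Karatsuba decomposition: a 128x128-bit carry-less product is built from three 64x64-bit products instead of four. The pslld $30 / psrld $2 lines that follow belong to the reduction modulo the GCM polynomial, which is not sketched here. A self-contained C sketch of the Karatsuba step, with a slow software clmul64() standing in for pclmulqdq (uses the GCC/Clang unsigned __int128 extension):

    #include <stdint.h>

    /* Software stand-in for pclmulqdq: 64x64 -> 128-bit carry-less multiply. */
    static unsigned __int128 clmul64(uint64_t a, uint64_t b)
    {
            unsigned __int128 r = 0;

            for (int i = 0; i < 64; i++)
                    if (b & (1ULL << i))
                            r ^= (unsigned __int128)a << i;
            return r;
    }

    /*
     * Karatsuba: with a = a1*x^64 + a0 and b = b1*x^64 + b0,
     * a*b = a1b1*x^128 + [(a1^a0)(b1^b0) ^ a1b1 ^ a0b0]*x^64 + a0b0,
     * so only three carry-less multiplies are needed.
     */
    static void clmul128(uint64_t a1, uint64_t a0, uint64_t b1, uint64_t b0,
                         unsigned __int128 res[2])
    {
            unsigned __int128 hi  = clmul64(a1, b1);
            unsigned __int128 lo  = clmul64(a0, b0);
            unsigned __int128 mid = clmul64(a1 ^ a0, b1 ^ b0) ^ hi ^ lo;

            res[0] = lo ^ (mid << 64);      /* low 128 bits of the product  */
            res[1] = hi ^ (mid >> 64);      /* high 128 bits of the product */
    }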
598 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
612 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
628 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
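Note: the two GHASH_MUL calls above fold the additional authenticated data into the running hash; the GHASH_MUL calls of INITIAL_BLOCKS_ENC_DEC listed next do the same for the one to three blocks handled before the 4-way loop. Both are instances of the standard GHASH recurrence T = (T xor X_i) * H, sketched here with the same hypothetical gf128_t / gf128_mul() helpers as in the PRECOMPUTE sketch:

    /* Absorb one 16-byte block X into the running GHASH value T. */
    static gf128_t ghash_fold(gf128_t t, gf128_t x, gf128_t h)
    {
            t.hi ^= x.hi;                   /* T ^= X ...      */
            t.lo ^= x.lo;
            return gf128_mul(t, h);         /* ... then T *= H */
    }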
791 .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
853 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
855 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
857 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
860 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
862 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
865 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
980 .macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
1031 movaps 0x30(%arg1), \TMP3
1032 aesenc \TMP3, \XMM1 # Round 3
1033 aesenc \TMP3, \XMM2
1034 aesenc \TMP3, \XMM3
1035 aesenc \TMP3, \XMM4
1037 movaps 0x40(%arg1), \TMP3
1038 aesenc \TMP3, \XMM1 # Round 4
1039 aesenc \TMP3, \XMM2
1040 aesenc \TMP3, \XMM3
1041 aesenc \TMP3, \XMM4
1044 movaps 0x50(%arg1), \TMP3
1045 aesenc \TMP3, \XMM1 # Round 5
1046 aesenc \TMP3, \XMM2
1047 aesenc \TMP3, \XMM3
1048 aesenc \TMP3, \XMM4
1061 movaps 0x60(%arg1), \TMP3
1062 aesenc \TMP3, \XMM1 # Round 6
1063 aesenc \TMP3, \XMM2
1064 aesenc \TMP3, \XMM3
1065 aesenc \TMP3, \XMM4
1067 movaps 0x70(%arg1), \TMP3
1068 aesenc \TMP3, \XMM1 # Round 7
1069 aesenc \TMP3, \XMM2
1070 aesenc \TMP3, \XMM3
1071 aesenc \TMP3, \XMM4
1074 movaps 0x80(%arg1), \TMP3
1075 aesenc \TMP3, \XMM1 # Round 8
1076 aesenc \TMP3, \XMM2
1077 aesenc \TMP3, \XMM3
1078 aesenc \TMP3, \XMM4
1092 movaps 0x90(%arg1), \TMP3
1093 aesenc \TMP3, \XMM1 # Round 9
1094 aesenc \TMP3, \XMM2
1095 aesenc \TMP3, \XMM3
1096 aesenc \TMP3, \XMM4
1105 MOVADQ (%r10),\TMP3
1107 aesenc \TMP3, %xmm\index
1114 MOVADQ (%r10), \TMP3
1115 aesenclast \TMP3, \XMM1 # last round
1116 aesenclast \TMP3, \XMM2
1117 aesenclast \TMP3, \XMM3
1118 aesenclast \TMP3, \XMM4
1121 movdqu (%arg4,%r11,1), \TMP3
1122 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
1123 movdqu 16(%arg4,%r11,1), \TMP3
1124 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
1125 movdqu 32(%arg4,%r11,1), \TMP3
1126 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
1127 movdqu 48(%arg4,%r11,1), \TMP3
1128 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
1143 movdqa \TMP2, \TMP3
1144 pslldq $8, \TMP3 # left shift TMP3 2 DWs
1146 pxor \TMP3, \XMM5
1152 movdqa \XMM5, \TMP3
1154 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1156 pslld $30, \TMP3 # packed left shift << 30
1158 pxor \TMP3, \TMP2 # xor the shifted versions
1167 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1168 movdqa \XMM5,\TMP3
1171 psrld $2, \TMP3 # packed right shift >> 2
1173 pxor \TMP3,\TMP2 # xor the shifted versions
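Note: within GHASH_4_ENCRYPT_4_PARALLEL_enc, TMP3 serves three roles: it carries the round keys at 0x30(%arg1) through 0x90(%arg1) for AES rounds 3-9 plus the (%r10) loop that supplies the extra rounds of AES-192/256 and the closing aesenclast; it holds the four input blocks at (%arg4,%r11,1) that are XORed with the keystream; and it is scratch for the GHASH reduction shifts. A hedged C sketch of the counter-mode part of one iteration, with a hypothetical aes_encrypt_block() standing in for the aesenc/aesenclast sequence (the interleaved GHASH folding of the previous four ciphertext blocks is left out):

    #include <stdint.h>

    /* Hypothetical stand-in for the full aesenc/aesenclast round sequence. */
    void aes_encrypt_block(const uint8_t *key_schedule, int nrounds,
                           const uint8_t in[16], uint8_t out[16]);

    /* Encrypt-side pattern: four counter blocks become keystream, then XOR. */
    static void gcm_enc_4_blocks(const uint8_t *key_schedule, int nrounds,
                                 const uint8_t ctr[4][16],
                                 const uint8_t *in, uint8_t *out)
    {
            for (int i = 0; i < 4; i++) {
                    uint8_t keystream[16];

                    aes_encrypt_block(key_schedule, nrounds, ctr[i], keystream);
                    for (int j = 0; j < 16; j++)
                            out[16 * i + j] = in[16 * i + j] ^ keystream[j];
                    /* on the encrypt side, out (the ciphertext) feeds GHASH next */
            }
    }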
1188 .macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
1239 movaps 0x30(%arg1), \TMP3
1240 aesenc \TMP3, \XMM1 # Round 3
1241 aesenc \TMP3, \XMM2
1242 aesenc \TMP3, \XMM3
1243 aesenc \TMP3, \XMM4
1245 movaps 0x40(%arg1), \TMP3
1246 aesenc \TMP3, \XMM1 # Round 4
1247 aesenc \TMP3, \XMM2
1248 aesenc \TMP3, \XMM3
1249 aesenc \TMP3, \XMM4
1252 movaps 0x50(%arg1), \TMP3
1253 aesenc \TMP3, \XMM1 # Round 5
1254 aesenc \TMP3, \XMM2
1255 aesenc \TMP3, \XMM3
1256 aesenc \TMP3, \XMM4
1269 movaps 0x60(%arg1), \TMP3
1270 aesenc \TMP3, \XMM1 # Round 6
1271 aesenc \TMP3, \XMM2
1272 aesenc \TMP3, \XMM3
1273 aesenc \TMP3, \XMM4
1275 movaps 0x70(%arg1), \TMP3
1276 aesenc \TMP3, \XMM1 # Round 7
1277 aesenc \TMP3, \XMM2
1278 aesenc \TMP3, \XMM3
1279 aesenc \TMP3, \XMM4
1282 movaps 0x80(%arg1), \TMP3
1283 aesenc \TMP3, \XMM1 # Round 8
1284 aesenc \TMP3, \XMM2
1285 aesenc \TMP3, \XMM3
1286 aesenc \TMP3, \XMM4
1300 movaps 0x90(%arg1), \TMP3
1301 aesenc \TMP3, \XMM1 # Round 9
1302 aesenc \TMP3, \XMM2
1303 aesenc \TMP3, \XMM3
1304 aesenc \TMP3, \XMM4
1313 MOVADQ (%r10),\TMP3
1315 aesenc \TMP3, %xmm\index
1322 MOVADQ (%r10), \TMP3
1323 aesenclast \TMP3, \XMM1 # last round
1324 aesenclast \TMP3, \XMM2
1325 aesenclast \TMP3, \XMM3
1326 aesenclast \TMP3, \XMM4
1329 movdqu (%arg4,%r11,1), \TMP3
1330 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
1332 movdqa \TMP3, \XMM1
1333 movdqu 16(%arg4,%r11,1), \TMP3
1334 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
1336 movdqa \TMP3, \XMM2
1337 movdqu 32(%arg4,%r11,1), \TMP3
1338 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
1340 movdqa \TMP3, \XMM3
1341 movdqu 48(%arg4,%r11,1), \TMP3
1342 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
1344 movdqa \TMP3, \XMM4
1355 movdqa \TMP2, \TMP3
1356 pslldq $8, \TMP3 # left shift TMP3 2 DWs
1358 pxor \TMP3, \XMM5
1364 movdqa \XMM5, \TMP3
1366 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1368 pslld $30, \TMP3 # packed left shift << 30
1370 pxor \TMP3, \TMP2 # xor the shifted versions
1379 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1380 movdqa \XMM5,\TMP3
1383 psrld $2, \TMP3 # packed right shift >> 2
1385 pxor \TMP3,\TMP2 # xor the shifted versions
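Note: the _dec variant differs from the _enc variant mainly in the "Ciphertext/Plaintext XOR EK" block above: after each XOR, the original ciphertext saved in TMP3 is copied back into XMM1-XMM4 (the movdqa \TMP3, \XMMn lines), because GHASH must be computed over the ciphertext while the XOR result, the recovered plaintext, is what gets written out. A brief C sketch of that difference, reusing the hypothetical aes_encrypt_block() from the encrypt sketch:

    #include <stdint.h>
    #include <string.h>

    /* Decrypt-side pattern: keep the ciphertext for GHASH, emit the plaintext. */
    static void gcm_dec_4_blocks(const uint8_t *key_schedule, int nrounds,
                                 const uint8_t ctr[4][16],
                                 const uint8_t *in, uint8_t *out,
                                 uint8_t ghash_in[4][16])
    {
            for (int i = 0; i < 4; i++) {
                    uint8_t keystream[16];

                    memcpy(ghash_in[i], &in[16 * i], 16);   /* movdqa \TMP3, \XMMn */
                    aes_encrypt_block(key_schedule, nrounds, ctr[i], keystream);
                    for (int j = 0; j < 16; j++)
                            out[16 * i + j] = in[16 * i + j] ^ keystream[j];
            }
    }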
1395 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1463 movdqa \XMMDst, \TMP3
1465 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1467 pslld $30, \TMP3 # packed left shift << 30
1469 pxor \TMP3, \TMP2 # xor the shifted versions
1479 movdqa \XMMDst, \TMP3
1482 psrld $2, \TMP3 # packed right shift >> 2
1484 pxor \TMP3, \TMP2 # xor the shifted versions
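Note: the GHASH_LAST_4 matches above are again only the shift-by-30 / shift-by-2 part of the final reduction. The point of the macro, and the reason PRECOMPUTE stores H through H^4, is the aggregated form of GHASH: the four per-lane accumulators are each multiplied by a different power of H and then simply XORed together. A hedged sketch with the same hypothetical helpers (the lane-to-power pairing follows the standard aggregation, not a line-by-line reading of the macro):

    /* Combine four per-lane GHASH accumulators x[0..3] (oldest first) using the
     * precomputed powers hk[3]=H^4 .. hk[0]=H, then XOR the partial products. */
    static gf128_t ghash_last_4(const gf128_t x[4], const gf128_t hk[4])
    {
            gf128_t dst = gf128_mul(x[0], hk[3]);           /* X1 * H^4 */

            for (int i = 1; i < 4; i++) {
                    gf128_t t = gf128_mul(x[i], hk[3 - i]); /* X2*H^3, X3*H^2, X4*H */

                    dst.hi ^= t.hi;
                    dst.lo ^= t.lo;
            }
            return dst;
    }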