Lines Matching refs:TMP4

199 .macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
228 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
236 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
243 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
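
These excerpts appear to come from the x86 AES-NI GCM assembly (aesni-intel_asm.S). The three GHASH_MUL calls inside PRECOMPUTE raise the hash key to successive powers (HashKey^2..HashKey^4) so that the 4-block parallel macros further down can fold four blocks per iteration. A minimal C sketch of that step, assuming a ghash_mul() helper like the one sketched after the GHASH_MUL excerpt below (function and array names are illustrative, not from the kernel source):

    #include <immintrin.h>

    __m128i ghash_mul(__m128i gh, __m128i hk);   /* one GF(2^128) multiply, sketched below */

    /* Precompute HashKey^1..HashKey^4 for the 4-block parallel loop. */
    void precompute_hash_key_powers(__m128i h, __m128i hpow[4])
    {
        hpow[0] = h;                          /* HashKey   */
        hpow[1] = ghash_mul(hpow[0], h);      /* HashKey^2 */
        hpow[2] = ghash_mul(hpow[1], h);      /* HashKey^3 */
        hpow[3] = ghash_mul(hpow[2], h);      /* HashKey^4 */
    }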
514 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
535 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
540 pslld $25, \TMP4 # packed left shift <<25
542 pxor \TMP4, \TMP2
550 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
554 movdqa \GH,\TMP4
557 psrld $7,\TMP4 # packed right shift >>7
559 pxor \TMP4,\TMP2
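
GHASH_MUL performs one multiplication in GF(2^128): a Karatsuba carry-less multiply (pclmulqdq) followed by a two-phase shift/xor reduction. The shift counts 1, 2, 7 (and their 32-bit complements 31, 30, 25) come from the GHASH reduction polynomial x^128 + x^7 + x^2 + x + 1 in its bit-reflected representation. Below is a C sketch with SSE2/PCLMUL intrinsics that mirrors the macro's structure; it is an illustration under those assumptions, not the kernel's implementation (compile with -msse2 -mpclmul):

    #include <immintrin.h>

    /* Two-phase reduction of a 256-bit carry-less product hi:lo, using the
     * same shift counts as the assembly (31/30/25, then 1/2/7). */
    __m128i ghash_reduce(__m128i lo, __m128i hi)
    {
        /* phase 1: packed left shifts by 31, 30, 25 */
        __m128i r = _mm_xor_si128(_mm_slli_epi32(lo, 31),
                    _mm_xor_si128(_mm_slli_epi32(lo, 30),
                                  _mm_slli_epi32(lo, 25)));
        __m128i carry = _mm_srli_si128(r, 4);            /* kept for phase 2 */
        lo = _mm_xor_si128(lo, _mm_slli_si128(r, 12));

        /* phase 2: packed right shifts by 1, 2, 7 */
        __m128i r2 = _mm_xor_si128(_mm_srli_epi32(lo, 1),
                     _mm_xor_si128(_mm_srli_epi32(lo, 2),
                                   _mm_srli_epi32(lo, 7)));
        lo = _mm_xor_si128(lo, _mm_xor_si128(r2, carry));
        return _mm_xor_si128(lo, hi);                    /* reduced result */
    }

    /* One GF(2^128) multiply: Karatsuba carry-less multiply, fold the middle
     * part into hi:lo, then reduce. */
    __m128i ghash_mul(__m128i gh, __m128i hk)
    {
        __m128i hi  = _mm_clmulepi64_si128(gh, hk, 0x11);          /* a1*b1 */
        __m128i lo  = _mm_clmulepi64_si128(gh, hk, 0x00);          /* a0*b0 */
        __m128i mid = _mm_clmulepi64_si128(
            _mm_xor_si128(gh, _mm_shuffle_epi32(gh, 0x4e)),
            _mm_xor_si128(hk, _mm_shuffle_epi32(hk, 0x4e)), 0x00); /* (a1^a0)*(b1^b0) */

        mid = _mm_xor_si128(mid, _mm_xor_si128(hi, lo));           /* true middle */
        lo  = _mm_xor_si128(lo, _mm_slli_si128(mid, 8));
        hi  = _mm_xor_si128(hi, _mm_srli_si128(mid, 8));
        return ghash_reduce(lo, hi);
    }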
598 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
612 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
628 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
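
CALC_AAD_HASH folds the additional authenticated data into the running hash one 16-byte block at a time: xor the block into the accumulator, then multiply by the hash key; the second GHASH_MUL call in the excerpt handles a zero-padded partial tail block. A sketch of that loop, reusing the ghash_mul() sketch above (byte-order shuffling of the loaded blocks is omitted; names are illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <immintrin.h>

    __m128i ghash_mul(__m128i gh, __m128i hk);   /* see the GHASH_MUL sketch */

    __m128i calc_aad_hash(const uint8_t *aad, size_t len, __m128i hk)
    {
        __m128i hash = _mm_setzero_si128();

        while (len >= 16) {                       /* full 16-byte blocks */
            __m128i blk = _mm_loadu_si128((const __m128i *)aad);
            hash = ghash_mul(_mm_xor_si128(hash, blk), hk);
            aad += 16;
            len -= 16;
        }
        if (len) {                                /* zero-padded partial block */
            uint8_t last[16] = {0};
            for (size_t i = 0; i < len; i++)
                last[i] = aad[i];
            hash = ghash_mul(_mm_xor_si128(hash,
                             _mm_loadu_si128((const __m128i *)last)), hk);
        }
        return hash;
    }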
791 .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
853 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
855 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
857 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
860 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
862 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
865 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
980 .macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
991 movdqa \XMM5, \TMP4
996 pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
1049 pxor \TMP1, \TMP4
1050 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1079 pxor \TMP1, \TMP4
1080 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1138 pxor \TMP4, \TMP1
1153 movdqa \XMM5, \TMP4
1154 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1157 pslld $25, \TMP4 # packed left shift << 25
1159 pxor \TMP4, \TMP2
1167 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1169 movdqa \XMM5,\TMP4
1172 psrld $7, \TMP4 # packed right shift >>7
1174 pxor \TMP4,\TMP2
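
The _enc and _dec variants of GHASH_4_ENCRYPT_4_PARALLEL share the same hash-side pattern: each of the four blocks contributes three carry-less products (high, low, Karatsuba middle), the products are accumulated in TMP4, XMM5 and TMP6 as the comments above note, and the shift-based reduction runs only once per four blocks. A sketch of that accumulate-then-reduce idea, reusing ghash_reduce() from the GHASH_MUL sketch (all names are illustrative):

    #include <immintrin.h>

    __m128i ghash_reduce(__m128i lo, __m128i hi);   /* see the GHASH_MUL sketch */

    /* Hash four blocks at once against H^4..H^1, reducing a single time. */
    __m128i ghash_4_blocks(const __m128i x[4], const __m128i hpow[4])
    {
        __m128i hi  = _mm_setzero_si128();   /* TMP4 in the assembly */
        __m128i lo  = _mm_setzero_si128();   /* XMM5 in the assembly */
        __m128i mid = _mm_setzero_si128();   /* TMP6 in the assembly */

        for (int i = 0; i < 4; i++) {
            __m128i a = x[i], b = hpow[i];
            hi  = _mm_xor_si128(hi,  _mm_clmulepi64_si128(a, b, 0x11));
            lo  = _mm_xor_si128(lo,  _mm_clmulepi64_si128(a, b, 0x00));
            mid = _mm_xor_si128(mid, _mm_clmulepi64_si128(
                      _mm_xor_si128(a, _mm_shuffle_epi32(a, 0x4e)),
                      _mm_xor_si128(b, _mm_shuffle_epi32(b, 0x4e)), 0x00));
        }
        mid = _mm_xor_si128(mid, _mm_xor_si128(hi, lo));   /* recover true middle */
        lo  = _mm_xor_si128(lo, _mm_slli_si128(mid, 8));
        hi  = _mm_xor_si128(hi, _mm_srli_si128(mid, 8));
        return ghash_reduce(lo, hi);
    }

Two differences from the assembly are worth noting: the macros load the precomputed HashKey_i_k values instead of recomputing the (b1+b0) operand with shuffles, and they interleave these multiplies with the AES rounds of the next four counter blocks to hide instruction latency.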
1188 .macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
1199 movdqa \XMM5, \TMP4
1204 pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
1257 pxor \TMP1, \TMP4
1258 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1287 pxor \TMP1, \TMP4
1288 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1350 pxor \TMP4, \TMP1
1365 movdqa \XMM5, \TMP4
1366 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1369 pslld $25, \TMP4 # packed left shift << 25
1371 pxor \TMP4, \TMP2
1379 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1381 movdqa \XMM5,\TMP4
1384 psrld $7, \TMP4 # packed right shift >>7
1386 pxor \TMP4,\TMP2
1395 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1406 movdqu HashKey_4_k(%arg2), \TMP4
1407 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1419 movdqu HashKey_3_k(%arg2), \TMP4
1420 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1434 movdqu HashKey_2_k(%arg2), \TMP4
1435 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1447 movdqu HashKey_k(%arg2), \TMP4
1448 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1455 movdqa \TMP2, \TMP4
1456 pslldq $8, \TMP4 # left shift TMP4 2 DWs
1458 pxor \TMP4, \XMMDst
1464 movdqa \XMMDst, \TMP4
1465 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1468 pslld $25, \TMP4 # packed left shift << 25
1470 pxor \TMP4, \TMP2
1480 movdqa \XMMDst, \TMP4
1483 psrld $7, \TMP4 # packed right shift >> 7
1485 pxor \TMP4, \TMP2
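
GHASH_LAST_4 folds the four remaining hash registers with HashKey^4..HashKey and reduces once, the same pattern as the parallel macros above. The HashKey_k, HashKey_2_k, HashKey_3_k and HashKey_4_k values it loads are the precomputed Karatsuba middle operands: each holds the xor of the two 64-bit halves of the corresponding hash-key power, stored by PRECOMPUTE so that the (b1+b0) term is a single load per block. A sketch of how such a value would be formed (the function name is illustrative):

    #include <immintrin.h>

    /* Both 64-bit halves of the result hold (high half of hpow) ^ (low half),
     * i.e. the (b1 + b0) operand of the Karatsuba middle product. */
    __m128i karatsuba_middle_operand(__m128i hpow)
    {
        return _mm_xor_si128(hpow, _mm_shuffle_epi32(hpow, 0x4e));
    }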