Lines Matching refs:T4

605 .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
621 \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
667 \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
895 .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
916 vpslld $25, \GH, \T4 # packed left shift << 25
919 vpxor \T4, \T2, \T2
930 vpsrld $7, \GH, \T4 # packed right shift >> 7
932 vpxor \T4, \T2, \T2
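
The vpslld 31/30/25 and vpsrld 1/2/7 pairs in GHASH_MUL_AVX perform the reduction by the low terms (x^7 + x^2 + x) of the GHASH polynomial x^128 + x^7 + x^2 + x + 1, applied to the bit-reflected representation the routine works in. For orientation only, here is a bitwise C sketch of the same GF(2^128) product in the textbook (NIST SP 800-38D) formulation; gf128_mul is an illustrative name, and the sketch ignores the kernel's byte reflection and its HashKey<<1 pre-shift, so it is not a drop-in equivalent of the macro:

    #include <stdint.h>
    #include <string.h>

    /* Reference GF(2^128) multiply as GHASH defines it (bit 0 = MSB of
     * byte 0, reduction polynomial x^128 + x^7 + x^2 + x + 1).  Purely
     * illustrative of what GHASH_MUL_AVX computes with PCLMULQDQ plus
     * the shift-based reduction listed above. */
    static void gf128_mul(const uint8_t x[16], const uint8_t y[16],
                          uint8_t out[16])
    {
        uint8_t z[16] = { 0 };      /* accumulator */
        uint8_t v[16];              /* running multiple of y */
        int i, j;

        memcpy(v, y, 16);

        for (i = 0; i < 128; i++) {
            /* add v into z whenever bit i of x is set */
            if (x[i / 8] & (0x80 >> (i % 8)))
                for (j = 0; j < 16; j++)
                    z[j] ^= v[j];

            /* v = v * x: shift right one bit, fold the carry */
            int carry = v[15] & 1;
            for (j = 15; j > 0; j--)
                v[j] = (uint8_t)((v[j] >> 1) | (v[j - 1] << 7));
            v[0] >>= 1;
            if (carry)
                v[0] ^= 0xe1;  /* R = 0xE1||0^120, i.e. x^7+x^2+x+1 in GHASH bit order */
        }
        memcpy(out, z, 16);
    }
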
941 .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
950 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
956 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
962 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
968 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
974 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
980 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
986 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
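
Each call above multiplies the running power held in T5 by HashKey once more, so the 8-way parallel code later has HashKey^1 through HashKey^8 on hand. A minimal sketch of the same chain in terms of the gf128_mul() reference above (hkey[] is a made-up name, with hkey[1] = HashKey = E_K(0^128)):

    #include <stdint.h>

    /* Illustrative only: build HashKey^2 .. HashKey^8 by repeatedly
     * multiplying the running power by HashKey, mirroring the chain of
     * GHASH_MUL_AVX calls above.  Relies on the gf128_mul() sketch. */
    static void precompute_hash_powers(uint8_t hkey[9][16])
    {
        for (int n = 2; n <= 8; n++)
            gf128_mul(hkey[n - 1], hkey[1], hkey[n]);
    }
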
1001 .macro INITIAL_BLOCKS_AVX REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC
1075 GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks
1236 .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
1321 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
1343 vpxor \T3, \T4, \T4
1368 vpxor \T3, \T4, \T4
1391 vpxor \T3, \T4, \T4
1415 vpxor \T3, \T4, \T4
1438 vpxor \T3, \T4, \T4
1462 vpxor \T3, \T4, \T4
1487 vpxor \T3, \T4, \T4
1497 vpxor \T4, \T6, \T6
1542 vpxor \T4, \T6, \T6 # accumulate the results in T6:T7
1551 vpslld $25, \T7, \T4 # packed left shift << 25
1554 vpxor \T4, \T2, \T2
1576 vpsrld $7, \T7, \T4 # packed right shift >> 7
1578 vpxor \T4, \T2, \T2
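
Within GHASH_8_ENCRYPT_8_PARALLEL_AVX the 0x11 products (a1*b1) of every block are accumulated into T4/T6 and the 0x00 products (a0*b0) into T7, with the cross terms folded in between, so a single reduction (the shift sequence at 1551-1578) covers all eight blocks. A hedged intrinsics sketch of one such 128x128 carry-less multiply, using illustrative names rather than the kernel's (compile with -mpclmul):

    #include <immintrin.h>   /* _mm_clmulepi64_si128 and SSE2 shifts */

    /* Illustrative schoolbook 128x128 carry-less multiply: the
     * immediate's bit 0 picks the qword of a, bit 4 the qword of b.
     * The macro above does the same per block, gathering every a1*b1
     * into T6 and every a0*b0 into T7 before reducing once. */
    static void clmul_128x128(__m128i a, __m128i b, __m128i *hi, __m128i *lo)
    {
        __m128i h = _mm_clmulepi64_si128(a, b, 0x11);               /* a1*b1 */
        __m128i l = _mm_clmulepi64_si128(a, b, 0x00);               /* a0*b0 */
        __m128i m = _mm_xor_si128(_mm_clmulepi64_si128(a, b, 0x01), /* a1*b0 */
                                  _mm_clmulepi64_si128(a, b, 0x10));/* a0*b1 */

        /* fold the 128-bit middle term across the 256-bit result */
        *hi = _mm_xor_si128(h, _mm_srli_si128(m, 8));
        *lo = _mm_xor_si128(l, _mm_slli_si128(m, 8));
    }
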
1603 .macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
1622 vpclmulqdq $0x11, \T5, \XMM2, \T4
1623 vpxor \T4, \T6, \T6
1625 vpclmulqdq $0x00, \T5, \XMM2, \T4
1626 vpxor \T4, \T7, \T7
1637 vpclmulqdq $0x11, \T5, \XMM3, \T4
1638 vpxor \T4, \T6, \T6
1640 vpclmulqdq $0x00, \T5, \XMM3, \T4
1641 vpxor \T4, \T7, \T7
1652 vpclmulqdq $0x11, \T5, \XMM4, \T4
1653 vpxor \T4, \T6, \T6
1655 vpclmulqdq $0x00, \T5, \XMM4, \T4
1656 vpxor \T4, \T7, \T7
1667 vpclmulqdq $0x11, \T5, \XMM5, \T4
1668 vpxor \T4, \T6, \T6
1670 vpclmulqdq $0x00, \T5, \XMM5, \T4
1671 vpxor \T4, \T7, \T7
1682 vpclmulqdq $0x11, \T5, \XMM6, \T4
1683 vpxor \T4, \T6, \T6
1685 vpclmulqdq $0x00, \T5, \XMM6, \T4
1686 vpxor \T4, \T7, \T7
1697 vpclmulqdq $0x11, \T5, \XMM7, \T4
1698 vpxor \T4, \T6, \T6
1700 vpclmulqdq $0x00, \T5, \XMM7, \T4
1701 vpxor \T4, \T7, \T7
1712 vpclmulqdq $0x11, \T5, \XMM8, \T4
1713 vpxor \T4, \T6, \T6
1715 vpclmulqdq $0x00, \T5, \XMM8, \T4
1716 vpxor \T4, \T7, \T7
1728 vpslldq $8, \T2, \T4
1731 vpxor \T4, \T7, \T7
1739 vpslld $25, \T7, \T4 # packed left shift << 25
1742 vpxor \T4, \T2, \T2
1754 vpsrld $7, \T7, \T4 # packed right shift >> 7
1756 vpxor \T4, \T2, \T2
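
GHASH_LAST_8_AVX multiplies the eight buffered blocks by descending key powers (XMM1 by HashKey^8 down to XMM8 by HashKey) and XORs the high and low product halves into T6:T7 ahead of the final reduction; algebraically this equals eight rounds of the serial recurrence Y = (Y xor C_i) * H once the running hash has been folded into the first block. In terms of the earlier sketches (block[] is an illustrative name for the eight byte-reflected inputs):

    #include <stdint.h>

    /* Illustrative only: one 8-block GHASH step as a sum of products
     * with descending key powers, block[0]*H^8 ^ ... ^ block[7]*H^1.
     * Uses the gf128_mul() and hkey[] sketches above. */
    static void ghash_last_8(const uint8_t block[8][16],
                             const uint8_t hkey[9][16], uint8_t acc[16])
    {
        uint8_t term[16];

        for (int j = 0; j < 16; j++)
            acc[j] = 0;

        for (int i = 0; i < 8; i++) {                /* XMM1 .. XMM8 */
            gf128_mul(block[i], hkey[8 - i], term);  /* block i times H^(8-i) */
            for (int j = 0; j < 16; j++)
                acc[j] ^= term[j];
        }
    }
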
1877 .macro GHASH_MUL_AVX2 GH HK T1 T2 T3 T4 T5
1915 .macro PRECOMPUTE_AVX2 HK T1 T2 T3 T4 T5 T6
1919 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
1922 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
1925 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
1928 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
1931 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
1934 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
1937 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
1949 .macro INITIAL_BLOCKS_AVX2 REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER
2025 GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks
2191 .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
2276 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
2295 vpxor \T3, \T4, \T4
2321 vpxor \T3, \T4, \T4
2345 vpxor \T3, \T4, \T4
2370 vpxor \T3, \T4, \T4
2394 vpxor \T3, \T4, \T4
2418 vpxor \T3, \T4, \T4
2455 vpxor \T3, \T4, \T1
2528 vpclmulqdq $0x10, \T7, \T3, \T4
2529 vpslldq $4, \T4, \T4 # shift-L T4 1 DW (Shift-L 1-DW to obtain result with no shifts)
2531 vpxor \T2, \T4, \T4 # second phase of the reduction complete
2533 vpxor \T4, \T1, \T1 # the result is in T1
2553 .macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
2577 vpclmulqdq $0x11, \T5, \XMM2, \T4
2578 vpxor \T4, \T6, \T6
2580 vpclmulqdq $0x00, \T5, \XMM2, \T4
2581 vpxor \T4, \T7, \T7
2595 vpclmulqdq $0x11, \T5, \XMM3, \T4
2596 vpxor \T4, \T6, \T6
2598 vpclmulqdq $0x00, \T5, \XMM3, \T4
2599 vpxor \T4, \T7, \T7
2613 vpclmulqdq $0x11, \T5, \XMM4, \T4
2614 vpxor \T4, \T6, \T6
2616 vpclmulqdq $0x00, \T5, \XMM4, \T4
2617 vpxor \T4, \T7, \T7
2631 vpclmulqdq $0x11, \T5, \XMM5, \T4
2632 vpxor \T4, \T6, \T6
2634 vpclmulqdq $0x00, \T5, \XMM5, \T4
2635 vpxor \T4, \T7, \T7
2649 vpclmulqdq $0x11, \T5, \XMM6, \T4
2650 vpxor \T4, \T6, \T6
2652 vpclmulqdq $0x00, \T5, \XMM6, \T4
2653 vpxor \T4, \T7, \T7
2667 vpclmulqdq $0x11, \T5, \XMM7, \T4
2668 vpxor \T4, \T6, \T6
2670 vpclmulqdq $0x00, \T5, \XMM7, \T4
2671 vpxor \T4, \T7, \T7
2685 vpclmulqdq $0x11, \T5, \XMM8, \T4
2686 vpxor \T4, \T6, \T6
2688 vpclmulqdq $0x00, \T5, \XMM8, \T4
2689 vpxor \T4, \T7, \T7
2700 vpslldq $8, \T2, \T4
2703 vpxor \T4, \T7, \T7
2722 vpclmulqdq $0x10, \T7, \T3, \T4
2723 vpslldq $4, \T4, \T4 # shift-L T4 1 DW (Shift-L 1-DW to obtain result with no shifts)
2725 vpxor \T2, \T4, \T4 # second phase of the reduction complete
2727 vpxor \T4, \T6, \T6 # the result is in T6
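
For completeness, a small self-check that ties the illustrative sketches above together; the key bytes are arbitrary and the test only exercises the field identity H^4 = (H^2)^2, not any kernel code path.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t hkey[9][16] = { 0 };
        uint8_t h4_direct[16];

        for (int i = 0; i < 16; i++)
            hkey[1][i] = (uint8_t)(i * 17 + 1);      /* arbitrary test key */

        precompute_hash_powers(hkey);                /* hkey[2..8] = H^2..H^8 */
        gf128_mul(hkey[2], hkey[2], h4_direct);      /* (H^2)^2 */

        printf("%s\n", memcmp(h4_direct, hkey[4], 16) ? "mismatch" : "ok");
        return 0;
    }
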