Lines matching refs:rip (occurrences of RIP-relative %rip addressing)

349 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
364 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
368 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
371 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
376 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
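
All of these vpshufb lines apply the same RIP-relative constant, SHUF_MASK, to reverse the byte order of an XMM register: GCM defines its blocks in big-endian order, while the integer SIMD instructions are little-endian. A minimal sketch of the usual pattern follows; the constant value shown is the conventional byte-reversal control and is an assumption, not quoted from the matched file:

        .section .rodata
        .align 16
SHUF_MASK:                  # assumed value: control bytes 15,14,...,0
        .octa 0x000102030405060708090A0B0C0D0E0F

        .text
        # result byte i = source byte (15 - i): a full 16-byte swap
        vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
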
399 vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
401 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
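
Line 399 increments the 32-bit counter word of the GCM counter block (Yn). The counter is held byte-reversed in the register so that a plain little-endian dword add works, and the vpshufb on line 401 restores big-endian block order before the block is encrypted. A hedged sketch, assuming ONE carries a 1 in the low dword and SHUF_MASK is as in the previous sketch:

        .align 16
ONE:    .octa 0x00000000000000000000000000000001   # +1 in the low 32-bit lane

        # %xmm9: counter block in byte-reversed (little-endian) form
        vpaddd  ONE(%rip), %xmm9, %xmm9             # counter += 1
        vpshufb SHUF_MASK(%rip), %xmm9, %xmm9       # back to big-endian order
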
414 lea SHIFT_MASK+16(%rip), %r12
431 lea SHIFT_MASK+16(%rip), %r12
448 vpshufb SHUF_MASK(%rip), %xmm2, %xmm2
457 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
461 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext
516 vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap
585 vpshufb SHUF_MASK(%rip), \T7, \T7
626 leaq ALL_F(%rip), %r11
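
The lea SHIFT_MASK+16(%rip) and leaq ALL_F(%rip) lines belong to the partial-block (length mod 16 != 0) handling: the constants are laid out back to back so that a single pointer, offset by the partial length, yields both a vpshufb shift control and a keep-the-valid-bytes AND mask. A sketch of the assumed layout and use, with %r13 holding the number of valid bytes (the register roles are assumptions):

        .align 16
SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100  # identity shuffle
ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
            .octa 0x00000000000000000000000000000000

        lea     SHIFT_MASK+16(%rip), %r12
        sub     %r13, %r12            # point %r13 bytes before ALL_F
        vmovdqu (%r12), %xmm2         # shuffle control shifting by 16-%r13
        vmovdqu 16(%r12), %xmm1       # %r13 bytes of 0xFF, then zeros
        vpand   %xmm1, %xmm9, %xmm9   # keep only the %r13 valid bytes
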
633 vpshufb SHUF_MASK(%rip), \T7, \T7
653 vpshufb SHUF_MASK(%rip), %xmm0, %xmm0
658 vpshufb SHUF_MASK(%rip), %xmm6, %xmm6
669 vpcmpeqd TWOONE(%rip), %xmm2, %xmm2
670 vpand POLY(%rip), %xmm2, %xmm2
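
The TWOONE/POLY pair on lines 669-670 implements the usual conditional reduction when deriving HashKey<<1 mod poly during key setup: after shifting the hash key left by one bit, a dword compare against TWOONE produces an all-ones pattern in the lanes that matter exactly when the shifted-out bit was set, and ANDing with POLY turns that into either the reduction polynomial or zero. A hedged sketch (the constants are the conventional values, and the surrounding register setup is assumed):

        .align 16
TWOONE: .octa 0x00000001000000000000000000000001
POLY:   .octa 0xC2000000000000000000000000000001

        # %xmm2 was derived from the bit shifted out of H<<1
        vpcmpeqd TWOONE(%rip), %xmm2, %xmm2  # all-ones lanes where equal
        vpand    POLY(%rip), %xmm2, %xmm2    # POLY if the carry was set, else 0
        vpxor    %xmm2, %xmm6, %xmm6         # conditional mod-poly reduction
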
740 lea SHIFT_MASK(%rip), %r12
767 vmovdqa SHUF_MASK(%rip), %xmm10
802 vmovdqa SHUF_MASK(%rip), %xmm1
821 vmovdqa SHUF_MASK(%rip), %xmm10
980 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
982 vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap
1031 vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations
1058 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1060 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
1062 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1064 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
1066 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1068 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
1070 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1072 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
1074 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1076 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
1078 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1080 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
1082 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1084 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
1086 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1088 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
1184 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
1186 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
1187 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
1188 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
1189 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
1190 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
1191 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
1192 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
1216 vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT
1217 vpaddd ONE(%rip), \XMM1, \XMM2
1218 vpaddd ONE(%rip), \XMM2, \XMM3
1219 vpaddd ONE(%rip), \XMM3, \XMM4
1220 vpaddd ONE(%rip), \XMM4, \XMM5
1221 vpaddd ONE(%rip), \XMM5, \XMM6
1222 vpaddd ONE(%rip), \XMM6, \XMM7
1223 vpaddd ONE(%rip), \XMM7, \XMM8
1226 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
1227 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
1228 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
1229 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
1230 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
1231 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
1232 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
1233 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
1235 vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT
1236 vpaddd ONEf(%rip), \XMM1, \XMM2
1237 vpaddd ONEf(%rip), \XMM2, \XMM3
1238 vpaddd ONEf(%rip), \XMM3, \XMM4
1239 vpaddd ONEf(%rip), \XMM4, \XMM5
1240 vpaddd ONEf(%rip), \XMM5, \XMM6
1241 vpaddd ONEf(%rip), \XMM6, \XMM7
1242 vpaddd ONEf(%rip), \XMM7, \XMM8
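
Lines 1216-1242 show the two counter-generation paths for the eight parallel blocks. The ONE path increments in little-endian form and then byte-swaps all eight registers; the ONEf path instead adds a constant whose single set bit lands on the counter's least-significant byte as stored in big-endian (wire) order, so no vpshufb is needed. That shortcut is only safe while the low counter byte cannot carry across the eight increments, which is why both paths exist. A sketch of the assumed ONEf constant and its effect:

        .align 16
ONEf:   .octa 0x01000000000000000000000000000000   # 1 in register byte 15

        # counter kept in big-endian block order: register byte 15 is the
        # least-significant counter byte (bits 24-31 of dword 3)
        vpaddd  ONEf(%rip), %xmm1, %xmm2    # counter += 1, no byte swap
        # valid only while byte 15 does not wrap (no carry into byte 14)
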
1553 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
1554 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
1555 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
1556 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
1557 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
1558 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
1559 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
1560 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
1862 vmovdqa POLY2(%rip), \T3
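
POLY2 (lines 1862, 2473, 2677) feeds the two-phase GHASH reduction in the AVX2 path: carry-less multiplies against a folded form of the GHASH polynomial fold the upper half of the 256-bit product back into the lower 128 bits. A rough sketch of the first phase, with the constant value and register roles assumed rather than quoted from the matched file:

        .align 16
POLY2:  .octa 0xC20000000000000000000000C2000000   # assumed folded polynomial

        vmovdqa    POLY2(%rip), %xmm3
        # first phase: multiply the low qword of the accumulator by the
        # polynomial and fold the result into the accumulator's high half
        vpclmulqdq $0x01, %xmm7, %xmm3, %xmm2
        vpslldq    $8, %xmm2, %xmm2          # shift left by 2 dwords
        vpxor      %xmm2, %xmm7, %xmm7       # first phase of the reduction
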
1928 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
1930 vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap
1981 vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations
2008 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2010 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
2012 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2014 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
2016 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2018 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
2020 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2022 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
2024 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2026 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
2028 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2030 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
2032 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2034 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
2036 vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
2038 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2135 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
2138 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
2139 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
2140 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
2141 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
2142 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
2143 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
2144 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2171 vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT
2172 vpaddd ONE(%rip), \XMM1, \XMM2
2173 vpaddd ONE(%rip), \XMM2, \XMM3
2174 vpaddd ONE(%rip), \XMM3, \XMM4
2175 vpaddd ONE(%rip), \XMM4, \XMM5
2176 vpaddd ONE(%rip), \XMM5, \XMM6
2177 vpaddd ONE(%rip), \XMM6, \XMM7
2178 vpaddd ONE(%rip), \XMM7, \XMM8
2181 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
2182 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
2183 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
2184 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
2185 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
2186 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
2187 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
2188 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2190 vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT
2191 vpaddd ONEf(%rip), \XMM1, \XMM2
2192 vpaddd ONEf(%rip), \XMM2, \XMM3
2193 vpaddd ONEf(%rip), \XMM3, \XMM4
2194 vpaddd ONEf(%rip), \XMM4, \XMM5
2195 vpaddd ONEf(%rip), \XMM5, \XMM6
2196 vpaddd ONEf(%rip), \XMM6, \XMM7
2197 vpaddd ONEf(%rip), \XMM7, \XMM8
2473 vmovdqa POLY2(%rip), \T3
2503 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
2504 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
2505 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
2506 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
2507 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
2508 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
2509 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
2510 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2677 vmovdqa POLY2(%rip), \T3