Lines Matching refs:mov

133 mov 8*0($a_ptr), $a0
135 mov 8*1($a_ptr), $a1
137 mov 8*2($a_ptr), $a2
139 mov 8*3($a_ptr), $a3
141 mov $a0, $t0
144 mov $a1, $t1
148 mov $a2, $t2
151 mov $a3, $t3
157 mov $a0, 8*0($r_ptr)
159 mov $a1, 8*1($r_ptr)
161 mov $a2, 8*2($r_ptr)
162 mov $a3, 8*3($r_ptr)
164 mov 0(%rsp),%r13
166 mov 8(%rsp),%r12
188 mov 8*0($a_ptr), $a0
189 mov 8*1($a_ptr), $a1
190 mov 8*2($a_ptr), $a2
191 mov $a0, $t0
192 mov 8*3($a_ptr), $a3
195 mov $a1, $t1
198 mov $a2, $t2
201 mov $a3, $t3
213 mov $a1, $t0 # a0:a3>>1
216 mov $a2, $t1
220 mov $a3, $t2
229 mov $a0, 8*0($r_ptr)
230 mov $a1, 8*1($r_ptr)
231 mov $a2, 8*2($r_ptr)
232 mov $a3, 8*3($r_ptr)
234 mov 0(%rsp),%r13
236 mov 8(%rsp),%r12
258 mov 8*0($a_ptr), $a0
260 mov 8*1($a_ptr), $a1
262 mov 8*2($a_ptr), $a2
264 mov 8*3($a_ptr), $a3
265 mov $a0, $t0
268 mov $a1, $t1
272 mov $a2, $t2
275 mov $a3, $t3
287 mov $a0, $t0
290 mov $a1, $t1
294 mov $a2, $t2
297 mov $a3, $t3
303 mov $a0, 8*0($r_ptr)
305 mov $a1, 8*1($r_ptr)
307 mov $a2, 8*2($r_ptr)
308 mov $a3, 8*3($r_ptr)
310 mov 0(%rsp),%r13
312 mov 8(%rsp),%r12
334 mov 8*0($a_ptr), $a0
336 mov 8*1($a_ptr), $a1
337 mov 8*2($a_ptr), $a2
338 mov 8*3($a_ptr), $a3
343 mov $a0, $t0
346 mov $a1, $t1
350 mov $a2, $t2
353 mov $a3, $t3
359 mov $a0, 8*0($r_ptr)
361 mov $a1, 8*1($r_ptr)
363 mov $a2, 8*2($r_ptr)
364 mov $a3, 8*3($r_ptr)
366 mov 0(%rsp),%r13
368 mov 8(%rsp),%r12
390 mov 8*0($a_ptr), $a0
392 mov 8*1($a_ptr), $a1
393 mov 8*2($a_ptr), $a2
394 mov 8*3($a_ptr), $a3
399 mov $a0, $t0
402 mov $a1, $t1
406 mov $a2, $t2
409 mov $a3, $t3
415 mov $a0, 8*0($r_ptr)
417 mov $a1, 8*1($r_ptr)
419 mov $a2, 8*2($r_ptr)
420 mov $a3, 8*3($r_ptr)
422 mov 0(%rsp),%r13
424 mov 8(%rsp),%r12
455 mov $a0, $t0
458 mov $a1, $t1
462 mov $a2, $t2
465 mov $a3, $t3
471 mov $a0, 8*0($r_ptr)
473 mov $a1, 8*1($r_ptr)
475 mov $a2, 8*2($r_ptr)
476 mov $a3, 8*3($r_ptr)
478 mov 0(%rsp),%r13
480 mov 8(%rsp),%r12
510 mov \$0x80100, %ecx
530 mov 8*0($b_org), %rax
531 mov $b_org, $b_ptr
533 mov .LordK(%rip), %r15
536 mov %rax, $t0
538 mov %rax, $acc0
539 mov $t0, %rax
540 mov %rdx, $acc1
544 mov $t0, %rax
546 mov %rdx, $acc2
550 mov $t0, %rax
553 mov $acc0, $acc5
556 mov %rdx, $acc3
559 mov $acc0, %rax
561 mov %rdx, $acc4
565 mov $acc0, $t1
567 mov $acc0, %rax
569 mov %rdx, $t0
578 mov $t1, %rax
580 mov $t1, %rdx
586 mov 8*1($b_ptr), %rax
594 mov %rax, $t0
597 mov $t0, %rax
599 mov %rdx, $t1
605 mov $t0, %rax
607 mov %rdx, $t1
613 mov $t0, %rax
616 mov $acc1, $t0
619 mov %rdx, $t1
625 mov $acc1, %rax
631 mov $acc1, $t1
633 mov $acc1, %rax
643 mov $t1, %rax
645 mov $t1, %rdx
651 mov 8*2($b_ptr), %rax
659 mov %rax, $t0
662 mov $t0, %rax
664 mov %rdx, $t1
670 mov $t0, %rax
672 mov %rdx, $t1
678 mov $t0, %rax
681 mov $acc2, $t0
684 mov %rdx, $t1
690 mov $acc2, %rax
696 mov $acc2, $t1
698 mov $acc2, %rax
708 mov $t1, %rax
710 mov $t1, %rdx
716 mov 8*3($b_ptr), %rax
724 mov %rax, $t0
727 mov $t0, %rax
729 mov %rdx, $t1
735 mov $t0, %rax
737 mov %rdx, $t1
743 mov $t0, %rax
746 mov $acc3, $t0
749 mov %rdx, $t1
755 mov $acc3, %rax
761 mov $acc3, $t1
763 mov $acc3, %rax
773 mov $t1, %rax
775 mov $t1, %rdx
788 mov $acc4, $a_ptr
790 mov $acc5, $acc3
792 mov $acc0, $t0
794 mov $acc1, $t1
803 mov $acc4, 8*0($r_ptr)
804 mov $acc5, 8*1($r_ptr)
805 mov $acc0, 8*2($r_ptr)
806 mov $acc1, 8*3($r_ptr)
808 mov 0(%rsp),%r15
810 mov 8(%rsp),%r14
812 mov 16(%rsp),%r13
814 mov 24(%rsp),%r12
816 mov 32(%rsp),%rbx
818 mov 40(%rsp),%rbp
840 mov \$0x80100, %ecx
860 mov 8*0($a_ptr), $acc0
861 mov 8*1($a_ptr), %rax
862 mov 8*2($a_ptr), $acc6
863 mov 8*3($a_ptr), $acc7
865 mov $b_org, $b_ptr
871 mov %rax, $t1 # put aside a[1]
873 mov %rax, $acc1
875 mov $acc6, %rax
876 mov %rdx, $acc2
880 mov $acc7, %rax
883 mov %rdx, $acc3
887 mov $acc7, %rax
890 mov %rdx, $acc4
894 mov %rax, $acc5
895 mov $acc6, %rax
896 mov %rdx, $acc6
901 mov $acc7, %rax
903 mov %rdx, $acc7
915 mov $acc0, %rax
926 mov %rax, $acc0
928 mov %rdx, $t1
935 mov %rdx, $t1
942 mov %rdx, $t1
944 mov $acc0, $t0
950 mov 8*0($a_ptr), %rax # modulus[0]
955 mov $acc0, $t1
957 mov 8*1($a_ptr), %rax # modulus[1]
967 mov $acc0, %rax
969 mov $acc0, %rdx
972 mov $acc1, $t0
978 mov 8*0($a_ptr), %rax
986 mov $acc1, $t1
988 mov 8*1($a_ptr), %rax
998 mov $acc1, %rax
1000 mov $acc1, %rdx
1003 mov $acc2, $t0
1009 mov 8*0($a_ptr), %rax
1017 mov $acc2, $t1
1019 mov 8*1($a_ptr), %rax
1029 mov $acc2, %rax
1031 mov $acc2, %rdx
1034 mov $acc3, $t0
1040 mov 8*0($a_ptr), %rax
1048 mov $acc3, $t1
1050 mov 8*1($a_ptr), %rax
1060 mov $acc3, %rax
1062 mov $acc3, %rdx
1077 mov $acc0, $acc4
1080 mov $acc1, %rax
1085 mov $acc2, $acc6
1088 mov $acc3, $acc7
1100 mov $acc0, 8*0($r_ptr)
1101 mov %rax, 8*1($r_ptr)
1103 mov $acc6, 8*2($r_ptr)
1105 mov $acc7, 8*3($r_ptr)
1108 mov 0(%rsp),%r15
1110 mov 8(%rsp),%r14
1112 mov 16(%rsp),%r13
1114 mov 24(%rsp),%r12
1116 mov 32(%rsp),%rbx
1118 mov 40(%rsp),%rbp
1149 mov $b_org, $b_ptr
1150 mov 8*0($b_org), %rdx
1151 mov 8*0($a_ptr), $acc1
1152 mov 8*1($a_ptr), $acc2
1153 mov 8*2($a_ptr), $acc3
1154 mov 8*3($a_ptr), $acc4
1157 mov .LordK(%rip), %r15
1165 mov $acc0, %rdx
1186 mov 8*1($b_ptr), %rdx
1207 mov $acc1, %rdx
1230 mov 8*2($b_ptr), %rdx
1251 mov $acc2, %rdx
1274 mov 8*3($b_ptr), %rdx
1295 mov $acc3, %rdx
1319 mov $acc4, $t2
1322 mov $acc5, $t3
1329 mov $acc0, $t0
1333 mov $acc1, $t1
1342 mov $acc4, 8*0($r_ptr)
1343 mov $acc5, 8*1($r_ptr)
1344 mov $acc0, 8*2($r_ptr)
1345 mov $acc1, 8*3($r_ptr)
1347 mov 0(%rsp),%r15
1349 mov 8(%rsp),%r14
1351 mov 16(%rsp),%r13
1353 mov 24(%rsp),%r12
1355 mov 32(%rsp),%rbx
1357 mov 40(%rsp),%rbp
1385 mov $b_org, $b_ptr
1386 mov 8*0($a_ptr), %rdx
1387 mov 8*1($a_ptr), $acc6
1388 mov 8*2($a_ptr), $acc7
1389 mov 8*3($a_ptr), $acc0
1397 mov %rdx, %rax # offload a[0]
1400 mov $acc6, %rdx
1412 mov $acc7, %rdx
1418 mov %rax, %rdx
1449 mov $acc0, %rdx
1468 mov $acc1, %rdx
1486 mov $acc2, %rdx
1504 mov $acc3, %rdx
1524 mov $acc4, %rdx
1527 mov $acc1, $acc6
1532 mov $acc2, $acc7
1535 mov $acc3, $acc0
1547 mov %rdx, 8*0($r_ptr)
1548 mov $acc6, 8*1($r_ptr)
1550 mov $acc7, 8*2($r_ptr)
1552 mov $acc0, 8*3($r_ptr)
1555 mov 0(%rsp),%r15
1557 mov 8(%rsp),%r14
1559 mov 16(%rsp),%r13
1561 mov 24(%rsp),%r12
1563 mov 32(%rsp),%rbx
1565 mov 40(%rsp),%rbp
1587 mov \$0x80100, %ecx
1609 mov \$0x80100, %ecx
1633 mov $b_org, $b_ptr
1634 mov 8*0($b_org), %rax
1635 mov 8*0($a_ptr), $acc1
1636 mov 8*1($a_ptr), $acc2
1637 mov 8*2($a_ptr), $acc3
1638 mov 8*3($a_ptr), $acc4
1647 mov $b_org, $b_ptr
1648 mov 8*0($b_org), %rdx
1649 mov 8*0($a_ptr), $acc1
1650 mov 8*1($a_ptr), $acc2
1651 mov 8*2($a_ptr), $acc3
1652 mov 8*3($a_ptr), $acc4
1659 mov 0(%rsp),%r15
1661 mov 8(%rsp),%r14
1663 mov 16(%rsp),%r13
1665 mov 24(%rsp),%r12
1667 mov 32(%rsp),%rbx
1669 mov 40(%rsp),%rbp
1684 mov %rax, $t1
1686 mov .Lpoly+8*1(%rip),$poly1
1687 mov %rax, $acc0
1688 mov $t1, %rax
1689 mov %rdx, $acc1
1692 mov .Lpoly+8*3(%rip),$poly3
1694 mov $t1, %rax
1696 mov %rdx, $acc2
1700 mov $t1, %rax
1702 mov %rdx, $acc3
1706 mov $acc0, %rax
1709 mov %rdx, $acc4
1720 mov $acc0, $t1
1727 mov 8*1($b_ptr), %rax
1734 mov %rax, $t1
1737 mov $t1, %rax
1739 mov %rdx, $t0
1745 mov $t1, %rax
1747 mov %rdx, $t0
1753 mov $t1, %rax
1755 mov %rdx, $t0
1761 mov $acc1, %rax
1767 mov $acc1, $t1
1774 mov 8*2($b_ptr), %rax
1781 mov %rax, $t1
1784 mov $t1, %rax
1786 mov %rdx, $t0
1792 mov $t1, %rax
1794 mov %rdx, $t0
1800 mov $t1, %rax
1802 mov %rdx, $t0
1808 mov $acc2, %rax
1814 mov $acc2, $t1
1821 mov 8*3($b_ptr), %rax
1828 mov %rax, $t1
1831 mov $t1, %rax
1833 mov %rdx, $t0
1839 mov $t1, %rax
1841 mov %rdx, $t0
1847 mov $t1, %rax
1849 mov %rdx, $t0
1855 mov $acc3, %rax
1861 mov $acc3, $t1
1867 mov $acc4, $t0
1870 mov $acc5, $t1
1876 mov $acc0, $t2
1879 mov $acc1, $t3
1885 mov $acc4, 8*0($r_ptr)
1887 mov $acc5, 8*1($r_ptr)
1889 mov $acc0, 8*2($r_ptr)
1890 mov $acc1, 8*3($r_ptr)
1910 mov \$0x80100, %ecx
1933 mov 8*0($a_ptr), %rax
1934 mov 8*1($a_ptr), $acc6
1935 mov 8*2($a_ptr), $acc7
1936 mov 8*3($a_ptr), $acc0
1945 mov 8*0($a_ptr), %rdx
1946 mov 8*1($a_ptr), $acc6
1947 mov 8*2($a_ptr), $acc7
1948 mov 8*3($a_ptr), $acc0
1955 mov 0(%rsp),%r15
1957 mov 8(%rsp),%r14
1959 mov 16(%rsp),%r13
1961 mov 24(%rsp),%r12
1963 mov 32(%rsp),%rbx
1965 mov 40(%rsp),%rbp
1978 mov %rax, $acc5
1980 mov %rax, $acc1
1981 mov $acc7, %rax
1982 mov %rdx, $acc2
1986 mov $acc0, %rax
1988 mov %rdx, $acc3
1992 mov $acc7, %rax
1994 mov %rdx, $acc4
1999 mov $acc0, %rax
2001 mov %rdx, $t1
2005 mov $acc0, %rax
2008 mov %rdx, $acc5
2015 mov 8*0($a_ptr), %rax
2016 mov %rdx, $acc6
2028 mov %rax, $acc0
2029 mov 8*1($a_ptr), %rax
2030 mov %rdx, $t0
2035 mov 8*2($a_ptr), %rax
2037 mov %rdx, $t0
2042 mov 8*3($a_ptr), %rax
2044 mov %rdx, $t0
2049 mov $acc0, %rax
2052 mov .Lpoly+8*1(%rip), $a_ptr
2053 mov .Lpoly+8*3(%rip), $t1
2058 mov $acc0, $t0
2065 mov $acc1, %rax
2070 mov $acc1, $t0
2072 mov %rdx, $acc0
2078 mov $acc2, %rax
2083 mov $acc2, $t0
2085 mov %rdx, $acc1
2091 mov $acc3, %rax
2096 mov $acc3, $t0
2098 mov %rdx, $acc2
2111 mov $acc4, $acc0
2114 mov $acc5, $acc1
2118 mov $acc6, $acc2
2121 mov $acc7, $t0
2127 mov $acc4, 8*0($r_ptr)
2129 mov $acc5, 8*1($r_ptr)
2131 mov $acc6, 8*2($r_ptr)
2132 mov $acc7, 8*3($r_ptr)
2149 mov \$32, $poly1
2152 mov .Lpoly+8*3(%rip), $poly3
2155 mov $acc0, %rdx
2168 mov 8*1($b_ptr), %rdx
2189 mov $acc1, %rdx
2205 mov 8*2($b_ptr), %rdx
2226 mov $acc2, %rdx
2242 mov 8*3($b_ptr), %rdx
2263 mov $acc3, %rdx
2279 mov $acc4, $t2
2280 mov .Lpoly+8*1(%rip), $poly1
2282 mov $acc5, $t3
2289 mov $acc0, $t0
2293 mov $acc1, $t1
2299 mov $acc4, 8*0($r_ptr)
2301 mov $acc5, 8*1($r_ptr)
2303 mov $acc0, 8*2($r_ptr)
2304 mov $acc1, 8*3($r_ptr)
2319 mov $acc6, %rdx
2330 mov $acc7, %rdx
2337 mov 8*0+128($a_ptr), %rdx
2345 mov 8*1+128($a_ptr), %rdx
2350 mov 8*2+128($a_ptr), %rdx
2356 mov 8*3+128($a_ptr), %rdx
2360 mov \$32, $a_ptr
2364 mov .Lpoly+8*3(%rip), %rdx
2369 mov %rdx,$t1
2411 mov .Lpoly+8*1(%rip), $a_ptr
2413 mov $acc4, $acc0
2416 mov $acc5, $acc1
2420 mov $acc6, $acc2
2423 mov $acc7, $acc3
2429 mov $acc4, 8*0($r_ptr)
2431 mov $acc5, 8*1($r_ptr)
2433 mov $acc6, 8*2($r_ptr)
2434 mov $acc7, 8*3($r_ptr)
2465 mov 8*0($in_ptr), %rax
2466 mov .Lpoly+8*3(%rip), $t2
2467 mov 8*1($in_ptr), $acc1
2468 mov 8*2($in_ptr), $acc2
2469 mov 8*3($in_ptr), $acc3
2470 mov %rax, $acc0
2471 mov .Lpoly+8*1(%rip), $t1
2475 mov %rax, $t0
2482 mov $acc1, %rax
2487 mov $acc1, $t0
2489 mov %rdx, $acc0
2495 mov $acc2, %rax
2500 mov $acc2, $t0
2502 mov %rdx, $acc1
2508 mov $acc3, %rax
2513 mov $acc3, $t0
2515 mov %rdx, $acc2
2520 mov $acc0, $t0
2522 mov $acc1, $in_ptr
2528 mov $acc2, %rax
2531 mov %rdx, $acc3
2537 mov $acc0, 8*0($r_ptr)
2539 mov $acc1, 8*1($r_ptr)
2541 mov $acc2, 8*2($r_ptr)
2542 mov $acc3, 8*3($r_ptr)
2544 mov 0(%rsp),%r13
2546 mov 8(%rsp),%r12
2598 mov OPENSSL_ia32cap_P+8(%rip), %eax
2631 mov \$16, %rax
2718 mov OPENSSL_ia32cap_P+8(%rip), %eax
2748 mov \$64, %rax
2816 mov %rsp,%r11
2843 mov \$8, %rax
2923 mov %rsp,%r11
2952 mov \$21, %rax
3063 " mov $b, $src0
3065 mov 8*0+$a, $acc1
3066 mov 8*1+$a, $acc2
3068 mov 8*2+$a, $acc3
3069 mov 8*3+$a, $acc4"
3076 " mov 8*0+$a, $src0
3077 mov 8*1+$a, $acc6
3079 mov 8*2+$a, $acc7
3080 mov 8*3+$a, $acc0"
3097 mov $a0, $t0
3100 mov $a1, $t1
3104 mov $a2, $t2
3107 mov $a3, $t3
3113 mov $a0, 8*0($r_ptr)
3115 mov $a1, 8*1($r_ptr)
3117 mov $a2, 8*2($r_ptr)
3118 mov $a3, 8*3($r_ptr)
3130 mov $a0, $t0
3133 mov $a1, $t1
3137 mov $a2, $t2
3140 mov $a3, $t3
3146 mov $a0, 8*0($r_ptr)
3148 mov $a1, 8*1($r_ptr)
3150 mov $a2, 8*2($r_ptr)
3151 mov $a3, 8*3($r_ptr)
3163 mov $t0, $a0
3166 mov $t1, $a1
3170 mov $t2, $a2
3173 mov $t3, $a3
3193 mov $a0, $t0
3196 mov $a1, $t1
3200 mov $a2, $t2
3203 mov $a3, $t3
3209 mov $a0, 8*0($r_ptr)
3211 mov $a1, 8*1($r_ptr)
3213 mov $a2, 8*2($r_ptr)
3214 mov $a3, 8*3($r_ptr)
3239 mov \$0x80100, %ecx
3276 mov $a_ptr, $b_ptr # backup copy
3278 mov 0x20+8*0($a_ptr), $acc4 # load in_y in "5-4-0-1" order
3279 mov 0x20+8*1($a_ptr), $acc5
3280 mov 0x20+8*2($a_ptr), $acc0
3281 mov 0x20+8*3($a_ptr), $acc1
3282 mov .Lpoly+8*1(%rip), $poly1
3283 mov .Lpoly+8*3(%rip), $poly3
3295 mov 0x40+8*0($a_ptr), $src0
3296 mov 0x40+8*1($a_ptr), $acc6
3297 mov 0x40+8*2($a_ptr), $acc7
3298 mov 0x40+8*3($a_ptr), $acc0
3307 mov 0x20($b_ptr), $src0 # $b_ptr is still valid
3308 mov 0x40+8*0($b_ptr), $acc1
3309 mov 0x40+8*1($b_ptr), $acc2
3310 mov 0x40+8*2($b_ptr), $acc3
3311 mov 0x40+8*3($b_ptr), $acc4
3318 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
3319 mov $in_x+8*1(%rsp), $acc5
3321 mov $in_x+8*2(%rsp), $acc0
3322 mov $in_x+8*3(%rsp), $acc1
3326 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
3327 mov $in_x+8*1(%rsp), $acc5
3329 mov $in_x+8*2(%rsp), $acc0
3330 mov $in_x+8*3(%rsp), $acc1
3347 mov $a0, $t0
3349 mov $a1, $t1
3351 mov $a2, $t2
3353 mov $a3, $t3
3365 mov $a1, $t0 # a0:a3>>1
3368 mov $a2, $t1
3372 mov $a3, $t2
3376 mov $a0, 8*0($r_ptr)
3378 mov $a1, 8*1($r_ptr)
3382 mov $a2, 8*2($r_ptr)
3383 mov $a3, 8*3($r_ptr)
3410 mov $acc6, $acc0 # harmonize sqr output and sub input
3411 mov $acc7, $acc1
3412 mov $a_ptr, $poly1
3413 mov $t1, $poly3
3416 mov $S+8*0(%rsp), $t0
3417 mov $S+8*1(%rsp), $t1
3418 mov $S+8*2(%rsp), $t2
3419 mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
3423 mov $M(%rsp), $src0
3425 mov $acc4, $acc6 # harmonize sub output and mul input
3427 mov $acc4, $S+8*0(%rsp) # have to save:-(
3428 mov $acc5, $acc2
3429 mov $acc5, $S+8*1(%rsp)
3431 mov $acc0, $S+8*2(%rsp)
3434 mov $acc1, $S+8*3(%rsp)
3435 mov $acc6, $acc1
3445 mov -48(%rsi),%r15
3447 mov -40(%rsi),%r14
3449 mov -32(%rsi),%r13
3451 mov -24(%rsi),%r12
3453 mov -16(%rsi),%rbx
3455 mov -8(%rsi),%rbp
3490 mov \$0x80100, %ecx
3531 mov $a_ptr, $b_ptr # reassign
3532 mov $b_org, $a_ptr # reassign
3547 mov 0x40+8*0($a_ptr), $src0 # load original in2_z
3548 mov 0x40+8*1($a_ptr), $acc6
3549 mov 0x40+8*2($a_ptr), $acc7
3550 mov 0x40+8*3($a_ptr), $acc0
3564 mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
3565 mov $acc6, $in2_z+8*1(%rsp)
3566 mov $acc7, $in2_z+8*2(%rsp)
3567 mov $acc0, $in2_z+8*3(%rsp)
3580 mov 0x40+8*0($b_ptr), $src0 # load original in1_z
3581 mov 0x40+8*1($b_ptr), $acc6
3582 mov 0x40+8*2($b_ptr), $acc7
3583 mov 0x40+8*3($b_ptr), $acc0
3693 mov $acc0, $t0
3696 mov $acc1, $t1
3700 mov $acc2, $t2
3703 mov $acc3, $t3
3708 mov 8*0($a_ptr), $t0
3710 mov 8*1($a_ptr), $t1
3712 mov 8*2($a_ptr), $t2
3714 mov 8*3($a_ptr), $t3
3722 mov $U2+8*0(%rsp), $t0
3723 mov $U2+8*1(%rsp), $t1
3724 mov $U2+8*2(%rsp), $t2
3725 mov $U2+8*3(%rsp), $t3
3730 mov $acc0, 8*0($r_ptr) # save the result, as
3731 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
3732 mov $acc2, 8*2($r_ptr)
3733 mov $acc3, 8*3($r_ptr)
3826 mov -48(%rsi),%r15
3828 mov -40(%rsi),%r14
3830 mov -32(%rsi),%r13
3832 mov -24(%rsi),%r12
3834 mov -16(%rsi),%rbx
3836 mov -8(%rsi),%rbp
3870 mov \$0x80100, %ecx
3906 mov $b_org, $b_ptr # reassign
3912 mov 0x40+8*0($a_ptr), $src0 # load original in1_z
3913 mov 0x40+8*1($a_ptr), $acc6
3914 mov 0x40+8*2($a_ptr), $acc7
3915 mov 0x40+8*3($a_ptr), $acc0
3948 mov 0x00($b_ptr), $src0 # $b_ptr is still valid
3950 mov $acc4, $acc1 # harmonize sqr output and mul input
3954 mov $acc5, $acc2
3957 mov $acc6, $acc3
3962 mov $acc7, $acc4
4018 mov $acc0, $t0
4021 mov $acc1, $t1
4025 mov $acc2, $t2
4028 mov $acc3, $t3
4033 mov 8*0($a_ptr), $t0
4035 mov 8*1($a_ptr), $t1
4037 mov 8*2($a_ptr), $t2
4039 mov 8*3($a_ptr), $t3
4047 mov $U2+8*0(%rsp), $t0
4048 mov $U2+8*1(%rsp), $t1
4049 mov $U2+8*2(%rsp), $t2
4050 mov $U2+8*3(%rsp), $t3
4055 mov $acc0, 8*0($r_ptr) # save the result, as
4056 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
4057 mov $acc2, 8*2($r_ptr)
4058 mov $acc3, 8*3($r_ptr)
4150 mov -48(%rsi),%r15
4152 mov -40(%rsi),%r14
4154 mov -32(%rsi),%r13
4156 mov -24(%rsi),%r12
4158 mov -16(%rsi),%rbx
4160 mov -8(%rsi),%rbp
4189 mov $a0, $t0
4192 mov $a1, $t1
4197 mov $a2, $t2
4200 mov $a3, $t3
4206 mov $a0, 8*0($r_ptr)
4208 mov $a1, 8*1($r_ptr)
4210 mov $a2, 8*2($r_ptr)
4211 mov $a3, 8*3($r_ptr)
4224 mov $a0, $t0
4227 mov $a1, $t1
4232 mov $a2, $t2
4235 mov $a3, $t3
4241 mov $a0, 8*0($r_ptr)
4243 mov $a1, 8*1($r_ptr)
4245 mov $a2, 8*2($r_ptr)
4246 mov $a3, 8*3($r_ptr)
4259 mov $t0, $a0
4262 mov $t1, $a1
4267 mov $t2, $a2
4270 mov $t3, $a3
4290 mov $a0, $t0
4293 mov $a1, $t1
4298 mov $a2, $t2
4301 mov $a3, $t3
4307 mov $a0, 8*0($r_ptr)
4309 mov $a1, 8*1($r_ptr)
4311 mov $a2, 8*2($r_ptr)
4312 mov $a3, 8*3($r_ptr)
4350 mov 120($context),%rax # pull context->Rax
4351 mov 248($context),%rbx # pull context->Rip
4353 mov 8($disp),%rsi # disp->ImageBase
4354 mov 56($disp),%r11 # disp->HandlerData
4356 mov 0(%r11),%r10d # HandlerData[0]
4361 mov 152($context),%rax # pull context->Rsp
4363 mov 4(%r11),%r10d # HandlerData[1]
4370 mov -8(%rax),%r12
4371 mov -16(%rax),%r13
4372 mov %r12,216($context) # restore context->R12
4373 mov %r13,224($context) # restore context->R13
4392 mov 120($context),%rax # pull context->Rax
4393 mov 248($context),%rbx # pull context->Rip
4395 mov 8($disp),%rsi # disp->ImageBase
4396 mov 56($disp),%r11 # disp->HandlerData
4398 mov 0(%r11),%r10d # HandlerData[0]
4403 mov 152($context),%rax # pull context->Rsp
4405 mov 4(%r11),%r10d # HandlerData[1]
4410 mov 8(%r11),%r10d # HandlerData[2]
4413 mov -8(%rax),%rbp
4414 mov -16(%rax),%rbx
4415 mov -24(%rax),%r12
4416 mov -32(%rax),%r13
4417 mov -40(%rax),%r14
4418 mov -48(%rax),%r15
4419 mov %rbx,144($context) # restore context->Rbx
4420 mov %rbp,160($context) # restore context->Rbp
4421 mov %r12,216($context) # restore context->R12
4422 mov %r13,224($context) # restore context->R13
4423 mov %r14,232($context) # restore context->R14
4424 mov %r15,240($context) # restore context->R15
4427 mov 8(%rax),%rdi
4428 mov 16(%rax),%rsi
4429 mov %rax,152($context) # restore context->Rsp
4430 mov %rsi,168($context) # restore context->Rsi
4431 mov %rdi,176($context) # restore context->Rdi
4433 mov 40($disp),%rdi # disp->ContextRecord
4434 mov $context,%rsi # context
4435 mov \$154,%ecx # sizeof(CONTEXT)
4438 mov $disp,%rsi
4440 mov 8(%rsi),%rdx # arg2, disp->ImageBase
4441 mov 0(%rsi),%r8 # arg3, disp->ControlPc
4442 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
4443 mov 40(%rsi),%r10 # disp->ContextRecord
4446 mov %r10,32(%rsp) # arg5
4447 mov %r11,40(%rsp) # arg6
4448 mov %r12,48(%rsp) # arg7
4449 mov %rcx,56(%rsp) # arg8, (NULL)
4452 mov \$1,%eax # ExceptionContinueSearch
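
A recurring shape in the matches above: a 256-bit field element is held as four 64-bit limbs, loaded from the source pointer (mov 8*0($a_ptr), $a0 ... mov 8*3($a_ptr), $a3), copied into scratch registers ahead of an arithmetic step (mov $a0, $t0 ...), and written back to the result pointer (mov $a0, 8*0($r_ptr) ...). A minimal C sketch of that limb layout follows; the names (limb4, copy_limbs) are hypothetical and only illustrate the load/copy/store pattern, not the reduction logic itself.

#include <stdint.h>

/* Illustrative only: a 256-bit value as four 64-bit limbs, mirroring the
 * mov 8*0($a_ptr)..8*3($a_ptr) loads and mov $aN, 8*N($r_ptr) stores
 * that appear throughout the matched lines. Names are hypothetical. */
typedef struct { uint64_t limb[4]; } limb4;

void copy_limbs(limb4 *r, const limb4 *a)
{
    uint64_t t0 = a->limb[0];   /* mov 8*0($a_ptr), $a0 */
    uint64_t t1 = a->limb[1];   /* mov 8*1($a_ptr), $a1 */
    uint64_t t2 = a->limb[2];   /* mov 8*2($a_ptr), $a2 */
    uint64_t t3 = a->limb[3];   /* mov 8*3($a_ptr), $a3 */

    r->limb[0] = t0;            /* mov $a0, 8*0($r_ptr) */
    r->limb[1] = t1;            /* mov $a1, 8*1($r_ptr) */
    r->limb[2] = t2;            /* mov $a2, 8*2($r_ptr) */
    r->limb[3] = t3;            /* mov $a3, 8*3($r_ptr) */
}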