#include "arm_arch.h"

.text

// ----------------------------------------------------------------------
// ISA-extension probes.
//
// Each __armv*_probe routine executes exactly one instruction from the
// optional extension being tested and returns.  The C caller is expected
// to install a SIGILL handler first: if the instruction is undefined on
// this core, the probe faults and the handler reports the extension as
// absent.  No registers that the AAPCS64 caller cares about are touched
// (only volatile v0/v15/x0).
// ----------------------------------------------------------------------

.align	5
.globl	__armv7_neon_probe

__armv7_neon_probe:
	orr	v15.16b, v15.16b, v15.16b	// any Advanced SIMD insn will do
	ret


.globl	__armv7_tick

// Read a cycle/tick counter into x0.
__armv7_tick:
#ifdef	__APPLE__
	mrs	x0, CNTPCT_EL0		// physical counter (EL0-readable on Apple)
#else
	mrs	x0, CNTVCT_EL0		// virtual counter (EL0-readable elsewhere)
#endif
	ret


.globl	__armv8_aes_probe

__armv8_aes_probe:
	aese	v0.16b, v0.16b		// AES extension
	ret


.globl	__armv8_sha1_probe

__armv8_sha1_probe:
	sha1h	s0, s0			// SHA1 extension
	ret


.globl	__armv8_sha256_probe

__armv8_sha256_probe:
	sha256su0	v0.4s, v0.4s	// SHA256 extension
	ret


.globl	__armv8_pmull_probe

__armv8_pmull_probe:
	pmull	v0.1q, v0.1d, v0.1d	// 64x64->128 polynomial multiply
	ret


.globl	__armv8_sha512_probe

__armv8_sha512_probe:
	// Hand-encoded so the file assembles with toolchains that do not
	// yet know the SHA512 extension mnemonics.
	.long	0xcec08000		// sha512su0 v0.2d,v0.2d
	ret


.globl	__armv8_cpuid_probe

__armv8_cpuid_probe:
	mrs	x0, midr_el1		// Main ID register -> x0
	ret


// ----------------------------------------------------------------------
// void OPENSSL_cleanse(void *ptr, size_t len)
// In:   x0 = ptr, x1 = len
// Zeroes len bytes at ptr.  Written in assembly so the compiler cannot
// optimize the wipe away.  Short buffers (<=15 bytes) are cleared
// byte-by-byte; longer ones are aligned to 8 bytes, cleared
// word-by-word, then any tail is finished byte-by-byte.
// ----------------------------------------------------------------------

.globl	_OPENSSL_cleanse

.align	5
_OPENSSL_cleanse:
	cbz	x1,Lret			// len==0?
	cmp	x1,#15
	b.hi	Lot			// len>15
	nop
Little:
	strb	wzr,[x0],#1		// store byte-by-byte
	subs	x1,x1,#1
	b.ne	Little
Lret:	ret

.align	4
Lot:	tst	x0,#7
	b.eq	Laligned		// inp is aligned
	strb	wzr,[x0],#1		// store byte-by-byte
	sub	x1,x1,#1
	b	Lot

.align	4
Laligned:
	str	xzr,[x0],#8		// store word-by-word
	sub	x1,x1,#8
	tst	x1,#-8
	b.ne	Laligned		// len>=8
	cbnz	x1,Little		// len!=0?
	ret


// ----------------------------------------------------------------------
// int CRYPTO_memcmp(const void *a, const void *b, size_t len)
// In:   x0 = a, x1 = b, x2 = len
// Out:  w0 = 0 if the buffers are equal, non-zero otherwise.
// Constant-time comparison: execution time depends only on len, never
// on where (or whether) the buffers differ.  len==16 takes a
// branch-free fast path (two 8-byte loads per buffer); every other
// length accumulates XOR differences byte-by-byte in w3.
// ----------------------------------------------------------------------

.globl	_CRYPTO_memcmp

.align	4
_CRYPTO_memcmp:
	eor	w3,w3,w3		// difference accumulator = 0
	cbz	x2,Lno_data		// len==0?
	cmp	x2,#16
	b.ne	Loop_cmp
	// fast path: exactly 16 bytes, compared without a data-dependent branch
	ldp	x8,x9,[x0]
	ldp	x10,x11,[x1]
	eor	x8,x8,x10
	eor	x9,x9,x11
	orr	x8,x8,x9		// non-zero iff any byte differed
	mov	x0,#1
	cmp	x8,#0
	csel	x0,xzr,x0,eq		// x0 = (x8==0) ? 0 : 1
	ret

.align	4
Loop_cmp:
	ldrb	w4,[x0],#1
	ldrb	w5,[x1],#1
	eor	w4,w4,w5
	orr	w3,w3,w4		// fold per-byte difference into w3
	subs	x2,x2,#1
	b.ne	Loop_cmp

Lno_data:
	neg	w0,w3			// map w3: 0 -> 0, non-zero -> sign bit set
	lsr	w0,w0,#31		// w0 = 0 or 1
	ret