/*
 * strcpy/stpcpy - copy a string returning pointer to start/end.
 *
 * Copyright (c) 2013-2020, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
 */

#include "../asmdefs.h"

/* To build as stpcpy, define BUILD_STPCPY before compiling this file.

   To test the page crossing code path more thoroughly, compile with
   -DSTRCPY_TEST_PAGE_CROSS - this will force all copies through the slower
   entry path.  This option is not intended for production use.  */

/* Arguments and results.  */
#define dstin x0
#define srcin x1

/* Locals and temporaries.  */
#define src x2
#define dst x3
#define data1 x4
#define data1w w4
#define data2 x5
#define data2w w5
#define has_nul1 x6
#define has_nul2 x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define tmp4 x11
#define zeroones x12
#define data1a x13
#define data2a x14
#define pos x15
#define len x16
#define to_align x17

#ifdef BUILD_STPCPY
#define STRCPY __stpcpy_aarch64
#else
#define STRCPY __strcpy_aarch64
#endif

        /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
           (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
           can be done in parallel across the entire word.  */

#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
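
        /* As a rough C reference only (not part of the build, and assuming
           <stdint.h>), the syndrome built from these constants can be
           sketched as:

               static inline uint64_t
               nul_syndrome (uint64_t x)
               {
                 return (x - 0x0101010101010101ULL)
                        & ~(x | 0x7f7f7f7f7f7f7f7fULL);
               }

           The result is non-zero iff some byte of x is zero.  Because the
           subtraction borrows between bytes, syndrome bits above the first
           zero byte may be spurious, so only the position of the first
           terminator is ever taken from it (and the big-endian path below
           recomputes the syndrome after a byte swap).  */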

        /* AArch64 systems have a minimum page size of 4k.  We can do a quick
           page size check for crossing this boundary on entry, and if we do
           not cross it we can short-circuit much of the entry code.  We
           expect early page-crossing strings to be rare (probability of
           16/MIN_PAGE_SIZE ~= 0.4%), so the branch should be quite
           predictable, even with random strings.

           We don't bother checking for larger page sizes; the cost of setting
           up the correct page size is just not worth the extra gain from
           a small reduction in the cases taking the slow path.  Note that
           we only care about whether the first fetch, which may be
           misaligned, crosses a page boundary - after that we move to aligned
           fetches for the remainder of the string.  */

#ifdef STRCPY_TEST_PAGE_CROSS
        /* Make everything that isn't Qword aligned look like a page cross.  */
#define MIN_PAGE_P2 4
#else
#define MIN_PAGE_P2 12
#endif

#define MIN_PAGE_SIZE (1 << MIN_PAGE_P2)

ENTRY (STRCPY)
        PTR_ARG (0)
        PTR_ARG (1)
        /* For moderately short strings, the fastest way to do the copy is to
           calculate the length of the string in the same way as strlen, then
           essentially do a memcpy of the result.  This avoids the need for
           multiple byte copies and further means that by the time we reach
           the bulk copy loop we know we can always use DWord accesses.  We
           expect __strcpy_aarch64 to rarely be called repeatedly with the
           same source string, so branch prediction is likely to always be
           difficult - we mitigate this by preferring conditional select
           operations over branches whenever this is feasible.  */
        and tmp2, srcin, #(MIN_PAGE_SIZE - 1)
        mov zeroones, #REP8_01
        and to_align, srcin, #15
        cmp tmp2, #(MIN_PAGE_SIZE - 16)
        neg tmp1, to_align
        /* The first fetch will straddle a (possible) page boundary iff
           srcin + 15 causes bit[MIN_PAGE_P2] to change value.  A 16-byte
           aligned string will never fail the page align check, so will
           always take the fast path.  */
        b.gt L(page_cross)
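
        /* Hedged C sketch of the dispatch above (illustration only, not part
           of the build): the first, possibly misaligned, 16-byte fetch can
           only cross a page if srcin falls in the last 15 bytes of a page.

               static inline int
               may_cross_page (uintptr_t srcin)
               {
                 return (srcin & (MIN_PAGE_SIZE - 1)) > MIN_PAGE_SIZE - 16;
               }
        */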

L(page_cross_ok):
        ldp data1, data2, [srcin]
#ifdef __AARCH64EB__
        /* Because we expect the end to be found within 16 characters
           (profiling shows this is the most common case), it's worth
           swapping the bytes now to save having to recalculate the
           termination syndrome later.  We preserve data1 and data2
           so that we can re-use the values later on.  */
        rev tmp2, data1
        sub tmp1, tmp2, zeroones
        orr tmp2, tmp2, #REP8_7f
        bics has_nul1, tmp1, tmp2
        b.ne L(fp_le8)
        rev tmp4, data2
        sub tmp3, tmp4, zeroones
        orr tmp4, tmp4, #REP8_7f
#else
        sub tmp1, data1, zeroones
        orr tmp2, data1, #REP8_7f
        bics has_nul1, tmp1, tmp2
        b.ne L(fp_le8)
        sub tmp3, data2, zeroones
        orr tmp4, data2, #REP8_7f
#endif
        bics has_nul2, tmp3, tmp4
        b.eq L(bulk_entry)

        /* The string is short (<= 16 bytes), but we don't yet know exactly
           how short.  Work out the exact length so that we can quickly
           select the optimal copy strategy.  */
L(fp_gt8):
        rev has_nul2, has_nul2
        clz pos, has_nul2
        mov tmp2, #56
        add dst, dstin, pos, lsr #3     /* Bits to bytes.  */
        sub pos, tmp2, pos
#ifdef __AARCH64EB__
        lsr data2, data2, pos
#else
        lsl data2, data2, pos
#endif
        str data2, [dst, #1]
        str data1, [dstin]
#ifdef BUILD_STPCPY
        add dstin, dst, #8
#endif
        ret

L(fp_le8):
        rev has_nul1, has_nul1
        clz pos, has_nul1
        add dst, dstin, pos, lsr #3     /* Bits to bytes.  */
        subs tmp2, pos, #24             /* Pos in bits.  */
        b.lt L(fp_lt4)
#ifdef __AARCH64EB__
        mov tmp2, #56
        sub pos, tmp2, pos
        lsr data2, data1, pos
        lsr data1, data1, #32
#else
        lsr data2, data1, tmp2
#endif
        /* 4->7 bytes to copy.  */
        str data2w, [dst, #-3]
        str data1w, [dstin]
#ifdef BUILD_STPCPY
        mov dstin, dst
#endif
        ret
L(fp_lt4):
        cbz pos, L(fp_lt2)
        /* 2->3 bytes to copy.  */
#ifdef __AARCH64EB__
        lsr data1, data1, #48
#endif
        strh data1w, [dstin]
        /* Fall-through, one byte (max) to go.  */
L(fp_lt2):
        /* Null-terminated string.  Last character must be zero!  */
        strb wzr, [dst]
#ifdef BUILD_STPCPY
        mov dstin, dst
#endif
        ret
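
        /* For reference, a hedged C outline of the short-copy strategy used
           by the L(fp_*) paths above (illustration only, assuming <string.h>
           and <stddef.h>; the real code reuses the two registers already
           loaded rather than touching memory again).  Here len is the string
           length including the trailing NUL and is known to be <= 16:

               static void
               copy_short (char *dst, const char *src, size_t len)
               {
                 if (len > 8)
                   {
                     memcpy (dst, src, 8);
                     memcpy (dst + len - 8, src + len - 8, 8);
                   }
                 else if (len >= 4)
                   {
                     memcpy (dst, src, 4);
                     memcpy (dst + len - 4, src + len - 4, 4);
                   }
                 else if (len >= 2)
                   {
                     memcpy (dst, src, 2);
                     memcpy (dst + len - 2, src + len - 2, 2);
                   }
                 else
                   dst[0] = '\0';
               }

           Two overlapping stores of one width cover every length in a
           bucket, which is why no byte-by-byte loop is needed.  */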

        .p2align 6
        /* Aligning here ensures that the entry code and main loop all lie
           within one 64-byte cache line.  */
L(bulk_entry):
        sub to_align, to_align, #16
        stp data1, data2, [dstin]
        sub src, srcin, to_align
        sub dst, dstin, to_align
        b L(entry_no_page_cross)

        /* The inner loop deals with two Dwords at a time.  This has a
           slightly higher start-up cost, but we should win quite quickly,
           especially on cores with a high number of issue slots per
           cycle, as we get much better parallelism out of the operations.  */
L(main_loop):
        stp data1, data2, [dst], #16
L(entry_no_page_cross):
        ldp data1, data2, [src], #16
        sub tmp1, data1, zeroones
        orr tmp2, data1, #REP8_7f
        sub tmp3, data2, zeroones
        orr tmp4, data2, #REP8_7f
        bic has_nul1, tmp1, tmp2
        bics has_nul2, tmp3, tmp4
        ccmp has_nul1, #0, #0, eq       /* NZCV = 0000  */
        b.eq L(main_loop)
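
        /* A hedged C outline of the bulk loop above (illustration only; the
           real code keeps both Dwords in registers and folds the two
           syndrome tests into a single branch with ccmp).  Using the
           nul_syndrome sketch from the top of the file, with src and dst
           as char pointers aligned as set up at L(bulk_entry):

               for (;;)
                 {
                   uint64_t d1, d2;
                   memcpy (&d1, src, 8);
                   memcpy (&d2, src + 8, 8);
                   src += 16;
                   if (nul_syndrome (d1) || nul_syndrome (d2))
                     break;
                   memcpy (dst, &d1, 8);
                   memcpy (dst + 8, &d2, 8);
                   dst += 16;
                 }

           The block containing the terminator is left unstored here; the
           tail code below covers it with one final overlapping copy.  */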

        /* Since we know we are copying at least 16 bytes, the fastest way
           to deal with the tail is to determine the location of the
           trailing NUL, then (re)copy the 16 bytes leading up to that.  */
        cmp has_nul1, #0
#ifdef __AARCH64EB__
        /* For big-endian, carry propagation (if the final byte in the
           string is 0x01) means we cannot use has_nul directly.  The
           easiest way to get the correct byte is to byte-swap the data
           and calculate the syndrome a second time.  */
        csel data1, data1, data2, ne
        rev data1, data1
        sub tmp1, data1, zeroones
        orr tmp2, data1, #REP8_7f
        bic has_nul1, tmp1, tmp2
#else
        csel has_nul1, has_nul1, has_nul2, ne
#endif
        rev has_nul1, has_nul1
        clz pos, has_nul1
        add tmp1, pos, #72
        add pos, pos, #8
        csel pos, pos, tmp1, ne
        add src, src, pos, lsr #3
        add dst, dst, pos, lsr #3
        ldp data1, data2, [src, #-32]
        stp data1, data2, [dst, #-16]
#ifdef BUILD_STPCPY
        sub dstin, dst, #1
#endif
        ret
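
        /* Hedged C sketch of the first fetch on the page-crossing path
           below, little-endian case only (illustration only, assuming
           <stdint.h> and <string.h>).  The 16 bytes containing srcin are
           loaded from the preceding 16-byte-aligned address, and the bytes
           before srcin are forced to 0xff so they can never look like a
           terminator:

               const char *aligned = (const char *) ((uintptr_t) srcin
                                                     & ~(uintptr_t) 15);
               size_t ofs = (uintptr_t) srcin & 15;
               uint64_t d1, d2;
               memcpy (&d1, aligned, 8);
               memcpy (&d2, aligned + 8, 8);
               if (ofs >= 8)
                 {
                   d1 = ~0ULL;
                   if (ofs > 8)
                     d2 |= ~0ULL >> (8 * (16 - ofs));
                 }
               else if (ofs > 0)
                 d1 |= ~0ULL >> (8 * (8 - ofs));

           If the masked words contain no NUL, the terminator lies beyond the
           aligned block and therefore on the next page, so that page must be
           readable and the code can rejoin the fast path at L(page_cross_ok).
           Otherwise the pair is rotated so that d1/d2 look as though they had
           been loaded directly from srcin, and the short-copy paths above are
           reused.  */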

L(page_cross):
        bic src, srcin, #15
        /* Start by loading two words at [srcin & ~15], then forcing the
           bytes that precede srcin to 0xff.  This means they never look
           like termination bytes.  */
        ldp data1, data2, [src]
        lsl tmp1, tmp1, #3      /* Bytes beyond alignment -> bits.  */
        tst to_align, #7
        csetm tmp2, ne
#ifdef __AARCH64EB__
        lsl tmp2, tmp2, tmp1    /* Shift (tmp1 & 63).  */
#else
        lsr tmp2, tmp2, tmp1    /* Shift (tmp1 & 63).  */
#endif
        orr data1, data1, tmp2
        orr data2a, data2, tmp2
        cmp to_align, #8
        csinv data1, data1, xzr, lt
        csel data2, data2, data2a, lt
        sub tmp1, data1, zeroones
        orr tmp2, data1, #REP8_7f
        sub tmp3, data2, zeroones
        orr tmp4, data2, #REP8_7f
        bic has_nul1, tmp1, tmp2
        bics has_nul2, tmp3, tmp4
        ccmp has_nul1, #0, #0, eq       /* NZCV = 0000  */
        b.eq L(page_cross_ok)
        /* We now need to make data1 and data2 look like they've been
           loaded directly from srcin.  Do a rotate on the 128-bit value.  */
        lsl tmp1, to_align, #3  /* Bytes->bits.  */
        neg tmp2, to_align, lsl #3
#ifdef __AARCH64EB__
        lsl data1a, data1, tmp1
        lsr tmp4, data2, tmp2
        lsl data2, data2, tmp1
        orr tmp4, tmp4, data1a
        cmp to_align, #8
        csel data1, tmp4, data2, lt
        rev tmp2, data1
        rev tmp4, data2
        sub tmp1, tmp2, zeroones
        orr tmp2, tmp2, #REP8_7f
        sub tmp3, tmp4, zeroones
        orr tmp4, tmp4, #REP8_7f
#else
        lsr data1a, data1, tmp1
        lsl tmp4, data2, tmp2
        lsr data2, data2, tmp1
        orr tmp4, tmp4, data1a
        cmp to_align, #8
        csel data1, tmp4, data2, lt
        sub tmp1, data1, zeroones
        orr tmp2, data1, #REP8_7f
        sub tmp3, data2, zeroones
        orr tmp4, data2, #REP8_7f
#endif
        bic has_nul1, tmp1, tmp2
        cbnz has_nul1, L(fp_le8)
        bic has_nul2, tmp3, tmp4
        b L(fp_gt8)

END (STRCPY)