/*
 * strcpy/stpcpy - copy a string returning pointer to start/end.
 *
 * Copyright (c) 2013-2020, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
 */

#include "../asmdefs.h"

/* To build as stpcpy, define BUILD_STPCPY before compiling this file.

   To test the page crossing code path more thoroughly, compile with
   -DSTRCPY_TEST_PAGE_CROSS - this will force all copies through the slower
   entry path.  This option is not intended for production use.  */

/* Arguments and results.  */
#define dstin		x0
#define srcin		x1

/* Locals and temporaries.  */
#define src		x2
#define dst		x3
#define data1		x4
#define data1w		w4
#define data2		x5
#define data2w		w5
#define has_nul1	x6
#define has_nul2	x7
#define tmp1		x8
#define tmp2		x9
#define tmp3		x10
#define tmp4		x11
#define zeroones	x12
#define data1a		x13
#define data2a		x14
#define pos		x15
#define len		x16
#define to_align	x17

#ifdef BUILD_STPCPY
#define STRCPY __stpcpy_aarch64
#else
#define STRCPY __strcpy_aarch64
#endif

	/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
	   (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
	   can be done in parallel across the entire word.  */

#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
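
	/* As a worked example (little-endian load of "hello\0zz", so
	   X = 0x7a7a006f6c6c6568):
	       X - REP8_01                    = 0x7978ff6e6b6b6467
	       X | REP8_7f                    = 0x7f7f7f7f7f7f7f7f
	       (X - REP8_01) & ~(X | REP8_7f) = 0x0000800000000000
	   The 0x80 lands exactly in the byte holding the NUL.  A borrow out
	   of a zero byte can also flag bytes beyond the first NUL (e.g. when
	   the next byte is 0x01), which is why the syndrome is always
	   scanned from the start-of-string end.  */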

	/* AArch64 systems have a minimum page size of 4k.  We can do a quick
	   page size check for crossing this boundary on entry and if we
	   do not, then we can short-circuit much of the entry code.  We
	   expect early page-crossing strings to be rare (probability of
	   16/MIN_PAGE_SIZE ~= 0.4%), so the branch should be quite
	   predictable, even with random strings.

	   We don't bother checking for larger page sizes; the cost of setting
	   up the correct page size is just not worth the extra gain from
	   a small reduction in the cases taking the slow path.  Note that
	   we only care about whether the first fetch, which may be
	   misaligned, crosses a page boundary - after that we move to aligned
	   fetches for the remainder of the string.  */

#ifdef STRCPY_TEST_PAGE_CROSS
	/* Make everything that isn't Qword aligned look like a page cross.  */
#define MIN_PAGE_P2 4
#else
#define MIN_PAGE_P2 12
#endif

#define MIN_PAGE_SIZE (1 << MIN_PAGE_P2)
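
	/* For illustration: the test at the start of STRCPY computes
	   srcin & (MIN_PAGE_SIZE - 1) and takes the slow path when that is
	   greater than MIN_PAGE_SIZE - 16, i.e. whenever the unaligned
	   16-byte first fetch could run past the end of the page.  With a 4k
	   page, an offset of 0xff5 (4085 > 4080) would touch bytes
	   0xff5-0x1004 and so branches to L(page_cross); an offset of 0xff0
	   reads only up to 0xfff and stays on the fast path.  */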

ENTRY (STRCPY)
	PTR_ARG (0)
	PTR_ARG (1)
	/* For moderately short strings, the fastest way to do the copy is to
	   calculate the length of the string in the same way as strlen, then
	   essentially do a memcpy of the result.  This avoids the need for
	   multiple byte copies and further means that by the time we
	   reach the bulk copy loop we know we can always use DWord
	   accesses.  We expect __strcpy_aarch64 to rarely be called repeatedly
	   with the same source string, so branch prediction is likely to
	   always be difficult - we mitigate this by preferring
	   conditional select operations over branches whenever this is
	   feasible.  */
	and	tmp2, srcin, #(MIN_PAGE_SIZE - 1)
	mov	zeroones, #REP8_01
	and	to_align, srcin, #15
	cmp	tmp2, #(MIN_PAGE_SIZE - 16)
	neg	tmp1, to_align
	/* The first fetch will straddle a (possible) page boundary iff
	   srcin + 15 causes bit[MIN_PAGE_P2] to change value.  A 16-byte
	   aligned string will never fail the page align check, so will
	   always take the fast path.  */
	b.gt	L(page_cross)

L(page_cross_ok):
	ldp	data1, data2, [srcin]
#ifdef __AARCH64EB__
	/* Because we expect the end to be found within 16 characters
	   (profiling shows this is the most common case), it's worth
	   swapping the bytes now to save having to recalculate the
	   termination syndrome later.  We preserve data1 and data2
	   so that we can re-use the values later on.  */
	rev	tmp2, data1
	sub	tmp1, tmp2, zeroones
	orr	tmp2, tmp2, #REP8_7f
	bics	has_nul1, tmp1, tmp2
	b.ne	L(fp_le8)
	rev	tmp4, data2
	sub	tmp3, tmp4, zeroones
	orr	tmp4, tmp4, #REP8_7f
#else
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, #REP8_7f
	bics	has_nul1, tmp1, tmp2
	b.ne	L(fp_le8)
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, #REP8_7f
#endif
	bics	has_nul2, tmp3, tmp4
	b.eq	L(bulk_entry)

	/* The string is short (<=16 bytes), but we don't yet know exactly
	   how short.  Work out the exact length so that we can quickly
	   select the optimal copy strategy.  */
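	/* For illustration (little-endian case): if the NUL is byte 3 of
	   data2, the string holds 11 characters.  Below, clz of the
	   byte-reversed syndrome gives pos = 24, so dst = dstin + 3; data2
	   is then shifted so that the NUL sits in its top byte and stored
	   at [dst, #1], i.e. destination bytes 4-11, overlapping the 8-byte
	   store of data1 at dstin.  */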
L(fp_gt8):
	rev	has_nul2, has_nul2
	clz	pos, has_nul2
	mov	tmp2, #56
	add	dst, dstin, pos, lsr #3		/* Bits to bytes.  */
	sub	pos, tmp2, pos
#ifdef __AARCH64EB__
	lsr	data2, data2, pos
#else
	lsl	data2, data2, pos
#endif
	str	data2, [dst, #1]
	str	data1, [dstin]
#ifdef BUILD_STPCPY
	add	dstin, dst, #8
#endif
	ret

L(fp_le8):
	rev	has_nul1, has_nul1
	clz	pos, has_nul1
	add	dst, dstin, pos, lsr #3		/* Bits to bytes.  */
	subs	tmp2, pos, #24			/* Pos in bits. */
	b.lt	L(fp_lt4)
#ifdef __AARCH64EB__
	mov	tmp2, #56
	sub	pos, tmp2, pos
	lsr	data2, data1, pos
	lsr	data1, data1, #32
#else
	lsr	data2, data1, tmp2
#endif
	/* 4->7 bytes to copy.  */
	str	data2w, [dst, #-3]
	str	data1w, [dstin]
#ifdef BUILD_STPCPY
	mov	dstin, dst
#endif
	ret
L(fp_lt4):
	cbz	pos, L(fp_lt2)
	/* 2->3 bytes to copy.  */
#ifdef __AARCH64EB__
	lsr	data1, data1, #48
#endif
	strh	data1w, [dstin]
	/* Fall-through, one byte (max) to go.  */
L(fp_lt2):
	/* Null-terminated string.  Last character must be zero!  */
	strb	wzr, [dst]
#ifdef BUILD_STPCPY
	mov	dstin, dst
#endif
	ret

	.p2align 6
	/* Aligning here ensures that the entry code and main loop all lie
	   within one 64-byte cache line.  */
L(bulk_entry):
	sub	to_align, to_align, #16
	stp	data1, data2, [dstin]
	sub	src, srcin, to_align
	sub	dst, dstin, to_align
	b	L(entry_no_page_cross)

	/* The inner loop deals with two Dwords at a time.  This has a
	   slightly higher start-up cost, but we should win quite quickly,
	   especially on cores with a high number of issue slots per
	   cycle, as we get much better parallelism out of the operations.  */
L(main_loop):
	stp	data1, data2, [dst], #16
L(entry_no_page_cross):
	ldp	data1, data2, [src], #16
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, #REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, #REP8_7f
	bic	has_nul1, tmp1, tmp2
	bics	has_nul2, tmp3, tmp4
	ccmp	has_nul1, #0, #0, eq	/* NZCV = 0000  */
	b.eq	L(main_loop)
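	/* To spell out the exit test above: bics sets Z when has_nul2 is
	   zero; ccmp then either compares has_nul1 with zero (if Z was set)
	   or forces NZCV to 0000, clearing Z.  The b.eq is therefore taken
	   only while both dwords are free of NUL bytes.  */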

	/* Since we know we are copying at least 16 bytes, the fastest way
	   to deal with the tail is to determine the location of the
	   trailing NUL, then (re)copy the 16 bytes leading up to that.  */
	cmp	has_nul1, #0
#ifdef __AARCH64EB__
	/* For big-endian, carry propagation (if the final byte in the
	   string is 0x01) means we cannot use has_nul directly.  The
	   easiest way to get the correct byte is to byte-swap the data
	   and calculate the syndrome a second time.  */
	csel	data1, data1, data2, ne
	rev	data1, data1
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, #REP8_7f
	bic	has_nul1, tmp1, tmp2
#else
	csel	has_nul1, has_nul1, has_nul2, ne
#endif
	rev	has_nul1, has_nul1
	clz	pos, has_nul1
	add	tmp1, pos, #72
	add	pos, pos, #8
	csel	pos, pos, tmp1, ne
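	/* pos is now the bit offset, within the 16-byte block, of the byte
	   just past the NUL: clz gave 8 * (byte index of the NUL within its
	   dword), plus 8 bits if the NUL was in data1 or 72 bits (one dword
	   plus one byte) if it was in data2.  As src already points 16 bytes
	   past the block, advancing by pos >> 3 makes the load at [src, #-32]
	   end exactly on the NUL.  */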
	add	src, src, pos, lsr #3
	add	dst, dst, pos, lsr #3
	ldp	data1, data2, [src, #-32]
	stp	data1, data2, [dst, #-16]
#ifdef BUILD_STPCPY
	sub	dstin, dst, #1
#endif
	ret

L(page_cross):
	bic	src, srcin, #15
	/* Start by loading two words at [srcin & ~15], then forcing the
	   bytes that precede srcin to 0xff.  This means they never look
	   like termination bytes.  */
	ldp	data1, data2, [src]
	lsl	tmp1, tmp1, #3	/* Bytes beyond alignment -> bits.  */
	tst	to_align, #7
	csetm	tmp2, ne
#ifdef __AARCH64EB__
	lsl	tmp2, tmp2, tmp1	/* Shift (tmp1 & 63).  */
#else
	lsr	tmp2, tmp2, tmp1	/* Shift (tmp1 & 63).  */
#endif
	orr	data1, data1, tmp2
	orr	data2a, data2, tmp2
	cmp	to_align, #8
	csinv	data1, data1, xzr, lt
	csel	data2, data2, data2a, lt
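	/* For illustration (little-endian case): with srcin & 15 == 5, tmp1
	   holds -40, so the all-ones mask shifted right by (-40 & 63) == 24
	   keeps the low 40 bits; ORing it in turns the five bytes below
	   srcin into 0xff.  As to_align < 8, data1 keeps the masked value
	   and data2 is used as loaded; for to_align >= 8 all of data1 is
	   forced to ones and the mask is applied to data2 instead.  */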
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, #REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, #REP8_7f
	bic	has_nul1, tmp1, tmp2
	bics	has_nul2, tmp3, tmp4
	ccmp	has_nul1, #0, #0, eq	/* NZCV = 0000  */
	b.eq	L(page_cross_ok)
	/* We now need to make data1 and data2 look like they've been
	   loaded directly from srcin.  Do a rotate on the 128-bit value.  */
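	/* For illustration (little-endian case): with srcin & 15 == 5,
	   data1a = data1 >> 40 keeps the three valid bytes at srcin..srcin+2
	   in the low positions, data2 << 24 supplies the next five, and
	   their OR is the dword that a load from srcin would have given.
	   Bytes shifted in from beyond the 16 loaded bytes lie past the NUL
	   we have already found, so their value does not matter.  */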
	lsl	tmp1, to_align, #3	/* Bytes->bits.  */
	neg	tmp2, to_align, lsl #3
#ifdef __AARCH64EB__
	lsl	data1a, data1, tmp1
	lsr	tmp4, data2, tmp2
	lsl	data2, data2, tmp1
	orr	tmp4, tmp4, data1a
	cmp	to_align, #8
	csel	data1, tmp4, data2, lt
	rev	tmp2, data1
	rev	tmp4, data2
	sub	tmp1, tmp2, zeroones
	orr	tmp2, tmp2, #REP8_7f
	sub	tmp3, tmp4, zeroones
	orr	tmp4, tmp4, #REP8_7f
#else
	lsr	data1a, data1, tmp1
	lsl	tmp4, data2, tmp2
	lsr	data2, data2, tmp1
	orr	tmp4, tmp4, data1a
	cmp	to_align, #8
	csel	data1, tmp4, data2, lt
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, #REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, #REP8_7f
#endif
	bic	has_nul1, tmp1, tmp2
	cbnz	has_nul1, L(fp_le8)
	bic	has_nul2, tmp3, tmp4
	b	L(fp_gt8)

END (STRCPY)
