/*
 * memchr - scan memory for a character
 *
 * Copyright (c) 2010-2021, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

/*
   Written by Dave Gilbert <david.gilbert@linaro.org>

   This __memchr_arm routine is optimised on a Cortex-A9 and should work on
   all ARMv7 processors.  It has a fast path for short sizes and an
   optimised path for large data sets; the worst case is finding the
   match early in a large data set.

 */
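
/*
   For reference, this is the contract the routine implements - standard
   memchr semantics.  A minimal C sketch (illustrative only, not part of
   the original source):

       void *memchr_ref (const void *s, int c, size_t n)
       {
           const unsigned char *p = s;
           while (n--)
             {
               if (*p == (unsigned char) c)
                 return (void *) p;
               p++;
             }
           return 0;
       }
 */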

@ 2011-02-07 david.gilbert@linaro.org
@    Extracted from local git a5b438d861
@ 2011-07-14 david.gilbert@linaro.org
@    Import endianness fix from local git ea786f1b
@ 2011-12-07 david.gilbert@linaro.org
@    Removed unneeded cbz from align loop

	.syntax unified
	.arch armv7-a

@ this lets us check a flag in a 00/ff byte easily in either endianness
#ifdef __ARMEB__
#define CHARTSTMASK(c) 1<<(31-(c*8))
#else
#define CHARTSTMASK(c) 1<<(c*8)
#endif
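@ For example (an illustrative note, not from the original source): on a
@ little-endian build CHARTSTMASK(0) is 1<<0 = 0x00000001 and
@ CHARTSTMASK(2) is 1<<16 = 0x00010000, i.e. the low bit of byte lane c.
@ Since the mask words built below hold only 00 or ff bytes, testing that
@ single bit is enough to test the whole lane.  On big-endian the shift
@ is mirrored, (31-(c*8)), so lane 0 maps to the most significant byte.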
	.thumb

@ ---------------------------------------------------------------------------
	.thumb_func
	.align 2
	.p2align 4,,15
	.global __memchr_arm
	.type __memchr_arm,%function
__memchr_arm:
	@ r0 = start of memory to scan
	@ r1 = character to look for
	@ r2 = length
	@ returns r0 = pointer to character or NULL if not found
	and	r1,r1,#0xff	@ Don't think we can trust the caller to actually pass a char

	cmp	r2,#16		@ If it's short don't bother with anything clever
	blt	20f

	tst	r0, #7		@ If it's already aligned skip the next bit
	beq	10f

	@ Work up to an aligned point
5:
	ldrb	r3, [r0],#1
	subs	r2, r2, #1
	cmp	r3, r1
	beq	50f		@ If it matches exit found
	tst	r0, #7
	bne	5b		@ If not aligned yet then do next byte

10:
	@ At this point, we are aligned, we know we have at least 8 bytes to work with
	push	{r4,r5,r6,r7}
	orr	r1, r1, r1, lsl #8	@ expand the match word across to all bytes
	orr	r1, r1, r1, lsl #16
	bic	r4, r2, #7	@ Number of double words to work with
	mvns	r7, #0		@ all F's
	movs	r3, #0

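@ An illustrative note (not from the original source): the two orr
@ instructions in the setup above replicate the byte into every lane,
@ e.g. r1 = 0x41 ('A') becomes 0x4141 and then 0x41414141, so a single
@ eor compares all four byte lanes of a loaded word at once.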
15:
	ldmia	r0!,{r5,r6}
	subs	r4, r4, #8
	eor	r5,r5, r1	@ Get it so that r5,r6 have 00's where the bytes match the target
	eor	r6,r6, r1
	uadd8	r5, r5, r7	@ Parallel add 0xff - sets the GE bits for anything that wasn't 0
	sel	r5, r3, r7	@ bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION
	uadd8	r6, r6, r7	@ Parallel add 0xff - sets the GE bits for anything that wasn't 0
	sel	r6, r5, r7	@ chained....bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION
	cbnz	r6, 60f
	bne	15b		@ (Flags from the subs above) If not run out of bytes then go around again
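
@ A worked example of the uadd8/sel trick above (illustrative, not from
@ the original source).  Searching for 'A' (r1 = 0x41414141, r7 = all
@ ff, r3 = 0), with a loaded word 0x00414243 (little-endian bytes
@ 43 42 41 00):
@     eor   with r1      -> 0x41000302   (00 in the matching lane)
@     uadd8 with r7      -> every non-00 lane carries out and sets its
@                           GE bit; the 00 lane gives 0xff, no carry
@     sel   r3/r7 on GE  -> 0x00ff0000   (ff marks the matching lane)
@ A non-zero result therefore means a match somewhere in the double
@ word, and the ff position encodes where - decoded at label 60 below.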

	pop	{r4,r5,r6,r7}
	and	r1,r1,#0xff	@ Get r1 back to a single character from the expansion above
	and	r2,r2,#7	@ Leave the count remaining as the number after the double words have been done

20:
	cbz	r2, 40f		@ 0 length or hit the end already then not found

21:  @ Post aligned section, or just a short call
	ldrb	r3,[r0],#1
	subs	r2,r2,#1
	eor	r3,r3,r1	@ r3 = 0 if match - doesn't break flags from sub
	cbz	r3, 50f
	bne	21b		@ on r2 flags

40:
	movs	r0,#0		@ not found
	bx	lr

50:
	subs	r0,r0,#1	@ found
	bx	lr

60:  @ We're here because the fast path found a hit - now we have to track down exactly which word it was
	@ r0 points to the start of the double word after the one that was tested
	@ r5 has the 00/ff pattern for the first word, r6 has the chained value
	cmp	r5, #0
	itte	eq
	moveq	r5, r6		@ the end is in the 2nd word
	subeq	r0,r0,#3	@ Points to 2nd byte of 2nd word
	subne	r0,r0,#7	@ or 2nd byte of 1st word

	@ r0 currently points to the 2nd byte of the word containing the hit
	tst	r5, # CHARTSTMASK(0)	@ 1st character
	bne	61f
	adds	r0,r0,#1
	tst	r5, # CHARTSTMASK(1)	@ 2nd character
	ittt	eq
	addeq	r0,r0,#1
	tsteq	r5, # (3<<15)		@ 2nd & 3rd character
	@ If not the 3rd must be the last one
	addeq	r0,r0,#1
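
@ In C terms this fix-up does roughly the following (a sketch under the
@ same 00/ff-mask convention; names are illustrative, not from the
@ original source):
@     unsigned off = 0;
@     while (off < 3 && !(mask & CHARTSTMASK (off)))
@       off++;               /* first ff lane; lane 3 by elimination */
@     return base + off;     /* base = 1st byte of the hit word */
@ The assembly folds the final subtract at 61 into the walk by starting
@ r0 one byte past the base.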

61:
	pop	{r4,r5,r6,r7}
	subs	r0,r0,#1
	bx	lr

	.size	__memchr_arm, . - __memchr_arm