/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>

#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 || XCHAL_HAVE_MAC16
#define XCHAL_NO_MUL 0
#else
#define XCHAL_NO_MUL 1
#endif
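
/* __umulsidi3 computes the full 64-bit product of two 32-bit unsigned
   integers.  GCC typically emits calls to it for widening multiplies
   such as (u64)x * y; the kernel provides and exports its own copy
   because it does not link against libgcc.  When XCHAL_NO_MUL is set,
   the core has no multiply instructions at all and the shift-and-add
   helper at the end of this file is used instead.  */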

ENTRY(__umulsidi3)

#ifdef __XTENSA_CALL0_ABI__
	abi_entry(32)
	s32i	a12, sp, 16
	s32i	a13, sp, 20
	s32i	a14, sp, 24
	s32i	a15, sp, 28
#elif XCHAL_NO_MUL
	/* This is not really a leaf function; allocate enough stack space
	   to allow CALL12s to a helper function.  */
	abi_entry(32)
#else
	abi_entry_default
#endif

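/* The 64-bit result is returned in the a2/a3 register pair; the word
   order follows the target endianness, so wh and wl name the registers
   that receive the high and low words of the product, respectively.  */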
#ifdef __XTENSA_EB__
#define wh a2
#define wl a3
#else
#define wh a3
#define wl a2
#endif /* __XTENSA_EB__ */

	/* This code is taken from the mulsf3 routine in ieee754-sf.S.
	   See more comments there.  */

#if XCHAL_HAVE_MUL32_HIGH
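	/* The MUL32_HIGH option gives the upper 32 bits of the product
	   directly.  The low word is computed into a6 first because wl
	   aliases a2 or a3, which muluh still needs as inputs.  */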
	mull	a6, a2, a3
	muluh	wh, a2, a3
	mov	wl, a6

#else /* ! MUL32_HIGH */

#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL
	/* a0 and a8 will be clobbered by calling the multiply function
	   but a8 is not used here and need not be saved.  */
	s32i	a0, sp, 0
#endif

#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32

#define a2h a4
#define a3h a5

	/* Get the high halves of the inputs into registers.  */
	srli	a2h, a2, 16
	srli	a3h, a3, 16

#define a2l a2
#define a3l a3

#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
	/* Clear the high halves of the inputs.  This does not matter
	   for MUL16 because the high bits are ignored.  */
	extui	a2, a2, 0, 16
	extui	a3, a3, 0, 16
#endif
#endif /* MUL16 || MUL32 */


#if XCHAL_HAVE_MUL16

#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
	mul16u	dst, xreg ## xhalf, yreg ## yhalf

#elif XCHAL_HAVE_MUL32

#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
	mull	dst, xreg ## xhalf, yreg ## yhalf

#elif XCHAL_HAVE_MAC16

/* The preprocessor insists on inserting a space when concatenating after
   a period in the definition of do_mul below.  These macros are a workaround
   using underscores instead of periods when doing the concatenation.  */
#define umul_aa_ll umul.aa.ll
#define umul_aa_lh umul.aa.lh
#define umul_aa_hl umul.aa.hl
#define umul_aa_hh umul.aa.hh

#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
	umul_aa_ ## xhalf ## yhalf	xreg, yreg; \
	rsr	dst, ACCLO

#else /* no multiply hardware */

#define set_arg_l(dst, src) \
	extui	dst, src, 0, 16
#define set_arg_h(dst, src) \
	srli	dst, src, 16

#ifdef __XTENSA_CALL0_ABI__
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
	set_arg_ ## xhalf (a13, xreg); \
	set_arg_ ## yhalf (a14, yreg); \
	call0	.Lmul_mulsi3; \
	mov	dst, a12
#else
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
	set_arg_ ## xhalf (a14, xreg); \
	set_arg_ ## yhalf (a15, yreg); \
	call12	.Lmul_mulsi3; \
	mov	dst, a14
#endif /* __XTENSA_CALL0_ABI__ */

#endif /* no multiply hardware */

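	/* With x = (xh << 16) + xl and y = (yh << 16) + yl, the product is

	       x * y = (xh*yh << 32) + ((xl*yh + xh*yl) << 16) + xl*yl
	             = (pp3 << 32) + ((pp1 + pp2) << 16) + pp0

	   Carries out of the 32-bit adds below are detected with the
	   usual unsigned idiom: if a sum is less than one of its addends,
	   the add wrapped around.  */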
	/* Add pp1 and pp2 into a6 with carry-out in a9.  */
	do_mul(a6, a2, l, a3, h)	/* pp 1 */
	do_mul(a11, a2, h, a3, l)	/* pp 2 */
	movi	a9, 0
	add	a6, a6, a11
	bgeu	a6, a11, 1f
	addi	a9, a9, 1
1:
	/* Shift the high half of a9/a6 into position in a9.  Note that
	   this value can be safely incremented without any carry-outs.  */
	ssai	16
	src	a9, a9, a6
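	/* With SAR set to 16 by the ssai above, src concatenates a9:a6
	   and shifts right, leaving a9 = (a9 << 16) | (a6 >> 16), i.e.
	   the carry plus the high half of the middle sum.  The sll below
	   reuses SAR, shifting a6 left by 32 - SAR = 16 bits.  */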

	/* Compute the low word into a6.  */
	do_mul(a11, a2, l, a3, l)	/* pp 0 */
	sll	a6, a6
	add	a6, a6, a11
	bgeu	a6, a11, 1f
	addi	a9, a9, 1
1:
	/* Compute the high word into wh.  */
	do_mul(wh, a2, h, a3, h)	/* pp 3 */
	add	wh, wh, a9
	mov	wl, a6

#endif /* !MUL32_HIGH */

#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL
	/* Restore the original return address.  */
	l32i	a0, sp, 0
#endif
#ifdef __XTENSA_CALL0_ABI__
	l32i	a12, sp, 16
	l32i	a13, sp, 20
	l32i	a14, sp, 24
	l32i	a15, sp, 28
	abi_ret(32)
#else
	abi_ret_default
#endif

#if XCHAL_NO_MUL

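	/* addx2/addx4/addx8 add one register to another shifted left by
	   1, 2, or 3 bits in a single instruction; emulate them with
	   slli + add on cores that lack the ADDX option.  */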
	.macro	do_addx2 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
	addx2	\dst, \as, \at
#else
	slli	\tmp, \as, 1
	add	\dst, \tmp, \at
#endif
	.endm

	.macro	do_addx4 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
	addx4	\dst, \as, \at
#else
	slli	\tmp, \as, 2
	add	\dst, \tmp, \at
#endif
	.endm

	.macro	do_addx8 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
	addx8	\dst, \as, \at
#else
	slli	\tmp, \as, 3
	add	\dst, \tmp, \at
#endif
	.endm

	/* For Xtensa processors with no multiply hardware, this simplified
	   version of _mulsi3 is used for multiplying the 16-bit halves of
	   the arguments.  When using CALL0, this function uses a custom
	   ABI: the inputs are passed in a13 and a14, the result is
	   returned in a12, and a8 and a15 are clobbered.  */
	.align	4
.Lmul_mulsi3:
	abi_entry_default
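
	/* Classic shift-and-add multiply, consuming four multiplier bits
	   per iteration: for bit n (0..3), conditionally (movnez) replace
	   the accumulator with accumulator + (multiplicand << n), using
	   addxN to fold the shift into the add, then step both operands
	   by four bits.  With 16-bit inputs the loop runs at most four
	   times.  */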
	.macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
	movi	\dst, 0
1:	add	\tmp1, \src2, \dst
	extui	\tmp2, \src1, 0, 1
	movnez	\dst, \tmp1, \tmp2

	do_addx2 \tmp1, \src2, \dst, \tmp1
	extui	\tmp2, \src1, 1, 1
	movnez	\dst, \tmp1, \tmp2

	do_addx4 \tmp1, \src2, \dst, \tmp1
	extui	\tmp2, \src1, 2, 1
	movnez	\dst, \tmp1, \tmp2

	do_addx8 \tmp1, \src2, \dst, \tmp1
	extui	\tmp2, \src1, 3, 1
	movnez	\dst, \tmp1, \tmp2

	srli	\src1, \src1, 4
	slli	\src2, \src2, 4
	bnez	\src1, 1b
	.endm

#ifdef __XTENSA_CALL0_ABI__
	mul_mulsi3_body a12, a13, a14, a15, a8
#else
	/* The result will be written into a2, so save that argument in a4.  */
	mov	a4, a2
	mul_mulsi3_body a2, a4, a3, a5, a6
#endif
	abi_ret_default
#endif /* XCHAL_NO_MUL */

ENDPROC(__umulsidi3)
EXPORT_SYMBOL(__umulsidi3)