/* SPDX-License-Identifier: GPL-2.0-or-later */
/* multi_arith.h: multi-precision integer arithmetic functions, needed
   to do extended-precision floating point.

   (c) 1998 David Huggins-Daines.

   Somewhat based on arch/alpha/math-emu/ieee-math.c, which is (c)
   David Mosberger-Tang.

 */

/* Note:

   These are not general multi-precision math routines.  Rather, they
   implement the subset of integer arithmetic that we need in order to
   multiply, divide, and normalize 128-bit unsigned mantissae.  */

#ifndef MULTI_ARITH_H
#define MULTI_ARITH_H

static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
{
	reg->exp += cnt;

	switch (cnt) {
	case 0 ... 8:
		reg->lowmant = reg->mant.m32[1] << (8 - cnt);
		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
				   (reg->mant.m32[0] << (32 - cnt));
		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
		break;
	case 9 ... 32:
		reg->lowmant = reg->mant.m32[1] >> (cnt - 8);
		if (reg->mant.m32[1] << (40 - cnt))
			reg->lowmant |= 1;
		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
				   (reg->mant.m32[0] << (32 - cnt));
		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
		break;
	case 33 ... 39:
		asm volatile ("bfextu %1{%2,#8},%0" : "=d" (reg->lowmant)
			: "m" (reg->mant.m32[0]), "d" (64 - cnt));
		if (reg->mant.m32[1] << (40 - cnt))
			reg->lowmant |= 1;
		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
		reg->mant.m32[0] = 0;
		break;
	case 40 ... 71:
		reg->lowmant = reg->mant.m32[0] >> (cnt - 40);
		if ((reg->mant.m32[0] << (72 - cnt)) || reg->mant.m32[1])
			reg->lowmant |= 1;
		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
		reg->mant.m32[0] = 0;
		break;
	default:
		reg->lowmant = reg->mant.m32[0] || reg->mant.m32[1];
		reg->mant.m32[0] = 0;
		reg->mant.m32[1] = 0;
		break;
	}
}
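
/* All of the cases above implement the same operation: shift the 64-bit
   mantissa right by cnt, keep the eight bits that fall just below it as the
   rounding byte in lowmant, and fold anything shifted out below that byte
   into a sticky bit (bit 0 of lowmant).  A minimal portable sketch of that
   behaviour for 1 <= cnt <= 63 on a flat 64-bit view of the mantissa; the
   helper name is made up and the sketch is illustrative only, not used by
   the emulator: */
static inline void fp_denormalize_sketch(unsigned long long *mant,
					 unsigned char *lowmant,
					 unsigned int cnt)
{
	unsigned long long dropped = *mant & ((1ULL << cnt) - 1);

	/* the eight bits immediately below the shifted mantissa */
	if (cnt >= 8)
		*lowmant = (unsigned char)(dropped >> (cnt - 8));
	else
		*lowmant = (unsigned char)(dropped << (8 - cnt));

	/* sticky bit: any ones lost below the rounding byte */
	if (cnt > 8 && (dropped & ((1ULL << (cnt - 8)) - 1)))
		*lowmant |= 1;

	*mant >>= cnt;
}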

static inline int fp_overnormalize(struct fp_ext *reg)
{
	int shift;

	if (reg->mant.m32[0]) {
		asm ("bfffo %1{#0,#32},%0" : "=d" (shift)
			: "dm" (reg->mant.m32[0]));
		reg->mant.m32[0] = (reg->mant.m32[0] << shift) |
				   (reg->mant.m32[1] >> (32 - shift));
		reg->mant.m32[1] = (reg->mant.m32[1] << shift);
	} else {
		asm ("bfffo %1{#0,#32},%0" : "=d" (shift)
			: "dm" (reg->mant.m32[1]));
		reg->mant.m32[0] = (reg->mant.m32[1] << shift);
		reg->mant.m32[1] = 0;
		shift += 32;
	}

	return shift;
}
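
/* fp_overnormalize() uses the m68k bfffo (find first one) instruction to
   locate the leading set bit and shifts it up into bit 63, returning the
   shift count so the caller can adjust the exponent.  An equivalent C
   sketch, assuming a nonzero mantissa; the helper name and the use of the
   GCC builtin are assumptions, not part of the emulator: */
static inline int fp_overnormalize_sketch(unsigned long long *mant)
{
	int shift = __builtin_clzll(*mant);	/* leading zeros, *mant != 0 */

	*mant <<= shift;			/* leading one now in bit 63 */
	return shift;
}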

static inline int fp_addmant(struct fp_ext *dest, struct fp_ext *src)
{
	int carry;

	/* we assume here that gcc only inserts a move and a clr instruction */
	asm volatile ("add.b %1,%0" : "=d,g" (dest->lowmant)
		: "g,d" (src->lowmant), "0,0" (dest->lowmant));
	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[1])
		: "d" (src->mant.m32[1]), "0" (dest->mant.m32[1]));
	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[0])
		: "d" (src->mant.m32[0]), "0" (dest->mant.m32[0]));
	asm volatile ("addx.l %0,%0" : "=d" (carry) : "0" (0));

	return carry;
}
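
/* The four asm statements above form a single 72-bit addition: an 8-bit add
   of the rounding bytes followed by two add-with-extend steps, with a final
   addx capturing the carry out of bit 71.  A portable sketch of the same
   carry propagation on a 64-bit mantissa plus rounding byte; the helper
   name is hypothetical and the sketch is illustrative only: */
static inline int fp_addmant_sketch(unsigned long long *dmant,
				    unsigned char *dlow,
				    unsigned long long smant,
				    unsigned char slow)
{
	unsigned int low = (unsigned int)*dlow + slow;	/* carry into bit 8 */
	unsigned long long t = *dmant + smant;
	int carry = t < smant;				/* carry out of the 64-bit add */
	unsigned long long sum = t + (low >> 8);

	if (sum < t)					/* carry from the low byte */
		carry = 1;

	*dlow = (unsigned char)low;
	*dmant = sum;
	return carry;
}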

static inline int fp_addcarry(struct fp_ext *reg)
{
	if (++reg->exp == 0x7fff) {
		if (reg->mant.m64)
			fp_set_sr(FPSR_EXC_INEX2);
		reg->mant.m64 = 0;
		fp_set_sr(FPSR_EXC_OVFL);
		return 0;
	}
	reg->lowmant = (reg->mant.m32[1] << 7) | (reg->lowmant ? 1 : 0);
	reg->mant.m32[1] = (reg->mant.m32[1] >> 1) |
			   (reg->mant.m32[0] << 31);
	reg->mant.m32[0] = (reg->mant.m32[0] >> 1) | 0x80000000;

	return 1;
}
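
/* fp_addcarry() renormalizes after an addition carried out of bit 63: the
   exponent is bumped (signalling overflow if it reaches 0x7fff), the
   mantissa is shifted right one bit with the dropped bit kept in the
   rounding byte, and the implicit leading one is restored in bit 63.  A
   stripped-down sketch of just the renormalization step, ignoring lowmant
   and the overflow path; the helper name is made up: */
static inline void fp_addcarry_sketch(unsigned long long *mant, int *exp)
{
	(*exp)++;
	*mant = (*mant >> 1) | 0x8000000000000000ULL;
}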

static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
			      struct fp_ext *src2)
{
	/* we assume here that gcc only inserts a move and a clr instruction */
	asm volatile ("sub.b %1,%0" : "=d,g" (dest->lowmant)
		: "g,d" (src2->lowmant), "0,0" (src1->lowmant));
	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[1])
		: "d" (src2->mant.m32[1]), "0" (src1->mant.m32[1]));
	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[0])
		: "d" (src2->mant.m32[0]), "0" (src1->mant.m32[0]));
}

#define fp_mul64(desth, destl, src1, src2) ({				\
	asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth)		\
		: "dm" (src1), "0" (src2));				\
})
#define fp_div64(quot, rem, srch, srcl, div)				\
	asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem)		\
		: "dm" (div), "1" (srch), "0" (srcl))
#define fp_add64(dest1, dest2, src1, src2) ({				\
	asm ("add.l %1,%0" : "=d,dm" (dest2)				\
		: "dm,d" (src2), "0,0" (dest2));			\
	asm ("addx.l %1,%0" : "=d" (dest1)				\
		: "d" (src1), "0" (dest1));				\
})
#define fp_addx96(dest, src) ({						\
	/* we assume here that gcc only inserts a move and a clr instruction */ \
	asm volatile ("add.l %1,%0" : "=d,g" (dest->m32[2])		\
		: "g,d" (src.m32[1]), "0,0" (dest->m32[2]));		\
	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[1])		\
		: "d" (src.m32[0]), "0" (dest->m32[1]));		\
	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[0])		\
		: "d" (0), "0" (dest->m32[0]));				\
})
#define fp_sub64(dest, src) ({						\
	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[1])			\
		: "dm,d" (src.m32[1]), "0,0" (dest.m32[1]));		\
	asm ("subx.l %1,%0" : "=d" (dest.m32[0])			\
		: "d" (src.m32[0]), "0" (dest.m32[0]));			\
})
#define fp_sub96c(dest, srch, srcm, srcl) ({				\
	char carry;							\
	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[2])			\
		: "dm,d" (srcl), "0,0" (dest.m32[2]));			\
	asm ("subx.l %1,%0" : "=d" (dest.m32[1])			\
		: "d" (srcm), "0" (dest.m32[1]));			\
	asm ("subx.l %2,%1; scs %0" : "=d" (carry), "=d" (dest.m32[0])	\
		: "d" (srch), "1" (dest.m32[0]));			\
	carry;								\
})
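
/* fp_mul64() and fp_div64() wrap the 68020+ long forms of mulu.l and divu.l:
   a full 32x32->64 multiply and a 64/32 divide yielding a 32-bit quotient
   and remainder.  The same primitives expressed with a portable 64-bit
   intermediate, purely as an illustration; the helper names are made up,
   and the divide sketch assumes the quotient fits in 32 bits, as divu.l
   itself requires: */
static inline void fp_mul64_sketch(unsigned int *desth, unsigned int *destl,
				   unsigned int src1, unsigned int src2)
{
	unsigned long long prod = (unsigned long long)src1 * src2;

	*desth = (unsigned int)(prod >> 32);
	*destl = (unsigned int)prod;
}

static inline void fp_div64_sketch(unsigned int *quot, unsigned int *rem,
				   unsigned int srch, unsigned int srcl,
				   unsigned int div)
{
	unsigned long long n = ((unsigned long long)srch << 32) | srcl;

	*quot = (unsigned int)(n / div);	/* must fit in 32 bits */
	*rem = (unsigned int)(n % div);
}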

static inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1,
				   struct fp_ext *src2)
{
	union fp_mant64 temp;

	fp_mul64(dest->m32[0], dest->m32[1], src1->mant.m32[0],
		 src2->mant.m32[0]);
	fp_mul64(dest->m32[2], dest->m32[3], src1->mant.m32[1],
		 src2->mant.m32[1]);

	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[0],
		 src2->mant.m32[1]);
	fp_addx96(dest, temp);

	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[1],
		 src2->mant.m32[0]);
	fp_addx96(dest, temp);
}
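
/* With src1 = (A:B) and src2 = (C:D) split into 32-bit halves, the routine
   above builds the 128-bit product as

	A*C << 64  +  (A*D + B*C) << 32  +  B*D

   storing A*C and B*D directly into the four result words and then adding
   the two cross products one word lower via fp_addx96().  A portable sketch
   of the same schoolbook decomposition using 64-bit intermediates; the
   helper name is hypothetical, and dest[0] is the most significant word as
   in union fp_mant128: */
static inline void fp_multiplymant_sketch(unsigned int dest[4],
					  unsigned int a, unsigned int b,
					  unsigned int c, unsigned int d)
{
	unsigned long long lo = (unsigned long long)b * d;
	unsigned long long hi = (unsigned long long)a * c;
	unsigned long long mid1 = (unsigned long long)a * d;
	unsigned long long mid2 = (unsigned long long)b * c;
	unsigned long long mid = (lo >> 32) + (unsigned int)mid1 +
				 (unsigned int)mid2;

	dest[3] = (unsigned int)lo;
	dest[2] = (unsigned int)mid;
	hi += (mid >> 32) + (mid1 >> 32) + (mid2 >> 32);
	dest[1] = (unsigned int)hi;
	dest[0] = (unsigned int)(hi >> 32);
}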

static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
				 struct fp_ext *div)
{
	union fp_mant128 tmp;
	union fp_mant64 tmp64;
	unsigned long *mantp = dest->m32;
	unsigned long fix, rem, first, dummy;
	int i;

	/* the algorithm below requires the dividend (src) to be smaller
	   than div, but both have the high bit set */
	if (src->mant.m64 >= div->mant.m64) {
		fp_sub64(src->mant, div->mant);
		*mantp = 1;
	} else
		*mantp = 0;
	mantp++;

	/* basic idea behind this algorithm: we can't divide two 64-bit
	   numbers (AB/CD) directly, but we can calculate AB/C0.  That
	   quotient is too large by a factor of CD/C0, so we multiply the
	   first result by the precalculated ratio C0/CD to fix it; after
	   that the result is nearly correct and only a few corrections are
	   needed. */

	/* C0/CD could be precalculated directly, but that is again a 64-bit
	   division.  We can make it easier by first dividing through C, which
	   gives 10/1D; after a single shift the value fits into 32 bits. */
	fix = 0x80000000;
	dummy = div->mant.m32[1] / div->mant.m32[0] + 1;
	dummy = (dummy >> 1) | fix;
	fp_div64(fix, dummy, fix, 0, dummy);
	fix--;

	for (i = 0; i < 3; i++, mantp++) {
		if (src->mant.m32[0] == div->mant.m32[0]) {
			fp_div64(first, rem, 0, src->mant.m32[1],
				 div->mant.m32[0]);

			fp_mul64(*mantp, dummy, first, fix);
			*mantp += fix;
		} else {
			fp_div64(first, rem, src->mant.m32[0],
				 src->mant.m32[1], div->mant.m32[0]);

			fp_mul64(*mantp, dummy, first, fix);
		}

		fp_mul64(tmp.m32[0], tmp.m32[1], div->mant.m32[0],
			 first - *mantp);
		fp_add64(tmp.m32[0], tmp.m32[1], 0, rem);
		tmp.m32[2] = 0;

		fp_mul64(tmp64.m32[0], tmp64.m32[1], *mantp, div->mant.m32[1]);
		fp_sub96c(tmp, 0, tmp64.m32[0], tmp64.m32[1]);

		src->mant.m32[0] = tmp.m32[1];
		src->mant.m32[1] = tmp.m32[2];

		while (!fp_sub96c(tmp, 0, div->mant.m32[0], div->mant.m32[1])) {
			src->mant.m32[0] = tmp.m32[1];
			src->mant.m32[1] = tmp.m32[2];
			*mantp += 1;
		}
	}
}
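
/* Each loop iteration above produces one 32-bit quotient digit.  The classic
   way to estimate such a digit, which the fix factor refines, is to divide
   the top 64 bits of the remainder by the high word C of the divisor; with
   div normalized (bit 63 set) that estimate exceeds the true digit by at
   most two (Knuth, Algorithm D), which is why only a small correction loop
   is needed.  An illustrative sketch of that estimate alone; the helper
   name is an assumption and it is not used by the routine above: */
static inline unsigned int fp_divdigit_estimate(unsigned int rem_hi,
						unsigned int rem_lo,
						unsigned int div_hi)
{
	unsigned long long top = ((unsigned long long)rem_hi << 32) | rem_lo;

	/* if the high words are equal, a 64/32 divide would overflow,
	   so clamp the estimate to the largest possible digit */
	if (rem_hi >= div_hi)
		return 0xffffffff;
	return (unsigned int)(top / div_hi);
}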

static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
				 int shift)
{
	unsigned long tmp;

	switch (shift) {
	case 0:
		dest->mant.m64 = src->m64[0];
		dest->lowmant = src->m32[2] >> 24;
		if (src->m32[3] || (src->m32[2] << 8))
			dest->lowmant |= 1;
		break;
	case 1:
		asm volatile ("lsl.l #1,%0"
			: "=d" (tmp) : "0" (src->m32[2]));
		asm volatile ("roxl.l #1,%0"
			: "=d" (dest->mant.m32[1]) : "0" (src->m32[1]));
		asm volatile ("roxl.l #1,%0"
			: "=d" (dest->mant.m32[0]) : "0" (src->m32[0]));
		dest->lowmant = tmp >> 24;
		if (src->m32[3] || (tmp << 8))
			dest->lowmant |= 1;
		break;
	case 31:
		asm volatile ("lsr.l #1,%1; roxr.l #1,%0"
			: "=d" (dest->mant.m32[0])
			: "d" (src->m32[0]), "0" (src->m32[1]));
		asm volatile ("roxr.l #1,%0"
			: "=d" (dest->mant.m32[1]) : "0" (src->m32[2]));
		asm volatile ("roxr.l #1,%0"
			: "=d" (tmp) : "0" (src->m32[3]));
		dest->lowmant = tmp >> 24;
		if (src->m32[3] << 7)
			dest->lowmant |= 1;
		break;
	case 32:
		dest->mant.m32[0] = src->m32[1];
		dest->mant.m32[1] = src->m32[2];
		dest->lowmant = src->m32[3] >> 24;
		if (src->m32[3] << 8)
			dest->lowmant |= 1;
		break;
	}
}

#endif	/* MULTI_ARITH_H */