/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  mul_Xsig.S                                                               |
 |                                                                           |
 | Multiply a 12 byte (96 bit) fixed point number by a 32, 64, or 96 bit    |
 | fixed point number.                                                      |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void mul32_Xsig(Xsig *x, unsigned b)                                    |
 |                                                                           |
 |   void mul64_Xsig(Xsig *x, unsigned long long *b)                         |
 |                                                                           |
 |   void mul_Xsig_Xsig(Xsig *x, const Xsig *b)                              |
 |                                                                           |
 | The result is neither rounded nor normalized, and the least significant  |
 | bit or so may be wrong (low order partial products are dropped).         |
 |                                                                           |
 +---------------------------------------------------------------------------*/
	.file	"mul_Xsig.S"


#include "fpu_emu.h"

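/*
 * Note on data layout (a sketch of the assumptions, not authoritative):
 * Xsig is declared in fpu_emu.h as three 32-bit words, least significant
 * first, roughly:
 *
 *	typedef struct { unsigned long lsw, midw, msw; } Xsig;
 *
 * so below (%esi) is the lsw, 4(%esi) the midw and 8(%esi) the msw of x.
 * Each routine keeps only the most significant 96 bits of the full
 * product; in mul64_Xsig and mul_Xsig_Xsig some low order partial
 * products are skipped, which is why the least significant bit of the
 * result may be off.
 */
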
.text
SYM_FUNC_START(mul32_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

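	/* 96-bit accumulator in -12(%ebp) (least significant word)
	   through -4(%ebp) (most significant word).  Clear the upper
	   two words; the first mull below initializes -12(%ebp).   */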
	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull %ecx		/* b, a single 32-bit word */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull %ecx		/* b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull %ecx		/* b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	RET
SYM_FUNC_END(mul32_Xsig)
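
/*
   For reference, a C sketch of the computation above (illustration only,
   not built; the _model name is ours and the Xsig layout is as noted at
   the top of this file).  mul32_Xsig keeps the most significant 96 bits
   of the 128-bit product, i.e. x <- (x * b) >> 32.  Only the truncated
   low word is discarded, so this routine is exact.

   void mul32_Xsig_model(Xsig *x, unsigned b)
   {
	   unsigned long long acc;
	   unsigned w0, w1, w2;

	   acc  = ((unsigned long long)x->lsw * b) >> 32;
	   acc += (unsigned)((unsigned long long)x->midw * b);
	   w0 = (unsigned)acc;
	   acc >>= 32;

	   acc += ((unsigned long long)x->midw * b) >> 32;
	   acc += (unsigned)((unsigned long long)x->msw * b);
	   w1 = (unsigned)acc;
	   acc >>= 32;

	   acc += ((unsigned long long)x->msw * b) >> 32;
	   w2 = (unsigned)acc;

	   x->lsw = w0;  x->midw = w1;  x->msw = w2;
   }
 */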


SYM_FUNC_START(mul64_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

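	/* 96-bit accumulator at -12(%ebp)..-4(%ebp), least significant
	   word lowest, as in mul32_Xsig above */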
	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull 4(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	RET
SYM_FUNC_END(mul64_Xsig)
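
/*
   A C sketch of mul64_Xsig (illustration only, not built; the _model
   name is ours).  It keeps the most significant 96 bits of the 160-bit
   product of the 96-bit x and the 64-bit b, roughly x <- (x * b) >> 64.
   The x->lsw * b_lo partial product and the low halves of the two
   weight-2^32 partials are never formed, so carries out of them are
   lost: this is the "least significant bit or so" error noted in the
   header.

   void mul64_Xsig_model(Xsig *x, const unsigned long long *b)
   {
	   unsigned b_lo = (unsigned)*b;
	   unsigned b_hi = (unsigned)(*b >> 32);
	   unsigned long long acc;
	   unsigned w0, w1, w2;

	   acc  = ((unsigned long long)x->lsw  * b_hi) >> 32;
	   acc += ((unsigned long long)x->midw * b_lo) >> 32;
	   acc += (unsigned)((unsigned long long)x->midw * b_hi);
	   acc += (unsigned)((unsigned long long)x->msw  * b_lo);
	   w0 = (unsigned)acc;
	   acc >>= 32;

	   acc += ((unsigned long long)x->midw * b_hi) >> 32;
	   acc += ((unsigned long long)x->msw  * b_lo) >> 32;
	   acc += (unsigned)((unsigned long long)x->msw  * b_hi);
	   w1 = (unsigned)acc;
	   acc >>= 32;

	   acc += ((unsigned long long)x->msw * b_hi) >> 32;
	   w2 = (unsigned)acc;

	   x->lsw = w0;  x->midw = w1;  x->msw = w2;
   }
 */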


SYM_FUNC_START(mul_Xsig_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

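	/* 96-bit accumulator at -12(%ebp)..-4(%ebp), least significant
	   word lowest, as in mul32_Xsig above */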
	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull 8(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%edx
	movl %edx,(%esi)
	movl -8(%ebp),%edx
	movl %edx,4(%esi)
	movl -4(%ebp),%edx
	movl %edx,8(%esi)

	popl %esi
	leave
	RET
SYM_FUNC_END(mul_Xsig_Xsig)
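
/*
   A C sketch of mul_Xsig_Xsig (illustration only, not built; the _model
   name is ours).  It keeps the most significant 96 bits of the 192-bit
   product of two 96-bit operands, roughly x <- (x * b) >> 96.  The three
   lowest partial products (lsw with lsw, lsw with midw, midw with lsw)
   and the low halves of the weight-2^64 partials are skipped, again
   costing at most a few units in the last place of the result.

   void mul_Xsig_Xsig_model(Xsig *x, const Xsig *b)
   {
	   unsigned long long acc;
	   unsigned w0, w1, w2;

	   acc  = ((unsigned long long)x->lsw  * b->msw)  >> 32;
	   acc += ((unsigned long long)x->midw * b->midw) >> 32;
	   acc += ((unsigned long long)x->msw  * b->lsw)  >> 32;
	   acc += (unsigned)((unsigned long long)x->midw * b->msw);
	   acc += (unsigned)((unsigned long long)x->msw  * b->midw);
	   w0 = (unsigned)acc;
	   acc >>= 32;

	   acc += ((unsigned long long)x->midw * b->msw)  >> 32;
	   acc += ((unsigned long long)x->msw  * b->midw) >> 32;
	   acc += (unsigned)((unsigned long long)x->msw  * b->msw);
	   w1 = (unsigned)acc;
	   acc >>= 32;

	   acc += ((unsigned long long)x->msw * b->msw) >> 32;
	   w2 = (unsigned)acc;

	   x->lsw = w0;  x->midw = w1;  x->msw = w2;
   }
 */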