; Copyright (C) 2011-2017 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
; <http://www.gnu.org/licenses/>.

;; 32x32=32 multiply

#include "vregs.h"

;----------------------------------------------------------------------

; Register use:
;	RB0	RB1	RB2
; AX	op2L	res32L	res32H
; BC	op2H	(resH)	op1
; DE	count		(resL-tmp)
; HL	[sp+4]

; Register use (G10):
;
; AX	op2L
; BC	op2H
; DE	count
; HL	[sp+4]
; r8/r9		res32L
; r10/r11	(resH)
; r12/r13	(resL-tmp)
; r16/r17	res32H
; r18/r19	op1

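; The routine below computes the product modulo 2^32 by splitting each
; operand into 16-bit halves: the two cross products only contribute to
; the upper half of the result, and the low*low product is done with a
; shift-and-add loop.  Roughly, as a C model (illustrative only; the
; name is not part of these sources; assumes 16-bit unsigned short and
; 32-bit unsigned long, as on RL78):
;
;   unsigned long
;   mulsi3_model (unsigned long a, unsigned long b)
;   {
;     unsigned short al = a, ah = a >> 16, bl = b, bh = b >> 16;
;     /* Cross terms; only their low 16 bits can reach the result.  */
;     unsigned short hi = (unsigned short) ((unsigned long) ah * bl
;                                           + (unsigned long) al * bh);
;     /* Full 16x16->32 product of the low halves.  */
;     unsigned long lo = (unsigned long) al * bl;
;     return ((unsigned long) hi << 16) + lo;
;   }
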
START_FUNC ___mulsi3
	;; A is at [sp+4]
	;; B is at [sp+8]
	;; result is in R8..R11

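	;; Save the registers we are about to clobber: r16..r19 on G10
	;; (one register bank), otherwise bank 2's AX and BC.  After these
	;; two pushes the operands sit 4 bytes further out, so A is read
	;; from [sp+8] and B from [sp+12] below.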
#ifdef __RL78_G10__
	movw	ax, r16
	push	ax
	movw	ax, r18
	push	ax
#else
	sel	rb2
	push	ax
	push	bc
	sel	rb0
#endif
	clrw	ax
	movw	r8, ax
	movw	r16, ax

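	;; First cross term: r8 += B.high * A.low.  B.high == 0 contributes
	;; nothing; B.high == 0xffff is handled as r8 -= A.low (multiplying
	;; by 0xffff is negation modulo 2^16); anything else goes through
	;; the generic .Lmul_hi helper below.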
	movw	ax, [sp+14]
	cmpw	ax, #0
	bz	$1f
	cmpw	ax, #0xffff
	bnz	$2f
	movw	ax, [sp+8]
#ifdef __RL78_G10__
	push	bc
	movw	bc, r8
	xchw	ax, bc
	subw	ax, bc
	movw	r8, ax
	movw	ax, bc
	pop	bc
#else
	sel	rb1
	subw	ax, r_0
	sel	rb0
#endif
	br	$1f
2:
	movw	bc, ax
	movw	ax, [sp+8]
	cmpw	ax, #0
	skz
	call	!.Lmul_hi
1:

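	;; Second cross term: r8 += A.high * B.low, with the same three
	;; cases (0, 0xffff, generic) as above.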
	movw	ax, [sp+10]
	cmpw	ax, #0
	bz	$1f
	cmpw	ax, #0xffff
	bnz	$2f
	movw	ax, [sp+12]
#ifdef __RL78_G10__
	push	bc
	movw	bc, r8
	xchw	ax, bc
	subw	ax, bc
	movw	r8, ax
	movw	ax, bc
	pop	bc
#else
	sel	rb1
	subw	ax, r_0
	sel	rb0
#endif
	br	$1f
2:
	movw	bc, ax
	movw	ax, [sp+12]
	cmpw	ax, #0
	skz
	call	!.Lmul_hi
1:

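	;; The cross terms only affect the upper 16 bits of the result:
	;; move their sum into r16 and reuse r8 as the low-word
	;; accumulator for the full 16x16->32 product A.low * B.low.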
	movw	ax, r8
	movw	r16, ax
	clrw	ax
	movw	r8, ax

	;; now do R16:R8 += op1L * op2L

	;; op1 is in AX.0 (needs to shrw)
	;; op2 is in BC.0 and BC.1 (bc can shlw/rolwc)
	;; res is in AX.2 and AX.1 (needs to addw)

	movw	ax, [sp+8]
	movw	r10, ax	; BC.1
	movw	ax, [sp+12]

	cmpw	ax, r10
	bc	$.Lmul_hisi_top
	movw	bc, r10
	movw	r10, ax
	movw	ax, bc

.Lmul_hisi_top:
	movw	bc, #0

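	;; Shift-and-add loop, unrolled to handle two multiplier bits per
	;; pass: shift the multiplier (AX.0) right and, whenever a 1 bit
	;; falls out into carry, add the left-shifted multiplicand
	;; (BC high word plus BC.1/r10 low word) into the 32-bit
	;; accumulator.  The compare above swapped the operands so the
	;; smaller word is the one shifted right, letting the loop exit
	;; as early as possible.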
.Lmul_hisi_loop:
	shrw	ax, 1
#ifdef __RL78_G10__
	push	ax
	bnc	$.Lmul_hisi_no_add_g10
	movw	ax, r8
	addw	ax, r10
	movw	r8, ax
	sknc
	incw	r16
	movw	ax, r16
	addw	ax, r_2
	movw	r16, ax
.Lmul_hisi_no_add_g10:
	movw	ax, r10
	shlw	ax, 1
	movw	r10, ax
	pop	ax
#else
	bnc	$.Lmul_hisi_no_add
	sel	rb1
	addw	ax, bc
	sel	rb2
	sknc
	incw	ax
	addw	ax, r_2
.Lmul_hisi_no_add:
	sel	rb1
	shlw	bc, 1
	sel	rb0
#endif
	rolwc	bc, 1
	cmpw	ax, #0
	bz	$.Lmul_hisi_done

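	;; Second, unrolled copy of the step above for the next
	;; multiplier bit.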
	shrw	ax, 1
#ifdef __RL78_G10__
	push	ax
	bnc	$.Lmul_hisi_no_add2_g10
	movw	ax, r8
	addw	ax, r10
	movw	r8, ax
	movw	ax, r16
	sknc
	incw	ax
	addw	ax, r_2
	movw	r16, ax
.Lmul_hisi_no_add2_g10:
	movw	ax, r10
	shlw	ax, 1
	movw	r10, ax
	pop	ax
#else
	bnc	$.Lmul_hisi_no_add2
	sel	rb1
	addw	ax, bc
	sel	rb2
	sknc
	incw	ax
	addw	ax, r_2
.Lmul_hisi_no_add2:
	sel	rb1
	shlw	bc, 1
	sel	rb0
#endif
	rolwc	bc, 1
	cmpw	ax, #0
	bnz	$.Lmul_hisi_loop

.Lmul_hisi_done:

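	;; Place the high word in r10/r11 so the 32-bit result sits in
	;; R8..R11, then restore the saved registers.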
	movw	ax, r16
	movw	r10, ax

#ifdef __RL78_G10__
	pop	ax
	movw	r18, ax
	pop	ax
	movw	r16, ax
#else
	sel	rb2
	pop	bc
	pop	ax
	sel	rb0
#endif

	ret
END_FUNC ___mulsi3

;----------------------------------------------------------------------

START_FUNC ___mulhi3
	movw	r8, #0
	movw	ax, [sp+6]
	movw	bc, ax
	movw	ax, [sp+4]

	;; R8 += AX * BC
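	;; 16x16->16 shift-and-add multiply.  .Lmul_hi is also called from
	;; ___mulsi3 above to accumulate the cross products into r8.  The
	;; operands are swapped if necessary so the value shifted right
	;; (AX) is the smaller one, and the loop is unrolled to consume
	;; two multiplier bits per iteration.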
.Lmul_hi:
	cmpw	ax, bc
	skc
	xchw	ax, bc
	br	$.Lmul_hi_loop

.Lmul_hi_top:
#ifdef __RL78_G10__
	push	ax
	movw	ax, r8
	addw	ax, r_2
	movw	r8, ax
	pop	ax
#else
	sel	rb1
	addw	ax, r_2
	sel	rb0
#endif

.Lmul_hi_no_add:
	shlw	bc, 1
.Lmul_hi_loop:
	shrw	ax, 1
	bc	$.Lmul_hi_top
	cmpw	ax, #0
	bz	$.Lmul_hi_done

	shlw	bc, 1
	shrw	ax, 1
	bc	$.Lmul_hi_top
	cmpw	ax, #0
	bnz	$.Lmul_hi_no_add

.Lmul_hi_done:
	ret
END_FUNC ___mulhi3

;;; --------------------------------------
#ifdef __RL78_G10__
START_FUNC ___mulqi3

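	;; 8x8->8 shift-and-add multiply (G10 only).  r9 holds the shifted
	;; multiplicand, r10 the multiplier, r8 accumulates the product,
	;; and r11 (preset to 9) bounds the number of loop passes.  The
	;; loop also exits early once the multiplier reaches zero.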
	mov	a, [sp+4]
	mov	r9, a
	mov	a, [sp+6]
	mov	r10, a
	mov	a, #9
	mov	r11, a
	clrb	a
	mov	r8, a
.L2:
	cmp0	r10
	skz
	dec	r11
	sknz
	ret
	mov	a, r10
	and	a, #1
	mov	r12, a
	cmp0	r12
	sknz
	br	!!.L3
	mov	a, r9
	mov	l, a
	mov	a, r8
	add	a, l
	mov	r8, a
.L3:
	mov	a, r9
	add	a, a
	mov	r9, a
	mov	a, r10
	shr	a, 1
	mov	r10, a
	br	!!.L2

END_FUNC ___mulqi3
#endif