]> git.ipfire.org Git - thirdparty/gcc.git/blob - libgcc/config/rl78/divmodqi.S
mulsi3.S: Remove a few unneeded moves and branches.
[thirdparty/gcc.git] / libgcc / config / rl78 / divmodqi.S
1 /* QImode div/mod functions for the GCC support library for the Renesas RL78 processors.
2 Copyright (C) 2012,2013 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
20
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
25
26 #ifndef __RL78_G10__
27
28 #include "vregs.h"
29
30 .macro make_generic which,need_result
;; Emit one 8-bit unsigned shift-and-subtract divide/modulo routine.
;;   \which       - label suffix (_d or _m) so the two expansions don't clash
;;   \need_result - 1: emit generic_div (quotient in r8)
;;                  0: emit generic_mod (remainder in r8)
;;
;; Input:  hl points at the caller's frame; [hl+4] = numerator byte,
;;         [hl+6] = denominator byte.
;; Output: r8 = quotient or remainder; 0xff when the denominator is 0.
;; DE is preserved (the signed wrappers keep their sign flags there).
31
32 .if \need_result
33 quot = r8
34 num = r10
35 den = r12
36 bit = r14
37 .else
38 num = r8
39 quot = r10
40 den = r12
41 bit = r14
42 .endif
43
;; The #defines below override den/bit so that they live in the C and B
;; halves of the BC register pair; "bitden" names the pair so both can
;; be shifted right with a single 16-bit shrw (see next_loop below).
44 #if 1
45 #define bit b
46 #define den c
47 #define bitden bc
48 #endif
49
;; num < den: quotient is 0, remainder is the numerator itself.
50 num_lt_den\which:
51 .if \need_result
52 mov r8, #0
53 .else
54 mov a, [hl+4]
55 mov r8, a
56 .endif
57 ret
58
;; num == den: quotient is 1, remainder is 0.
59 num_eq_den\which:
60 .if \need_result
61 mov r8, #1
62 .else
63 mov r8, #0
64 .endif
65 ret
66
;; Division by zero: return 0xff for both quotient and remainder.
67 den_is_zero\which:
68 mov r8, #0xff
69 ret
70
71 ;; These routines leave DE alone - the signed functions use DE
72 ;; to store sign information that must remain intact
73
74 .if \need_result
75
76 generic_div:
77
78 .else
79
80 generic_mod:
81
82 .endif
83
84 ;; (quot,rem) = 4[hl] /% 6[hl]
85
86 mov a, [hl+4] ; num
87 cmp a, [hl+6] ; den
88 bz $num_eq_den\which
89 bnh $num_lt_den\which ; "not higher" here means num < den (equality handled above)
90
91 ;; copy numerator
92 ; mov a, [hl+4] ; already there from above
93 mov num, a
94
95 ;; copy denominator
96 mov a, [hl+6]
97 mov den, a
98
99 cmp0 den
100 bz $den_is_zero\which
101
102 den_not_zero\which:
103 .if \need_result
104 ;; zero out quot
105 mov quot, #0
106 .endif
107
108 ;; initialize bit to 1
109 mov bit, #1
110
111 ; while (den < num && !(den & (1L << BITS_MINUS_1)))
; den <<= 1, bit <<= 1: align den under the numerator's top set bit,
; with "bit" tracking the quotient bit that den currently represents.
112
113 shift_den_bit\which:
;; One shift step; unrolled twice per loop pass to save a branch.
114 .macro sdb_one\which
115 mov a, den
116 mov1 cy,a.7
117 bc $enter_main_loop\which ; den would overflow a byte - stop shifting
118 cmp a, num
119 bh $enter_main_loop\which ; den > num - shifted far enough
120
121 ;; den <<= 1
122 ; mov a, den ; already has it from the cmp above
123 shl a, 1
124 mov den, a
125
126 ;; bit <<= 1
127 shl bit, 1
128 .endm
129
130 sdb_one\which
131 sdb_one\which
132
133 br $shift_den_bit\which
134
;; Main restoring-division loop: walk den/bit back down, subtracting
;; den from num whenever it still fits.
135 main_loop\which:
136
137 ;; if (num >= den) (cmp den > num)
138 mov a, den
139 cmp a, num
140 bh $next_loop\which
141
142 ;; num -= den
143 mov a, num
144 sub a, den
145 mov num, a
146
147 .if \need_result
148 ;; res |= bit
149 mov a, quot
150 or a, bit
151 mov quot, a
152 .endif
153
154 next_loop\which:
155
156 ;; den, bit >>= 1 (one 16-bit shift of the B:C pair shifts both)
157 movw ax, bitden
158 shrw ax, 1
159 movw bitden, ax
160
161 enter_main_loop\which:
162 cmp0 bit
163 bnz $main_loop\which ; done once the quotient bit shifts out to 0
164
;; Result is already in r8: quot = r8 when \need_result, num = r8 otherwise.
165 main_loop_done\which:
166 ret
167 .endm
168
;; Expand the divide (suffix _d) and modulo (suffix _m) flavors.
169 make_generic _d 1
170 make_generic _m 0
171
172 ;----------------------------------------------------------------------
173
174 .global ___udivqi3
175 .type ___udivqi3,@function
;; unsigned char ___udivqi3 (unsigned char num, unsigned char den)
;; Stack args: [sp+4] = num, [sp+6] = den. Quotient returned in r8.
176 ___udivqi3:
177 ;; r8 = 4[sp] / 6[sp]
178 movw hl, sp ; generic_div reads its operands via [hl+4]/[hl+6]
179 br $!generic_div ; tail call; generic_div's ret returns to our caller
180 .size ___udivqi3, . - ___udivqi3
181
182
183 .global ___umodqi3
184 .type ___umodqi3,@function
;; unsigned char ___umodqi3 (unsigned char num, unsigned char den)
;; Stack args: [sp+4] = num, [sp+6] = den. Remainder returned in r8.
185 ___umodqi3:
186 ;; r8 = 4[sp] % 6[sp]
187 movw hl, sp ; generic_mod reads its operands via [hl+4]/[hl+6]
188 br $!generic_mod ; tail call; generic_mod's ret returns to our caller
189 .size ___umodqi3, . - ___umodqi3
190
191 ;----------------------------------------------------------------------
192
;; Negate (two's complement) the byte at the address held in AX.
;; Clobbers A and HL. Used below to flip stack arguments back to their
;; original sign and to negate the r8 result.
193 .macro neg_ax
194 movw hl, ax
195 mov a, #0
196 sub a, [hl] ; a = 0 - *hl
197 mov [hl], a
198 .endm
199
200 .global ___divqi3
201 .type ___divqi3,@function
;; signed char ___divqi3 (signed char num, signed char den)
;; Stack args: [sp+4] = num, [sp+6] = den. Quotient returned in r8.
;; Strategy: negate any negative operand, do the unsigned divide, then
;; negate the result iff exactly one operand was negative. d/e record
;; which operands were negated so the stack copies can be restored.
;;
;; Fix: the denominator sign test in the negative-numerator path read
;; bit 6 (mov1 cy, a.6). The sign of a byte is bit 7, as in the other
;; three sign tests here and in ___modqi3; with a.6, positive
;; denominators 0x40..0x7f were wrongly negated and negative ones
;; 0x80..0xbf were not, producing wrong quotients.
202 ___divqi3:
203 ;; r8 = 4[sp] / 6[sp]
204 movw hl, sp
205 movw de, #0 ; d/e = "numerator/denominator was negated" flags
206 mov a, [sp+4]
207 mov1 cy, a.7 ; sign bit of numerator
208 bc $div_signed_num
209 mov a, [sp+6]
210 mov1 cy, a.7 ; sign bit of denominator
211 bc $div_signed_den
212 br $!generic_div ; both non-negative: plain unsigned divide
213
214 div_signed_num:
215 ;; neg [sp+4]
216 mov a, #0
217 sub a, [hl+4]
218 mov [hl+4], a
219 mov d, #1 ; remember numerator was negated
220 mov a, [sp+6]
221 mov1 cy, a.7 ; sign bit of denominator (was a.6 - wrong bit)
222 bnc $div_unsigned_den
223 div_signed_den:
224 ;; neg [sp+6]
225 mov a, #0
226 sub a, [hl+6]
227 mov [hl+6], a
228 mov e, #1 ; remember denominator was negated
229 div_unsigned_den:
230 call $!generic_div
231
232 mov a, d
233 cmp0 a
234 bz $div_skip_restore_num
235 ;; We have to restore the numerator [sp+4]
236 movw ax, sp
237 addw ax, #4
238 neg_ax
239 mov a, d ; reload the flag: neg_ax clobbered a
240 div_skip_restore_num:
241 xor a, e ; result is negative iff exactly one operand was
242 bz $div_no_neg
243 movw ax, #r8
244 neg_ax
245 div_no_neg:
246 mov a, e
247 cmp0 a
248 bz $div_skip_restore_den
249 movw ax, sp ; restore the denominator we negated on entry
250 addw ax, #6
251 neg_ax
252 div_skip_restore_den:
253 ret
254 .size ___divqi3, . - ___divqi3
255
256
257 .global ___modqi3
258 .type ___modqi3,@function
;; signed char ___modqi3 (signed char num, signed char den)
;; Stack args: [sp+4] = num, [sp+6] = den. Remainder returned in r8.
;; C semantics: the remainder takes the sign of the dividend, so the
;; result is negated only when the numerator was negative (flag in d).
;; d/e also record which negated stack arguments must be restored.
259 ___modqi3:
260 ;; r8 = 4[sp] % 6[sp]
261 movw hl, sp
262 movw de, #0 ; d/e = "numerator/denominator was negated" flags
263 mov a, [hl+4]
264 mov1 cy, a.7 ; sign bit of numerator
265 bc $mod_signed_num
266 mov a, [hl+6]
267 mov1 cy, a.7 ; sign bit of denominator
268 bc $mod_signed_den
269 br $!generic_mod ; both non-negative: plain unsigned modulo
270
271 mod_signed_num:
272 ;; neg [sp+4]
273 mov a, #0
274 sub a, [hl+4]
275 mov [hl+4], a
276 mov d, #1 ; remember numerator was negated
277 mov a, [hl+6]
278 mov1 cy, a.7 ; sign bit of denominator
279 bnc $mod_unsigned_den
280 mod_signed_den:
281 ;; neg [sp+6]
282 mov a, #0
283 sub a, [hl+6]
284 mov [hl+6], a
285 mov e, #1 ; remember denominator was negated
286 mod_unsigned_den:
287 call $!generic_mod
288
289 mov a, d
290 cmp0 a
291 bz $mod_no_neg
;; Numerator was negative: negate the remainder (sign follows dividend).
292 mov a, #0
293 sub a, r8
294 mov r8, a
295 ;; Also restore numerator
296 movw ax, sp
297 addw ax, #4
298 neg_ax
299 mod_no_neg:
300 mov a, e
301 cmp0 a
302 bz $mod_skip_restore_den
;; Restore the denominator we negated on entry.
303 movw ax, sp
304 addw ax, #6
305 neg_ax
306 mod_skip_restore_den:
307 ret
308 .size ___modqi3, . - ___modqi3
309
310 #endif