/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991,92,94,95,96,97,98,99 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* You have to define the following before including this file:

   UWtype -- An unsigned type, default type for operations (typically a "word")
   UHWtype -- An unsigned type, at least half the size of UWtype.
   UDWtype -- An unsigned type, at least twice as large as UWtype.
   W_TYPE_SIZE -- size in bits of UWtype

   UQItype -- Unsigned 8 bit type.
   SItype, USItype -- Signed and unsigned 32 bit types.
   DItype, UDItype -- Signed and unsigned 64 bit types.

   On a 32 bit machine UWtype should typically be USItype;
   on a 64 bit machine, UWtype should typically be UDItype.  */
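
/* A minimal sketch of a typical 32-bit configuration, for illustration
   only.  The particular typedefs below are assumptions about the
   compiler's built-in types, not part of this file's interface.  */
#if 0
typedef unsigned char UQItype;
typedef int SItype;
typedef unsigned int USItype;
typedef long long int DItype;
typedef unsigned long long int UDItype;

#define W_TYPE_SIZE 32
#define UWtype USItype
#define UHWtype USItype
#define UDWtype UDItype
#endif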

#define __BITS4 (W_TYPE_SIZE / 4)
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))

#ifndef W_TYPE_SIZE
#define W_TYPE_SIZE 32
#define UWtype USItype
#define UHWtype USItype
#define UDWtype UDItype
#endif

/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
   UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
   word product in HIGH_PROD and LOW_PROD.

   2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
   UDWtype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a UDWtype, composed of the UWtype integers
   HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
   in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
   than DENOMINATOR for correct operation.  If the macro additionally
   requires the most significant bit of DENOMINATOR to be 1, the
   pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.

   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
   is rounded towards 0.

   5) count_leading_zeros(count, x) counts the number of zero-bits from the
   msb to the first non-zero bit in the UWtype X.  This is the number of
   steps X needs to be shifted left to set the msb.  Undefined for X == 0,
   unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.

   6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
   from the least significant end.

   7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two UWtype integers, composed of
   HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
   respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
   (i.e. carry out) is not stored anywhere, and is lost.

   8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
   high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
   composed of HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
   LOW_SUBTRAHEND respectively.  The result is placed in HIGH_DIFFERENCE
   and LOW_DIFFERENCE.  Overflow (i.e. borrow out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  (An illustrative usage sketch follows below.)  */
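
/* For illustration only: a sketch of how the macros above combine.  The
   helper name below is hypothetical, not something this file provides.  */
#if 0
/* Compute the two-UWtype-word value a * b + c.  */
static UWtype
example_muladd (UWtype *low, UWtype a, UWtype b, UWtype c)
{
  UWtype hi, lo;
  umul_ppmm (hi, lo, a, b);		/* hi:lo = a * b */
  add_ssaaaa (hi, lo, hi, lo, 0, c);	/* hi:lo = hi:lo + 0:c */
  *low = lo;
  return hi;
}
#endif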

/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, IBM360.)  */

#if defined (__GNUC__) && !defined (NO_ASM)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */

e9b3e3c5 109#if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
28f540f4
RM
110#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
111 __asm__ ("add %1,%4,%5
112 addc %0,%2,%3" \
1da2d51a
UD
113 : "=r" ((USItype) (sh)), \
114 "=&r" ((USItype) (sl)) \
115 : "%r" ((USItype) (ah)), \
116 "rI" ((USItype) (bh)), \
117 "%r" ((USItype) (al)), \
118 "rI" ((USItype) (bl)))
28f540f4
RM
119#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
120 __asm__ ("sub %1,%4,%5
121 subc %0,%2,%3" \
1da2d51a
UD
122 : "=r" ((USItype) (sh)), \
123 "=&r" ((USItype) (sl)) \
124 : "r" ((USItype) (ah)), \
125 "rI" ((USItype) (bh)), \
126 "r" ((USItype) (al)), \
127 "rI" ((USItype) (bl)))
28f540f4
RM
128#define umul_ppmm(xh, xl, m0, m1) \
129 do { \
130 USItype __m0 = (m0), __m1 = (m1); \
131 __asm__ ("multiplu %0,%1,%2" \
1da2d51a 132 : "=r" ((USItype) (xl)) \
28f540f4
RM
133 : "r" (__m0), \
134 "r" (__m1)); \
135 __asm__ ("multmu %0,%1,%2" \
1da2d51a 136 : "=r" ((USItype) (xh)) \
28f540f4
RM
137 : "r" (__m0), \
138 "r" (__m1)); \
139 } while (0)
140#define udiv_qrnnd(q, r, n1, n0, d) \
141 __asm__ ("dividu %0,%3,%4" \
1da2d51a
UD
142 : "=r" ((USItype) (q)), \
143 "=q" ((USItype) (r)) \
144 : "1" ((USItype) (n1)), \
145 "r" ((USItype) (n0)), \
146 "r" ((USItype) (d)))
28f540f4
RM
147#define count_leading_zeros(count, x) \
148 __asm__ ("clz %0,%1" \
1da2d51a
UD
149 : "=r" ((USItype) (count)) \
150 : "r" ((USItype) (x)))
e9b3e3c5 151#define COUNT_LEADING_ZEROS_0 32
28f540f4
RM
152#endif /* __a29k__ */
153
e9b3e3c5
UD
154#if defined (__alpha) && W_TYPE_SIZE == 64
155#define umul_ppmm(ph, pl, m0, m1) \
156 do { \
157 UDItype __m0 = (m0), __m1 = (m1); \
158 __asm__ ("umulh %r1,%2,%0" \
159 : "=r" ((UDItype) ph) \
160 : "%rJ" (__m0), \
161 "rI" (__m1)); \
162 (pl) = __m0 * __m1; \
163 } while (0)
164#define UMUL_TIME 46
165#ifndef LONGLONG_STANDALONE
166#define udiv_qrnnd(q, r, n1, n0, d) \
167 do { UDItype __r; \
168 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
169 (r) = __r; \
170 } while (0)
f16d4019 171extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
e9b3e3c5
UD
172#define UDIV_TIME 220
173#endif /* LONGLONG_STANDALONE */
174#endif /* __alpha */
175
176#if defined (__arc__) && W_TYPE_SIZE == 32
1da2d51a
UD
177#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
178 __asm__ ("add.f %1, %4, %5
179 adc %0, %2, %3" \
180 : "=r" ((USItype) (sh)), \
181 "=&r" ((USItype) (sl)) \
182 : "%r" ((USItype) (ah)), \
183 "rIJ" ((USItype) (bh)), \
184 "%r" ((USItype) (al)), \
185 "rIJ" ((USItype) (bl)))
186#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
187 __asm__ ("sub.f %1, %4, %5
188 sbc %0, %2, %3" \
189 : "=r" ((USItype) (sh)), \
190 "=&r" ((USItype) (sl)) \
191 : "r" ((USItype) (ah)), \
192 "rIJ" ((USItype) (bh)), \
193 "r" ((USItype) (al)), \
194 "rIJ" ((USItype) (bl)))
195/* Call libgcc1 routine. */
196#define umul_ppmm(w1, w0, u, v) \
197do { \
198 DIunion __w; \
199 __w.ll = __umulsidi3 (u, v); \
200 w1 = __w.s.high; \
201 w0 = __w.s.low; \
202} while (0)
203#define __umulsidi3 __umulsidi3
204UDItype __umulsidi3 (USItype, USItype);
205#endif
28f540f4 206
e9b3e3c5 207#if defined (__arm__) && W_TYPE_SIZE == 32
28f540f4 208#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
8f5ca04b
RM
209 __asm__ ("adds %1, %4, %5
210 adc %0, %2, %3" \
1da2d51a
UD
211 : "=r" ((USItype) (sh)), \
212 "=&r" ((USItype) (sl)) \
213 : "%r" ((USItype) (ah)), \
214 "rI" ((USItype) (bh)), \
215 "%r" ((USItype) (al)), \
216 "rI" ((USItype) (bl)))
28f540f4 217#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
8f5ca04b
RM
218 __asm__ ("subs %1, %4, %5
219 sbc %0, %2, %3" \
1da2d51a
UD
220 : "=r" ((USItype) (sh)), \
221 "=&r" ((USItype) (sl)) \
222 : "r" ((USItype) (ah)), \
223 "rI" ((USItype) (bh)), \
224 "r" ((USItype) (al)), \
225 "rI" ((USItype) (bl)))
28f540f4 226#define umul_ppmm(xh, xl, a, b) \
1da2d51a 227{register USItype __t0, __t1, __t2; \
8f5ca04b 228 __asm__ ("%@ Inlined umul_ppmm
1da2d51a
UD
229 mov %2, %5, lsr #16
230 mov %0, %6, lsr #16
231 bic %3, %5, %2, lsl #16
232 bic %4, %6, %0, lsl #16
233 mul %1, %3, %4
234 mul %4, %2, %4
235 mul %3, %0, %3
236 mul %0, %2, %0
237 adds %3, %4, %3
8f5ca04b 238 addcs %0, %0, #65536
1da2d51a
UD
239 adds %1, %1, %3, lsl #16
240 adc %0, %0, %3, lsr #16" \
241 : "=&r" ((USItype) (xh)), \
242 "=r" ((USItype) (xl)), \
243 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
244 : "r" ((USItype) (a)), \
245 "r" ((USItype) (b)));}
28f540f4
RM
246#define UMUL_TIME 20
247#define UDIV_TIME 100
248#endif /* __arm__ */
249
e9b3e3c5 250#if defined (__clipper__) && W_TYPE_SIZE == 32
28f540f4
RM
251#define umul_ppmm(w1, w0, u, v) \
252 ({union {UDItype __ll; \
253 struct {USItype __l, __h;} __i; \
254 } __xx; \
255 __asm__ ("mulwux %2,%0" \
256 : "=r" (__xx.__ll) \
1da2d51a
UD
257 : "%0" ((USItype) (u)), \
258 "r" ((USItype) (v))); \
28f540f4
RM
259 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
260#define smul_ppmm(w1, w0, u, v) \
261 ({union {DItype __ll; \
262 struct {SItype __l, __h;} __i; \
263 } __xx; \
264 __asm__ ("mulwx %2,%0" \
265 : "=r" (__xx.__ll) \
1da2d51a
UD
266 : "%0" ((SItype) (u)), \
267 "r" ((SItype) (v))); \
28f540f4
RM
268 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
269#define __umulsidi3(u, v) \
270 ({UDItype __w; \
271 __asm__ ("mulwux %2,%0" \
272 : "=r" (__w) \
1da2d51a
UD
273 : "%0" ((USItype) (u)), \
274 "r" ((USItype) (v))); \
28f540f4
RM
275 __w; })
276#endif /* __clipper__ */
277
e9b3e3c5 278#if defined (__gmicro__) && W_TYPE_SIZE == 32
28f540f4
RM
279#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
280 __asm__ ("add.w %5,%1
281 addx %3,%0" \
1da2d51a
UD
282 : "=g" ((USItype) (sh)), \
283 "=&g" ((USItype) (sl)) \
284 : "%0" ((USItype) (ah)), \
285 "g" ((USItype) (bh)), \
286 "%1" ((USItype) (al)), \
287 "g" ((USItype) (bl)))
28f540f4
RM
288#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
289 __asm__ ("sub.w %5,%1
290 subx %3,%0" \
1da2d51a
UD
291 : "=g" ((USItype) (sh)), \
292 "=&g" ((USItype) (sl)) \
293 : "0" ((USItype) (ah)), \
294 "g" ((USItype) (bh)), \
295 "1" ((USItype) (al)), \
296 "g" ((USItype) (bl)))
28f540f4
RM
297#define umul_ppmm(ph, pl, m0, m1) \
298 __asm__ ("mulx %3,%0,%1" \
1da2d51a
UD
299 : "=g" ((USItype) (ph)), \
300 "=r" ((USItype) (pl)) \
301 : "%0" ((USItype) (m0)), \
302 "g" ((USItype) (m1)))
28f540f4
RM
303#define udiv_qrnnd(q, r, nh, nl, d) \
304 __asm__ ("divx %4,%0,%1" \
1da2d51a
UD
305 : "=g" ((USItype) (q)), \
306 "=r" ((USItype) (r)) \
307 : "1" ((USItype) (nh)), \
308 "0" ((USItype) (nl)), \
309 "g" ((USItype) (d)))
28f540f4
RM
310#define count_leading_zeros(count, x) \
311 __asm__ ("bsch/1 %1,%0" \
312 : "=g" (count) \
1da2d51a
UD
313 : "g" ((USItype) (x)), \
314 "0" ((USItype) 0))
28f540f4
RM
315#endif
316
e9b3e3c5 317#if defined (__hppa) && W_TYPE_SIZE == 32
28f540f4
RM
318#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
319 __asm__ ("add %4,%5,%1
320 addc %2,%3,%0" \
1da2d51a
UD
321 : "=r" ((USItype) (sh)), \
322 "=&r" ((USItype) (sl)) \
323 : "%rM" ((USItype) (ah)), \
324 "rM" ((USItype) (bh)), \
325 "%rM" ((USItype) (al)), \
326 "rM" ((USItype) (bl)))
28f540f4
RM
327#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
328 __asm__ ("sub %4,%5,%1
329 subb %2,%3,%0" \
1da2d51a
UD
330 : "=r" ((USItype) (sh)), \
331 "=&r" ((USItype) (sl)) \
332 : "rM" ((USItype) (ah)), \
333 "rM" ((USItype) (bh)), \
334 "rM" ((USItype) (al)), \
335 "rM" ((USItype) (bl)))
28f540f4 336#if defined (_PA_RISC1_1)
1da2d51a 337#define umul_ppmm(w1, w0, u, v) \
28f540f4 338 do { \
1da2d51a
UD
339 union \
340 { \
341 UDItype __f; \
342 struct {USItype __w1, __w0;} __w1w0; \
343 } __t; \
28f540f4 344 __asm__ ("xmpyu %1,%2,%0" \
1da2d51a
UD
345 : "=x" (__t.__f) \
346 : "x" ((USItype) (u)), \
347 "x" ((USItype) (v))); \
348 (w1) = __t.__w1w0.__w1; \
349 (w0) = __t.__w1w0.__w0; \
350 } while (0)
28f540f4 351#define UMUL_TIME 8
28f540f4 352#else
1da2d51a 353#define UMUL_TIME 30
28f540f4 354#endif
1da2d51a 355#define UDIV_TIME 40
28f540f4
RM
356#define count_leading_zeros(count, x) \
357 do { \
358 USItype __tmp; \
359 __asm__ ( \
360 "ldi 1,%0
361 extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
362 extru,tr %1,15,16,%1 ; No. Shift down, skip add.
363 ldo 16(%0),%0 ; Yes. Perform add.
364 extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
365 extru,tr %1,23,8,%1 ; No. Shift down, skip add.
366 ldo 8(%0),%0 ; Yes. Perform add.
367 extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
368 extru,tr %1,27,4,%1 ; No. Shift down, skip add.
369 ldo 4(%0),%0 ; Yes. Perform add.
370 extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
371 extru,tr %1,29,2,%1 ; No. Shift down, skip add.
372 ldo 2(%0),%0 ; Yes. Perform add.
373 extru %1,30,1,%1 ; Extract bit 1.
374 sub %0,%1,%0 ; Subtract it.
375 " : "=r" (count), "=r" (__tmp) : "1" (x)); \
376 } while (0)
28f540f4
RM
377#endif
378
e9b3e3c5
UD
379#if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
380#define umul_ppmm(xh, xl, m0, m1) \
381 do { \
382 union {UDItype __ll; \
383 struct {USItype __h, __l;} __i; \
384 } __xx; \
385 USItype __m0 = (m0), __m1 = (m1); \
386 __asm__ ("mr %0,%3" \
387 : "=r" (__xx.__i.__h), \
388 "=r" (__xx.__i.__l) \
389 : "%1" (__m0), \
390 "r" (__m1)); \
391 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
392 (xh) += ((((SItype) __m0 >> 31) & __m1) \
393 + (((SItype) __m1 >> 31) & __m0)); \
394 } while (0)
395#define smul_ppmm(xh, xl, m0, m1) \
396 do { \
397 union {DItype __ll; \
398 struct {USItype __h, __l;} __i; \
399 } __xx; \
400 __asm__ ("mr %0,%3" \
401 : "=r" (__xx.__i.__h), \
402 "=r" (__xx.__i.__l) \
403 : "%1" (m0), \
404 "r" (m1)); \
405 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
406 } while (0)
407#define sdiv_qrnnd(q, r, n1, n0, d) \
408 do { \
409 union {DItype __ll; \
410 struct {USItype __h, __l;} __i; \
411 } __xx; \
412 __xx.__i.__h = n1; __xx.__i.__l = n0; \
413 __asm__ ("dr %0,%2" \
414 : "=r" (__xx.__ll) \
415 : "0" (__xx.__ll), "r" (d)); \
416 (q) = __xx.__i.__l; (r) = __xx.__i.__h; \
417 } while (0)
418#endif
419
420#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
28f540f4
RM
421#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
422 __asm__ ("addl %5,%1
423 adcl %3,%0" \
1da2d51a
UD
424 : "=r" ((USItype) (sh)), \
425 "=&r" ((USItype) (sl)) \
426 : "%0" ((USItype) (ah)), \
427 "g" ((USItype) (bh)), \
428 "%1" ((USItype) (al)), \
429 "g" ((USItype) (bl)))
28f540f4
RM
430#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
431 __asm__ ("subl %5,%1
432 sbbl %3,%0" \
1da2d51a
UD
433 : "=r" ((USItype) (sh)), \
434 "=&r" ((USItype) (sl)) \
435 : "0" ((USItype) (ah)), \
436 "g" ((USItype) (bh)), \
437 "1" ((USItype) (al)), \
438 "g" ((USItype) (bl)))
28f540f4
RM
439#define umul_ppmm(w1, w0, u, v) \
440 __asm__ ("mull %3" \
1da2d51a
UD
441 : "=a" ((USItype) (w0)), \
442 "=d" ((USItype) (w1)) \
443 : "%0" ((USItype) (u)), \
444 "rm" ((USItype) (v)))
28f540f4
RM
445#define udiv_qrnnd(q, r, n1, n0, d) \
446 __asm__ ("divl %4" \
1da2d51a
UD
447 : "=a" ((USItype) (q)), \
448 "=d" ((USItype) (r)) \
449 : "0" ((USItype) (n0)), \
450 "1" ((USItype) (n1)), \
451 "rm" ((USItype) (d)))
28f540f4
RM
452#define count_leading_zeros(count, x) \
453 do { \
454 USItype __cbtmp; \
455 __asm__ ("bsrl %1,%0" \
1da2d51a 456 : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
28f540f4
RM
457 (count) = __cbtmp ^ 31; \
458 } while (0)
62818cfd
UD
459#define count_trailing_zeros(count, x) \
460 __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
28f540f4
RM
461#define UMUL_TIME 40
462#define UDIV_TIME 40
463#endif /* 80x86 */
464
e9b3e3c5 465#if defined (__i860__) && W_TYPE_SIZE == 32
1da2d51a
UD
466#if 0
467/* Make sure these patterns really improve the code before
468 switching them on. */
ba848785 469#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1da2d51a
UD
470 do { \
471 union \
472 { \
473 DItype __ll; \
474 struct {USItype __l, __h;} __i; \
475 } __a, __b, __s; \
476 __a.__i.__l = (al); \
477 __a.__i.__h = (ah); \
478 __b.__i.__l = (bl); \
479 __b.__i.__h = (bh); \
480 __asm__ ("fiadd.dd %1,%2,%0" \
481 : "=f" (__s.__ll) \
482 : "%f" (__a.__ll), "f" (__b.__ll)); \
483 (sh) = __s.__i.__h; \
484 (sl) = __s.__i.__l; \
485 } while (0)
ba848785 486#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1da2d51a
UD
487 do { \
488 union \
489 { \
490 DItype __ll; \
491 struct {USItype __l, __h;} __i; \
492 } __a, __b, __s; \
493 __a.__i.__l = (al); \
494 __a.__i.__h = (ah); \
495 __b.__i.__l = (bl); \
496 __b.__i.__h = (bh); \
497 __asm__ ("fisub.dd %1,%2,%0" \
498 : "=f" (__s.__ll) \
499 : "%f" (__a.__ll), "f" (__b.__ll)); \
500 (sh) = __s.__i.__h; \
501 (sl) = __s.__i.__l; \
502 } while (0)
503#endif
504#endif /* __i860__ */
505
e9b3e3c5 506#if defined (__i960__) && W_TYPE_SIZE == 32
28f540f4
RM
507#define umul_ppmm(w1, w0, u, v) \
508 ({union {UDItype __ll; \
509 struct {USItype __l, __h;} __i; \
510 } __xx; \
511 __asm__ ("emul %2,%1,%0" \
512 : "=d" (__xx.__ll) \
1da2d51a
UD
513 : "%dI" ((USItype) (u)), \
514 "dI" ((USItype) (v))); \
28f540f4
RM
515 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
516#define __umulsidi3(u, v) \
517 ({UDItype __w; \
518 __asm__ ("emul %2,%1,%0" \
519 : "=d" (__w) \
1da2d51a
UD
520 : "%dI" ((USItype) (u)), \
521 "dI" ((USItype) (v))); \
62818cfd 522 __w; })
1da2d51a 523#endif /* __i960__ */
28f540f4 524
e9b3e3c5 525#if defined (__M32R__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  /* The cmp clears the condition bit.  */ \
  __asm__ ("cmp %0,%0
	addx %5,%1
	addx %3,%0" \
	   : "=r" ((USItype) (sh)), \
	     "=&r" ((USItype) (sl)) \
	   : "%0" ((USItype) (ah)), \
	     "r" ((USItype) (bh)), \
	     "%1" ((USItype) (al)), \
	     "r" ((USItype) (bl)) \
	   : "cbit")
538#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
539 /* The cmp clears the condition bit. */ \
540 __asm__ ("cmp %0,%0
541 subx %5,%1
542 subx %3,%0" \
543 : "=r" ((USItype) (sh)), \
544 "=&r" ((USItype) (sl)) \
545 : "0" ((USItype) (ah)), \
546 "r" ((USItype) (bh)), \
547 "1" ((USItype) (al)), \
548 "r" ((USItype) (bl)) \
549 : "cbit")
550#endif /* __M32R__ */
551
e9b3e3c5 552#if defined (__mc68000__) && W_TYPE_SIZE == 32
28f540f4
RM
553#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
554 __asm__ ("add%.l %5,%1
555 addx%.l %3,%0" \
1da2d51a
UD
556 : "=d" ((USItype) (sh)), \
557 "=&d" ((USItype) (sl)) \
558 : "%0" ((USItype) (ah)), \
559 "d" ((USItype) (bh)), \
560 "%1" ((USItype) (al)), \
561 "g" ((USItype) (bl)))
28f540f4
RM
562#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
563 __asm__ ("sub%.l %5,%1
564 subx%.l %3,%0" \
1da2d51a
UD
565 : "=d" ((USItype) (sh)), \
566 "=&d" ((USItype) (sl)) \
567 : "0" ((USItype) (ah)), \
568 "d" ((USItype) (bh)), \
569 "1" ((USItype) (al)), \
570 "g" ((USItype) (bl)))
571
572/* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r. */
573#if defined (__mc68020__) || defined(mc68020) \
574 || defined(__mc68030__) || defined(mc68030) \
575 || defined(__mc68040__) || defined(mc68040) \
576 || defined(__mcpu32__) || defined(mcpu32) \
577 || defined(__NeXT__)
28f540f4
RM
578#define umul_ppmm(w1, w0, u, v) \
579 __asm__ ("mulu%.l %3,%1:%0" \
1da2d51a
UD
580 : "=d" ((USItype) (w0)), \
581 "=d" ((USItype) (w1)) \
582 : "%0" ((USItype) (u)), \
583 "dmi" ((USItype) (v)))
28f540f4
RM
584#define UMUL_TIME 45
585#define udiv_qrnnd(q, r, n1, n0, d) \
586 __asm__ ("divu%.l %4,%1:%0" \
1da2d51a
UD
587 : "=d" ((USItype) (q)), \
588 "=d" ((USItype) (r)) \
589 : "0" ((USItype) (n0)), \
590 "1" ((USItype) (n1)), \
591 "dmi" ((USItype) (d)))
28f540f4
RM
592#define UDIV_TIME 90
593#define sdiv_qrnnd(q, r, n1, n0, d) \
594 __asm__ ("divs%.l %4,%1:%0" \
1da2d51a
UD
595 : "=d" ((USItype) (q)), \
596 "=d" ((USItype) (r)) \
597 : "0" ((USItype) (n0)), \
598 "1" ((USItype) (n1)), \
599 "dmi" ((USItype) (d)))
600
28f540f4 601#else /* not mc68020 */
1da2d51a
UD
602#if !defined(__mcf5200__)
603/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
ba848785 604#define umul_ppmm(xh, xl, a, b) \
1da2d51a
UD
605 __asm__ ("| Inlined umul_ppmm
606 move%.l %2,%/d0
607 move%.l %3,%/d1
608 move%.l %/d0,%/d2
609 swap %/d0
610 move%.l %/d1,%/d3
611 swap %/d1
612 move%.w %/d2,%/d4
613 mulu %/d3,%/d4
614 mulu %/d1,%/d2
615 mulu %/d0,%/d3
616 mulu %/d0,%/d1
617 move%.l %/d4,%/d0
618 eor%.w %/d0,%/d0
619 swap %/d0
620 add%.l %/d0,%/d2
621 add%.l %/d3,%/d2
28f540f4 622 jcc 1f
1da2d51a
UD
623 add%.l %#65536,%/d1
6241: swap %/d2
625 moveq %#0,%/d0
626 move%.w %/d2,%/d0
627 move%.w %/d4,%/d2
628 move%.l %/d2,%1
629 add%.l %/d1,%/d0
630 move%.l %/d0,%0" \
631 : "=g" ((USItype) (xh)), \
632 "=g" ((USItype) (xl)) \
633 : "g" ((USItype) (a)), \
634 "g" ((USItype) (b)) \
635 : "d0", "d1", "d2", "d3", "d4")
28f540f4
RM
636#define UMUL_TIME 100
637#define UDIV_TIME 400
1da2d51a 638#endif /* not mcf5200 */
28f540f4 639#endif /* not mc68020 */
1da2d51a
UD
640
641/* The '020, '030, '040 and '060 have bitfield insns. */
642#if defined (__mc68020__) || defined(mc68020) \
643 || defined(__mc68030__) || defined(mc68030) \
644 || defined(__mc68040__) || defined(mc68040) \
645 || defined(__mc68060__) || defined(mc68060) \
646 || defined(__NeXT__)
647#define count_leading_zeros(count, x) \
648 __asm__ ("bfffo %1{%b2:%b2},%0" \
649 : "=d" ((USItype) (count)) \
650 : "od" ((USItype) (x)), "n" (0))
651#endif
28f540f4
RM
652#endif /* mc68000 */
653
e9b3e3c5 654#if defined (__m88000__) && W_TYPE_SIZE == 32
28f540f4
RM
655#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
656 __asm__ ("addu.co %1,%r4,%r5
657 addu.ci %0,%r2,%r3" \
1da2d51a
UD
658 : "=r" ((USItype) (sh)), \
659 "=&r" ((USItype) (sl)) \
660 : "%rJ" ((USItype) (ah)), \
661 "rJ" ((USItype) (bh)), \
662 "%rJ" ((USItype) (al)), \
663 "rJ" ((USItype) (bl)))
28f540f4
RM
664#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
665 __asm__ ("subu.co %1,%r4,%r5
666 subu.ci %0,%r2,%r3" \
1da2d51a
UD
667 : "=r" ((USItype) (sh)), \
668 "=&r" ((USItype) (sl)) \
669 : "rJ" ((USItype) (ah)), \
670 "rJ" ((USItype) (bh)), \
671 "rJ" ((USItype) (al)), \
672 "rJ" ((USItype) (bl)))
28f540f4
RM
673#define count_leading_zeros(count, x) \
674 do { \
675 USItype __cbtmp; \
676 __asm__ ("ff1 %0,%1" \
677 : "=r" (__cbtmp) \
1da2d51a 678 : "r" ((USItype) (x))); \
28f540f4
RM
679 (count) = __cbtmp ^ 31; \
680 } while (0)
e9b3e3c5 681#define COUNT_LEADING_ZEROS_0 63 /* sic */
1da2d51a 682#if defined (__mc88110__)
28f540f4
RM
683#define umul_ppmm(wh, wl, u, v) \
684 do { \
685 union {UDItype __ll; \
686 struct {USItype __h, __l;} __i; \
687 } __xx; \
688 __asm__ ("mulu.d %0,%1,%2" \
689 : "=r" (__xx.__ll) \
1da2d51a
UD
690 : "r" ((USItype) (u)), \
691 "r" ((USItype) (v))); \
28f540f4
RM
692 (wh) = __xx.__i.__h; \
693 (wl) = __xx.__i.__l; \
694 } while (0)
695#define udiv_qrnnd(q, r, n1, n0, d) \
696 ({union {UDItype __ll; \
697 struct {USItype __h, __l;} __i; \
698 } __xx; \
699 USItype __q; \
700 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
701 __asm__ ("divu.d %0,%1,%2" \
702 : "=r" (__q) \
703 : "r" (__xx.__ll), \
1da2d51a 704 "r" ((USItype) (d))); \
28f540f4
RM
705 (r) = (n0) - __q * (d); (q) = __q; })
706#define UMUL_TIME 5
707#define UDIV_TIME 25
708#else
709#define UMUL_TIME 17
710#define UDIV_TIME 150
1da2d51a 711#endif /* __mc88110__ */
28f540f4
RM
712#endif /* __m88000__ */
713
e9b3e3c5 714#if defined (__mips__) && W_TYPE_SIZE == 32
8f5ca04b
RM
715#define umul_ppmm(w1, w0, u, v) \
716 __asm__ ("multu %2,%3" \
1da2d51a
UD
717 : "=l" ((USItype) (w0)), \
718 "=h" ((USItype) (w1)) \
719 : "d" ((USItype) (u)), \
720 "d" ((USItype) (v)))
28f540f4
RM
721#define UMUL_TIME 10
722#define UDIV_TIME 100
723#endif /* __mips__ */
724
e9b3e3c5 725#if defined (__ns32000__) && W_TYPE_SIZE == 32
28f540f4
RM
726#define umul_ppmm(w1, w0, u, v) \
727 ({union {UDItype __ll; \
728 struct {USItype __l, __h;} __i; \
729 } __xx; \
730 __asm__ ("meid %2,%0" \
731 : "=g" (__xx.__ll) \
1da2d51a
UD
732 : "%0" ((USItype) (u)), \
733 "g" ((USItype) (v))); \
28f540f4
RM
734 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
735#define __umulsidi3(u, v) \
736 ({UDItype __w; \
737 __asm__ ("meid %2,%0" \
738 : "=g" (__w) \
1da2d51a
UD
739 : "%0" ((USItype) (u)), \
740 "g" ((USItype) (v))); \
28f540f4
RM
741 __w; })
742#define udiv_qrnnd(q, r, n1, n0, d) \
743 ({union {UDItype __ll; \
744 struct {USItype __l, __h;} __i; \
745 } __xx; \
746 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
747 __asm__ ("deid %2,%0" \
748 : "=g" (__xx.__ll) \
749 : "0" (__xx.__ll), \
1da2d51a 750 "g" ((USItype) (d))); \
28f540f4 751 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
#define count_trailing_zeros(count,x) \
  do { \
    __asm__ ("ffsd %2,%0" \
	     : "=r" ((USItype) (count)) \
	     : "0" ((USItype) 0), \
	       "r" ((USItype) (x))); \
  } while (0)
28f540f4
RM
759#endif /* __ns32000__ */
760
8f5ca04b 761#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
28f540f4
RM
762#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
763 do { \
764 if (__builtin_constant_p (bh) && (bh) == 0) \
765 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
1da2d51a
UD
766 : "=r" ((USItype) (sh)), \
767 "=&r" ((USItype) (sl)) \
768 : "%r" ((USItype) (ah)), \
769 "%r" ((USItype) (al)), \
770 "rI" ((USItype) (bl))); \
28f540f4
RM
771 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
772 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
1da2d51a
UD
773 : "=r" ((USItype) (sh)), \
774 "=&r" ((USItype) (sl)) \
775 : "%r" ((USItype) (ah)), \
776 "%r" ((USItype) (al)), \
777 "rI" ((USItype) (bl))); \
28f540f4
RM
778 else \
779 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
1da2d51a
UD
780 : "=r" ((USItype) (sh)), \
781 "=&r" ((USItype) (sl)) \
782 : "%r" ((USItype) (ah)), \
783 "r" ((USItype) (bh)), \
784 "%r" ((USItype) (al)), \
785 "rI" ((USItype) (bl))); \
28f540f4
RM
786 } while (0)
787#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
788 do { \
789 if (__builtin_constant_p (ah) && (ah) == 0) \
8f5ca04b 790 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
1da2d51a
UD
791 : "=r" ((USItype) (sh)), \
792 "=&r" ((USItype) (sl)) \
793 : "r" ((USItype) (bh)), \
794 "rI" ((USItype) (al)), \
795 "r" ((USItype) (bl))); \
28f540f4 796 else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
8f5ca04b 797 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
1da2d51a
UD
798 : "=r" ((USItype) (sh)), \
799 "=&r" ((USItype) (sl)) \
800 : "r" ((USItype) (bh)), \
801 "rI" ((USItype) (al)), \
802 "r" ((USItype) (bl))); \
28f540f4
RM
803 else if (__builtin_constant_p (bh) && (bh) == 0) \
804 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
1da2d51a
UD
805 : "=r" ((USItype) (sh)), \
806 "=&r" ((USItype) (sl)) \
807 : "r" ((USItype) (ah)), \
808 "rI" ((USItype) (al)), \
809 "r" ((USItype) (bl))); \
28f540f4
RM
810 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
811 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
1da2d51a
UD
812 : "=r" ((USItype) (sh)), \
813 "=&r" ((USItype) (sl)) \
814 : "r" ((USItype) (ah)), \
815 "rI" ((USItype) (al)), \
816 "r" ((USItype) (bl))); \
28f540f4
RM
817 else \
818 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
1da2d51a
UD
819 : "=r" ((USItype) (sh)), \
820 "=&r" ((USItype) (sl)) \
821 : "r" ((USItype) (ah)), \
822 "r" ((USItype) (bh)), \
823 "rI" ((USItype) (al)), \
824 "r" ((USItype) (bl))); \
28f540f4
RM
825 } while (0)
826#define count_leading_zeros(count, x) \
827 __asm__ ("{cntlz|cntlzw} %0,%1" \
1da2d51a
UD
828 : "=r" ((USItype) (count)) \
829 : "r" ((USItype) (x)))
e9b3e3c5 830#define COUNT_LEADING_ZEROS_0 32
8f5ca04b 831#if defined (_ARCH_PPC)
28f540f4
RM
832#define umul_ppmm(ph, pl, m0, m1) \
833 do { \
834 USItype __m0 = (m0), __m1 = (m1); \
835 __asm__ ("mulhwu %0,%1,%2" \
836 : "=r" ((USItype) ph) \
837 : "%r" (__m0), \
838 "r" (__m1)); \
839 (pl) = __m0 * __m1; \
840 } while (0)
841#define UMUL_TIME 15
842#define smul_ppmm(ph, pl, m0, m1) \
843 do { \
844 SItype __m0 = (m0), __m1 = (m1); \
845 __asm__ ("mulhw %0,%1,%2" \
846 : "=r" ((SItype) ph) \
847 : "%r" (__m0), \
848 "r" (__m1)); \
849 (pl) = __m0 * __m1; \
850 } while (0)
851#define SMUL_TIME 14
852#define UDIV_TIME 120
853#else
854#define umul_ppmm(xh, xl, m0, m1) \
855 do { \
856 USItype __m0 = (m0), __m1 = (m1); \
857 __asm__ ("mul %0,%2,%3" \
1da2d51a
UD
858 : "=r" ((USItype) (xh)), \
859 "=q" ((USItype) (xl)) \
28f540f4
RM
860 : "r" (__m0), \
861 "r" (__m1)); \
862 (xh) += ((((SItype) __m0 >> 31) & __m1) \
863 + (((SItype) __m1 >> 31) & __m0)); \
864 } while (0)
865#define UMUL_TIME 8
866#define smul_ppmm(xh, xl, m0, m1) \
867 __asm__ ("mul %0,%2,%3" \
1da2d51a
UD
868 : "=r" ((SItype) (xh)), \
869 "=q" ((SItype) (xl)) \
28f540f4
RM
870 : "r" (m0), \
871 "r" (m1))
872#define SMUL_TIME 4
873#define sdiv_qrnnd(q, r, nh, nl, d) \
874 __asm__ ("div %0,%2,%4" \
1da2d51a
UD
875 : "=r" ((SItype) (q)), "=q" ((SItype) (r)) \
876 : "r" ((SItype) (nh)), "1" ((SItype) (nl)), "r" ((SItype) (d)))
28f540f4
RM
877#define UDIV_TIME 100
878#endif
879#endif /* Power architecture variants. */
880
e9b3e3c5 881#if defined (__pyr__) && W_TYPE_SIZE == 32
28f540f4
RM
882#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
883 __asm__ ("addw %5,%1
884 addwc %3,%0" \
1da2d51a
UD
885 : "=r" ((USItype) (sh)), \
886 "=&r" ((USItype) (sl)) \
887 : "%0" ((USItype) (ah)), \
888 "g" ((USItype) (bh)), \
889 "%1" ((USItype) (al)), \
890 "g" ((USItype) (bl)))
28f540f4
RM
891#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
892 __asm__ ("subw %5,%1
893 subwb %3,%0" \
1da2d51a
UD
894 : "=r" ((USItype) (sh)), \
895 "=&r" ((USItype) (sl)) \
896 : "0" ((USItype) (ah)), \
897 "g" ((USItype) (bh)), \
898 "1" ((USItype) (al)), \
899 "g" ((USItype) (bl)))
8f5ca04b 900/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
28f540f4
RM
901#define umul_ppmm(w1, w0, u, v) \
902 ({union {UDItype __ll; \
903 struct {USItype __h, __l;} __i; \
904 } __xx; \
8f5ca04b
RM
905 __asm__ ("movw %1,%R0
906 uemul %2,%0" \
907 : "=&r" (__xx.__ll) \
908 : "g" ((USItype) (u)), \
1da2d51a 909 "g" ((USItype) (v))); \
28f540f4
RM
910 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
911#endif /* __pyr__ */
912
e9b3e3c5 913#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
28f540f4
RM
914#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
915 __asm__ ("a %1,%5
916 ae %0,%3" \
1da2d51a
UD
917 : "=r" ((USItype) (sh)), \
918 "=&r" ((USItype) (sl)) \
919 : "%0" ((USItype) (ah)), \
920 "r" ((USItype) (bh)), \
921 "%1" ((USItype) (al)), \
922 "r" ((USItype) (bl)))
28f540f4
RM
923#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
924 __asm__ ("s %1,%5
925 se %0,%3" \
1da2d51a
UD
926 : "=r" ((USItype) (sh)), \
927 "=&r" ((USItype) (sl)) \
928 : "0" ((USItype) (ah)), \
929 "r" ((USItype) (bh)), \
930 "1" ((USItype) (al)), \
931 "r" ((USItype) (bl)))
28f540f4
RM
932#define umul_ppmm(ph, pl, m0, m1) \
933 do { \
934 USItype __m0 = (m0), __m1 = (m1); \
935 __asm__ ( \
936 "s r2,r2
937 mts r10,%2
938 m r2,%3
939 m r2,%3
940 m r2,%3
941 m r2,%3
942 m r2,%3
943 m r2,%3
944 m r2,%3
945 m r2,%3
946 m r2,%3
947 m r2,%3
948 m r2,%3
949 m r2,%3
950 m r2,%3
951 m r2,%3
952 m r2,%3
953 m r2,%3
954 cas %0,r2,r0
955 mfs r10,%1" \
1da2d51a
UD
956 : "=r" ((USItype) (ph)), \
957 "=r" ((USItype) (pl)) \
28f540f4
RM
958 : "%r" (__m0), \
959 "r" (__m1) \
960 : "r2"); \
961 (ph) += ((((SItype) __m0 >> 31) & __m1) \
962 + (((SItype) __m1 >> 31) & __m0)); \
963 } while (0)
964#define UMUL_TIME 20
965#define UDIV_TIME 200
966#define count_leading_zeros(count, x) \
967 do { \
968 if ((x) >= 0x10000) \
969 __asm__ ("clz %0,%1" \
1da2d51a
UD
970 : "=r" ((USItype) (count)) \
971 : "r" ((USItype) (x) >> 16)); \
28f540f4
RM
972 else \
973 { \
974 __asm__ ("clz %0,%1" \
1da2d51a
UD
975 : "=r" ((USItype) (count)) \
976 : "r" ((USItype) (x))); \
28f540f4
RM
977 (count) += 16; \
978 } \
979 } while (0)
8f5ca04b
RM
980#endif
981
e9b3e3c5
UD
982#if defined (__sh2__) && W_TYPE_SIZE == 32
983#define umul_ppmm(w1, w0, u, v) \
984 __asm__ ( \
985 "dmulu.l %2,%3
986 sts macl,%1
987 sts mach,%0" \
988 : "=r" ((USItype)(w1)), \
989 "=r" ((USItype)(w0)) \
990 : "r" ((USItype)(u)), \
991 "r" ((USItype)(v)) \
992 : "macl", "mach")
993#define UMUL_TIME 5
994#endif
995
996#if defined (__sparc__) && !defined (__sparc_v9__) && !defined(__arch64__) \
997 && !defined(__sparc_v9) && W_TYPE_SIZE == 32
28f540f4
RM
998#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
999 __asm__ ("addcc %r4,%5,%1
1000 addx %r2,%3,%0" \
1da2d51a
UD
1001 : "=r" ((USItype) (sh)), \
1002 "=&r" ((USItype) (sl)) \
1003 : "%rJ" ((USItype) (ah)), \
1004 "rI" ((USItype) (bh)), \
1005 "%rJ" ((USItype) (al)), \
1006 "rI" ((USItype) (bl)) \
28f540f4
RM
1007 __CLOBBER_CC)
1008#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1009 __asm__ ("subcc %r4,%5,%1
1010 subx %r2,%3,%0" \
1da2d51a
UD
1011 : "=r" ((USItype) (sh)), \
1012 "=&r" ((USItype) (sl)) \
1013 : "rJ" ((USItype) (ah)), \
1014 "rI" ((USItype) (bh)), \
1015 "rJ" ((USItype) (al)), \
1016 "rI" ((USItype) (bl)) \
28f540f4
RM
1017 __CLOBBER_CC)
1018#if defined (__sparc_v8__)
28f540f4
RM
1019#define umul_ppmm(w1, w0, u, v) \
1020 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
1da2d51a
UD
1021 : "=r" ((USItype) (w1)), \
1022 "=r" ((USItype) (w0)) \
1023 : "r" ((USItype) (u)), \
1024 "r" ((USItype) (v)))
28f540f4 1025#define udiv_qrnnd(q, r, n1, n0, d) \
1da2d51a
UD
1026 __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
1027 : "=&r" ((USItype) (q)), \
1028 "=&r" ((USItype) (r)) \
1029 : "r" ((USItype) (n1)), \
1030 "r" ((USItype) (n0)), \
1031 "r" ((USItype) (d)))
1032#else
28f540f4
RM
1033#if defined (__sparclite__)
1034/* This has hardware multiply but not divide. It also has two additional
1035 instructions scan (ffs from high bit) and divscc. */
1036#define umul_ppmm(w1, w0, u, v) \
1037 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
1da2d51a
UD
1038 : "=r" ((USItype) (w1)), \
1039 "=r" ((USItype) (w0)) \
1040 : "r" ((USItype) (u)), \
1041 "r" ((USItype) (v)))
28f540f4
RM
1042#define udiv_qrnnd(q, r, n1, n0, d) \
1043 __asm__ ("! Inlined udiv_qrnnd
1044 wr %%g0,%2,%%y ! Not a delayed write for sparclite
1045 tst %%g0
1046 divscc %3,%4,%%g1
1047 divscc %%g1,%4,%%g1
1048 divscc %%g1,%4,%%g1
1049 divscc %%g1,%4,%%g1
1050 divscc %%g1,%4,%%g1
1051 divscc %%g1,%4,%%g1
1052 divscc %%g1,%4,%%g1
1053 divscc %%g1,%4,%%g1
1054 divscc %%g1,%4,%%g1
1055 divscc %%g1,%4,%%g1
1056 divscc %%g1,%4,%%g1
1057 divscc %%g1,%4,%%g1
1058 divscc %%g1,%4,%%g1
1059 divscc %%g1,%4,%%g1
1060 divscc %%g1,%4,%%g1
1061 divscc %%g1,%4,%%g1
1062 divscc %%g1,%4,%%g1
1063 divscc %%g1,%4,%%g1
1064 divscc %%g1,%4,%%g1
1065 divscc %%g1,%4,%%g1
1066 divscc %%g1,%4,%%g1
1067 divscc %%g1,%4,%%g1
1068 divscc %%g1,%4,%%g1
1069 divscc %%g1,%4,%%g1
1070 divscc %%g1,%4,%%g1
1071 divscc %%g1,%4,%%g1
1072 divscc %%g1,%4,%%g1
1073 divscc %%g1,%4,%%g1
1074 divscc %%g1,%4,%%g1
1075 divscc %%g1,%4,%%g1
1076 divscc %%g1,%4,%%g1
1077 divscc %%g1,%4,%0
1078 rd %%y,%1
1079 bl,a 1f
1080 add %1,%4,%1
10811: ! End of inline udiv_qrnnd" \
1da2d51a
UD
1082 : "=r" ((USItype) (q)), \
1083 "=r" ((USItype) (r)) \
1084 : "r" ((USItype) (n1)), \
1085 "r" ((USItype) (n0)), \
1086 "rI" ((USItype) (d)) \
e9b3e3c5 1087 : "g1" __AND_CLOBBER_CC)
28f540f4
RM
1088#define UDIV_TIME 37
1089#define count_leading_zeros(count, x) \
1da2d51a
UD
1090 do { \
1091 __asm__ ("scan %1,1,%0" \
1092 : "=r" ((USItype) (count)) \
1093 : "r" ((USItype) (x))); \
62818cfd 1094 } while (0)
e9b3e3c5
UD
1095/* Early sparclites return 63 for an argument of 0, but they warn that future
1096 implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
1097 undefined. */
1da2d51a
UD
1098#else
1099/* SPARC without integer multiplication and divide instructions.
1100 (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
28f540f4
RM
1101#define umul_ppmm(w1, w0, u, v) \
1102 __asm__ ("! Inlined umul_ppmm
1103 wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
e9b3e3c5
UD
1104 sra %3,31,%%o5 ! Don't move this insn
1105 and %2,%%o5,%%o5 ! Don't move this insn
28f540f4
RM
1106 andcc %%g0,0,%%g1 ! Don't move this insn
1107 mulscc %%g1,%3,%%g1
1108 mulscc %%g1,%3,%%g1
1109 mulscc %%g1,%3,%%g1
1110 mulscc %%g1,%3,%%g1
1111 mulscc %%g1,%3,%%g1
1112 mulscc %%g1,%3,%%g1
1113 mulscc %%g1,%3,%%g1
1114 mulscc %%g1,%3,%%g1
1115 mulscc %%g1,%3,%%g1
1116 mulscc %%g1,%3,%%g1
1117 mulscc %%g1,%3,%%g1
1118 mulscc %%g1,%3,%%g1
1119 mulscc %%g1,%3,%%g1
1120 mulscc %%g1,%3,%%g1
1121 mulscc %%g1,%3,%%g1
1122 mulscc %%g1,%3,%%g1
1123 mulscc %%g1,%3,%%g1
1124 mulscc %%g1,%3,%%g1
1125 mulscc %%g1,%3,%%g1
1126 mulscc %%g1,%3,%%g1
1127 mulscc %%g1,%3,%%g1
1128 mulscc %%g1,%3,%%g1
1129 mulscc %%g1,%3,%%g1
1130 mulscc %%g1,%3,%%g1
1131 mulscc %%g1,%3,%%g1
1132 mulscc %%g1,%3,%%g1
1133 mulscc %%g1,%3,%%g1
1134 mulscc %%g1,%3,%%g1
1135 mulscc %%g1,%3,%%g1
1136 mulscc %%g1,%3,%%g1
1137 mulscc %%g1,%3,%%g1
1138 mulscc %%g1,%3,%%g1
1139 mulscc %%g1,0,%%g1
e9b3e3c5 1140 add %%g1,%%o5,%0
28f540f4 1141 rd %%y,%1" \
1da2d51a
UD
1142 : "=r" ((USItype) (w1)), \
1143 "=r" ((USItype) (w0)) \
1144 : "%rI" ((USItype) (u)), \
1145 "r" ((USItype) (v)) \
e9b3e3c5 1146 : "g1", "o5" __AND_CLOBBER_CC)
28f540f4 1147#define UMUL_TIME 39 /* 39 instructions */
390a4882
UD
1148/* It's quite necessary to add this much assembler for the sparc.
1149 The default udiv_qrnnd (in C) is more than 10 times slower! */
28f540f4 1150#define udiv_qrnnd(q, r, n1, n0, d) \
390a4882
UD
1151 __asm__ ("! Inlined udiv_qrnnd
1152 mov 32,%%g1
1153 subcc %1,%2,%%g0
11541: bcs 5f
1155 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
1156 sub %1,%2,%1 ! this kills msb of n
1157 addx %1,%1,%1 ! so this can't give carry
1158 subcc %%g1,1,%%g1
11592: bne 1b
1160 subcc %1,%2,%%g0
1161 bcs 3f
1162 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
1163 b 3f
1164 sub %1,%2,%1 ! this kills msb of n
11654: sub %1,%2,%1
11665: addxcc %1,%1,%1
1167 bcc 2b
1168 subcc %%g1,1,%%g1
1169! Got carry from n. Subtract next step to cancel this carry.
1170 bne 4b
1171 addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
1172 sub %1,%2,%1
11733: xnor %0,0,%0
1174 ! End of inline udiv_qrnnd" \
1da2d51a
UD
1175 : "=&r" ((USItype) (q)), \
1176 "=&r" ((USItype) (r)) \
1177 : "r" ((USItype) (d)), \
1178 "1" ((USItype) (n1)), \
e9b3e3c5 1179 "0" ((USItype) (n0)) : "g1" __AND_CLOBBER_CC)
390a4882 1180#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
1da2d51a
UD
1181#endif /* __sparclite__ */
1182#endif /* __sparc_v8__ */
28f540f4
RM
1183#endif /* __sparc__ */
1184
e9b3e3c5
UD
1185#if (defined (__sparc_v9__) || (defined (__sparc__) && defined (__arch64__)) \
1186 || defined (__sparcv9)) && W_TYPE_SIZE == 64
1187#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
b85697f6
UD
1188 __asm__ ("addcc %r4,%5,%1
1189 add %r2,%3,%0
e9b3e3c5
UD
1190 bcs,a,pn %%xcc, 1f
1191 add %0, 1, %0
1192 1:" \
1193 : "=r" ((UDItype)(sh)), \
1194 "=&r" ((UDItype)(sl)) \
1195 : "%rJ" ((UDItype)(ah)), \
1196 "rI" ((UDItype)(bh)), \
1197 "%rJ" ((UDItype)(al)), \
1198 "rI" ((UDItype)(bl)) \
1199 __CLOBBER_CC)
1200
1201#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
b85697f6
UD
1202 __asm__ ("subcc %r4,%5,%1
1203 sub %r2,%3,%0
e9b3e3c5
UD
1204 bcs,a,pn %%xcc, 1f
1205 sub %0, 1, %0
1206 1:" \
1207 : "=r" ((UDItype)(sh)), \
1208 "=&r" ((UDItype)(sl)) \
1209 : "rJ" ((UDItype)(ah)), \
1210 "rI" ((UDItype)(bh)), \
1211 "rJ" ((UDItype)(al)), \
1212 "rI" ((UDItype)(bl)) \
1213 __CLOBBER_CC)
1214
1215#define umul_ppmm(wh, wl, u, v) \
1216 do { \
1217 UDItype tmp1, tmp2, tmp3, tmp4; \
1218 __asm__ __volatile__ ( \
1219 "srl %7,0,%3
1220 mulx %3,%6,%1
1221 srlx %6,32,%2
1222 mulx %2,%3,%4
1223 sllx %4,32,%5
1224 srl %6,0,%3
1225 sub %1,%5,%5
1226 srlx %5,32,%5
1227 addcc %4,%5,%4
1228 srlx %7,32,%5
1229 mulx %3,%5,%3
1230 mulx %2,%5,%5
1231 sethi %%hi(0x80000000),%2
1232 addcc %4,%3,%4
1233 srlx %4,32,%4
1234 add %2,%2,%2
1235 movcc %%xcc,%%g0,%2
1236 addcc %5,%4,%5
1237 sllx %3,32,%3
1238 add %1,%3,%1
1239 add %5,%2,%0" \
1240 : "=r" ((UDItype)(wh)), \
1241 "=&r" ((UDItype)(wl)), \
1242 "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
1243 : "r" ((UDItype)(u)), \
1244 "r" ((UDItype)(v)) \
1245 __CLOBBER_CC); \
1246 } while (0)
1247#define UMUL_TIME 96
1248#define UDIV_TIME 230
1249#endif /* __sparc_v9__ */
1250
1251#if defined (__vax__) && W_TYPE_SIZE == 32
28f540f4
RM
1252#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1253 __asm__ ("addl2 %5,%1
1254 adwc %3,%0" \
1da2d51a
UD
1255 : "=g" ((USItype) (sh)), \
1256 "=&g" ((USItype) (sl)) \
1257 : "%0" ((USItype) (ah)), \
1258 "g" ((USItype) (bh)), \
1259 "%1" ((USItype) (al)), \
1260 "g" ((USItype) (bl)))
28f540f4
RM
1261#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1262 __asm__ ("subl2 %5,%1
1263 sbwc %3,%0" \
1da2d51a
UD
1264 : "=g" ((USItype) (sh)), \
1265 "=&g" ((USItype) (sl)) \
1266 : "0" ((USItype) (ah)), \
1267 "g" ((USItype) (bh)), \
1268 "1" ((USItype) (al)), \
1269 "g" ((USItype) (bl)))
28f540f4
RM
1270#define umul_ppmm(xh, xl, m0, m1) \
1271 do { \
1da2d51a
UD
1272 union { \
1273 UDItype __ll; \
1274 struct {USItype __l, __h;} __i; \
1275 } __xx; \
28f540f4
RM
1276 USItype __m0 = (m0), __m1 = (m1); \
1277 __asm__ ("emul %1,%2,$0,%0" \
1da2d51a 1278 : "=r" (__xx.__ll) \
28f540f4
RM
1279 : "g" (__m0), \
1280 "g" (__m1)); \
1da2d51a
UD
1281 (xh) = __xx.__i.__h; \
1282 (xl) = __xx.__i.__l; \
28f540f4
RM
1283 (xh) += ((((SItype) __m0 >> 31) & __m1) \
1284 + (((SItype) __m1 >> 31) & __m0)); \
1285 } while (0)
1286#define sdiv_qrnnd(q, r, n1, n0, d) \
1287 do { \
1288 union {DItype __ll; \
1289 struct {SItype __l, __h;} __i; \
1290 } __xx; \
1291 __xx.__i.__h = n1; __xx.__i.__l = n0; \
1292 __asm__ ("ediv %3,%2,%0,%1" \
1293 : "=g" (q), "=g" (r) \
1da2d51a 1294 : "g" (__xx.__ll), "g" (d)); \
28f540f4
RM
1295 } while (0)
1296#endif /* __vax__ */
1297
e9b3e3c5
UD
1298#if defined (__z8000__) && W_TYPE_SIZE == 16
1299#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1300 __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
1301 : "=r" ((unsigned int)(sh)), \
1302 "=&r" ((unsigned int)(sl)) \
1303 : "%0" ((unsigned int)(ah)), \
1304 "r" ((unsigned int)(bh)), \
1305 "%1" ((unsigned int)(al)), \
1306 "rQR" ((unsigned int)(bl)))
1307#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1308 __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
1309 : "=r" ((unsigned int)(sh)), \
1310 "=&r" ((unsigned int)(sl)) \
1311 : "0" ((unsigned int)(ah)), \
1312 "r" ((unsigned int)(bh)), \
1313 "1" ((unsigned int)(al)), \
1314 "rQR" ((unsigned int)(bl)))
1315#define umul_ppmm(xh, xl, m0, m1) \
1316 do { \
1317 union {long int __ll; \
1318 struct {unsigned int __h, __l;} __i; \
1319 } __xx; \
1320 unsigned int __m0 = (m0), __m1 = (m1); \
1321 __asm__ ("mult %S0,%H3" \
1322 : "=r" (__xx.__i.__h), \
1323 "=r" (__xx.__i.__l) \
1324 : "%1" (__m0), \
1325 "rQR" (__m1)); \
1326 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
1327 (xh) += ((((signed int) __m0 >> 15) & __m1) \
1328 + (((signed int) __m1 >> 15) & __m0)); \
1329 } while (0)
1330#endif /* __z8000__ */
1331
#endif /* __GNUC__ */

/* If this machine has no inline assembler, use C macros.  */

#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif
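
/* Note: in the generic add_ssaaaa above, unsigned addition wraps modulo
   2^W_TYPE_SIZE, so the low-word sum __x is smaller than the addend (al)
   exactly when a carry out of the low word occurred; that is the carry
   the (__x < (al)) term propagates into the high word.  */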

#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    UWtype __x0, __x1, __x2, __x3; \
    UHWtype __ul, __vl, __uh, __vh; \
 \
    __ul = __ll_lowpart (u); \
    __uh = __ll_highpart (u); \
    __vl = __ll_lowpart (v); \
    __vh = __ll_highpart (v); \
 \
    __x0 = (UWtype) __ul * __vl; \
    __x1 = (UWtype) __ul * __vh; \
    __x2 = (UWtype) __uh * __vl; \
    __x3 = (UWtype) __uh * __vh; \
 \
    __x1 += __ll_highpart (__x0);	/* this can't give carry */ \
    __x1 += __x2;			/* but this indeed can */ \
    if (__x1 < __x2)			/* did we get it? */ \
      __x3 += __ll_B;			/* yes, add it in the proper pos. */ \
 \
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
  } while (0)
#endif
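
/* The generic umul_ppmm above is schoolbook multiplication on half-words:
   with B = 2^(W_TYPE_SIZE/2), u = __uh*B + __ul and v = __vh*B + __vl,

	u * v = __uh*__vh*B^2 + (__uh*__vl + __ul*__vh)*B + __ul*__vl,

   where each of the four partial products fits in a single UWtype.  A
   carry out of __x1 += __x2 is detected by (__x1 < __x2) and folded into
   __x3.  */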

#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({DIunion __w; \
    umul_ppmm (__w.s.high, __w.s.low, u, v); \
    __w.ll; })
#endif

/* Define this unconditionally, so it can be used for debugging.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    UWtype __d1, __d0, __q1, __q0; \
    UWtype __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
 \
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (UWtype) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
	__q1--, __r1 += (d); \
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
	  if (__r1 < __m) \
	    __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
 \
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (UWtype) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
	__q0--, __r0 += (d); \
	if (__r0 >= (d)) \
	  if (__r0 < __m) \
	    __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
 \
    (q) = (UWtype) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)

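/* __udiv_qrnnd_c above forms the quotient one half-word digit at a time:
   each half estimates a digit from the high parts (__q1 = (n1) / __d1,
   then __q0 = __r1 / __d1) and corrects the estimate downward at most
   twice.  The correction bound relies on the divisor being normalized
   (most significant bit set), which is why UDIV_NEEDS_NORMALIZATION is
   set to 1 when this macro is used as udiv_qrnnd below.  */
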
/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    USItype __r; \
    (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
#endif

/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif
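
/* For illustration only: a sketch of how a caller combines udiv_qrnnd with
   UDIV_NEEDS_NORMALIZATION and count_leading_zeros.  The helper name is
   hypothetical; it is not part of this file.  Requires n1 < d.  */
#if 0
static UWtype
example_udiv (UWtype *rem, UWtype n1, UWtype n0, UWtype d)
{
  UWtype q, r;
  UWtype bm = 0;

#if UDIV_NEEDS_NORMALIZATION
  count_leading_zeros (bm, d);
  if (bm != 0)
    {
      /* Normalize: shift divisor and dividend left by BM bits.  */
      d = d << bm;
      n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
      n0 = n0 << bm;
    }
#endif
  udiv_qrnnd (q, r, n1, n0, d);
  *rem = r >> bm;		/* undo the normalization of the remainder */
  return q;
}
#endif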

#if !defined (count_leading_zeros)
extern const UQItype __clz_tab[];
#define count_leading_zeros(count, x) \
  do { \
    UWtype __xr = (x); \
    UWtype __a; \
 \
    if (W_TYPE_SIZE <= 32) \
      { \
	__a = __xr < ((UWtype)1<<2*__BITS4) \
	  ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4) \
	  : (__xr < ((UWtype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
      } \
    else \
      { \
	for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
	  if (((__xr >> __a) & 0xff) != 0) \
	    break; \
      } \
 \
    (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
  } while (0)
#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
#endif

#if !defined (count_trailing_zeros)
/* Define count_trailing_zeros using count_leading_zeros.  The latter might be
   defined in asm, but if it is not, the C version above is good enough.  */
#define count_trailing_zeros(count, x) \
  do { \
    UWtype __ctz_x = (x); \
    UWtype __ctz_c; \
    count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
    (count) = W_TYPE_SIZE - 1 - __ctz_c; \
  } while (0)
#endif
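
/* In the generic count_trailing_zeros above, __ctz_x & -__ctz_x isolates
   the lowest set bit of __ctz_x, so the leading-zero count of that value
   pinpoints the bit's position; subtracting it from W_TYPE_SIZE - 1 yields
   the trailing-zero count.  As with count_leading_zeros, the case x == 0
   is not meaningful here.  */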

#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif