#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/bitops.h>
#include <linux/types.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
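
/*
 * Illustrative sketch (editorial, not part of the original header):
 * quotient and remainder in one call. 'bytes' and 'block_size' are
 * hypothetical names.
 *
 *      u32 rem;
 *      u64 blocks = div_u64_rem(bytes, block_size, &rem);
 *
 * Afterwards blocks == bytes / block_size and rem == bytes % block_size.
 */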

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
        return dividend / divisor;
}
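
/*
 * Editorial note: like C's native '/', div64_s64() truncates toward zero,
 * e.g. div64_s64(-7, 2) == -3 rather than -4; callers that need floor
 * semantics must adjust the result themselves.
 */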

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = do_div(dividend, divisor);
        return dividend;
}
#endif
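
/*
 * Editorial note: do_div(n, base) is a macro that replaces 'n' with the
 * quotient and evaluates to the 32-bit remainder, which is why the helper
 * above can simply return 'dividend' afterwards.
 */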

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}
#endif
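
/*
 * Illustrative sketch (editorial): the common case where the remainder is
 * not needed, e.g. converting a hypothetical nanosecond count to
 * milliseconds:
 *
 *      u64 ms = div_u64(total_ns, 1000000);
 *
 * On 32-bit targets this avoids pulling in a full 64-by-64 division.
 */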

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
        s32 remainder;
        return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /* The following asm() prevents the compiler from
                   optimising this loop into a modulo operation. */
                asm("" : "+rm"(dividend));

                dividend -= divisor;
                ret++;
        }

        *remainder = dividend;

        return ret;
}
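
/*
 * Editorial note: __iter_div_u64_rem() divides by repeated subtraction, so
 * it only makes sense when the quotient is known to be small (a handful of
 * iterations); for anything else use the div_u64()/div64_u64() family
 * above.
 */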

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}
#endif
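
/*
 * Illustrative sketch (editorial): the (u64) cast widens *before* the
 * multiply. Without it the product would be computed in 32 bits and wrap:
 *
 *      u32 a = 0x10000, b = 0x10000;
 *      u64 wrong = a * b;              wrong == 0 (32-bit product wrapped)
 *      u64 right = mul_u32_u32(a, b);  right == 0x100000000
 */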

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
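
/*
 * Illustrative sketch (editorial): mul_u64_u32_shr() is a building block
 * for fixed-point scaling. With a hypothetical Q32 fraction 'frac'
 * (meaning frac / 2^32 of a unit):
 *
 *      u64 scaled = mul_u64_u32_shr(x, frac, 32);
 *
 * yields approximately x * frac / 2^32, with no overflow thanks to the
 * 128-bit intermediate.
 */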

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        u32 ah, al;
        u64 ret;

        al = a;
        ah = a >> 32;

        ret = mul_u32_u32(al, mul) >> shift;
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);

        return ret;
}
#endif /* mul_u64_u32_shr */
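
/*
 * Editorial derivation for the fallback above: writing a = (ah << 32) + al
 * gives
 *
 *      (a * mul) >> shift == (al * mul >> shift)
 *                          + (ah * mul << (32 - shift))
 *
 * which is exactly the two mul_u32_u32() terms. Note this form assumes
 * shift <= 32, otherwise (32 - shift) underflows.
 */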

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } rl, rm, rn, rh, a0, b0;
        u64 c;

        a0.ll = a;
        b0.ll = b;

        rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
        rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
        rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
        rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

        /*
         * Each of these lines computes a 64-bit intermediate result into "c",
         * starting at bits 32-95.  The low 32-bits go into the result of the
         * multiplication, the high 32-bits are carried into the next step.
         */
        rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
        rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
        rh.l.high = (c >> 32) + rh.l.high;

        /*
         * The 128-bit result of the multiplication is in rl.ll and rh.ll,
         * shift it right and throw away the high part of the result.
         */
        if (shift == 0)
                return rl.ll;
        if (shift < 64)
                return (rl.ll >> shift) | (rh.ll << (64 - shift));
        return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif
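
/*
 * Editorial note: the fallback above is schoolbook long multiplication in
 * base 2^32; rl/rm/rn/rh hold the four 32x32 partial products and the two
 * "c" carry chains stitch them into the 128-bit product rh.ll:rl.ll before
 * the final shift. A hypothetical caller scaling a cycle counter:
 *
 *      u64 ns = mul_u64_u64_shr(cycles, mult, shift);
 */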

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } u, rl, rh;

        u.ll = a;
        rl.ll = mul_u32_u32(u.l.low, mul);
        rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

        /* Bits 32-63 of the result will be in rh.l.low. */
        rl.l.high = do_div(rh.ll, divisor);

        /* Bits 0-31 of the result will be in rl.l.low. */
        do_div(rl.ll, divisor);

        rl.l.high = rh.l.low;
        return rl.ll;
}
#endif /* mul_u64_u32_div */
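
/*
 * Illustrative sketch (editorial): mul_u64_u32_div() computes
 * a * mul / divisor through a 96-bit intermediate, so it cannot overflow
 * where a plain (a * mul) / divisor in u64 arithmetic would (the final
 * quotient is assumed to fit in 64 bits). With a hypothetical num/den
 * ratio:
 *
 *      u64 scaled = mul_u64_u32_div(val, num, den);
 */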

#endif /* _LINUX_MATH64_H */