/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2007 Thiemo Seufer
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Portions of this work are licensed under the terms of the GNU GPL,
 * version 2 or later. See the COPYING file in the top-level directory.
 */

#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/bswap.h"
#include "qemu/int128.h"

#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}
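
/*
 * Illustrative example (not part of the upstream header): a full
 * 64x64->128-bit multiply returns the product split across two limbs.
 *
 *   uint64_t lo, hi;
 *   mulu64(&lo, &hi, UINT64_MAX, 2);   // hi == 1, lo == UINT64_MAX - 1
 */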

/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}

static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
    return ((__int128_t)a * b + c - 1) / c;
}
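
/*
 * Illustrative example (not part of the upstream header): muldiv64()
 * computes (a * b) / c without the 64-bit intermediate overflowing.
 *
 *   muldiv64(10, 3, 4);          // == 7  (30 / 4, rounded down)
 *   muldiv64_round_up(10, 3, 4); // == 8  (30 / 4, rounded up)
 */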

static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
                               uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}

static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
                              int64_t divisor)
{
    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
    __int128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);

static inline uint64_t muldiv64_rounding(uint64_t a, uint32_t b, uint32_t c,
                                         bool round_up)
{
    union {
        uint64_t ll;
        struct {
#if HOST_BIG_ENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    if (round_up) {
        rl += c - 1;
    }
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}

static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return muldiv64_rounding(a, b, c, false);
}

static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
    return muldiv64_rounding(a, b, c, true);
}
#endif

/**
 * clz8 - count leading zeros in an 8-bit value.
 * @val: The value to search
 *
 * Returns 8 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 *
 * Note that the GCC builtin will upcast its argument to an `unsigned int`
 * so this function subtracts off the number of prepended zeroes.
 */
static inline int clz8(uint8_t val)
{
    return val ? __builtin_clz(val) - 24 : 8;
}

/**
 * clz16 - count leading zeros in a 16-bit value.
 * @val: The value to search
 *
 * Returns 16 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 *
 * Note that the GCC builtin will upcast its argument to an `unsigned int`
 * so this function subtracts off the number of prepended zeroes.
 */
static inline int clz16(uint16_t val)
{
    return val ? __builtin_clz(val) - 16 : 16;
}

/**
 * clz32 - count leading zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz32(uint32_t val)
{
    return val ? __builtin_clz(val) : 32;
}

/**
 * clo32 - count leading ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int clo32(uint32_t val)
{
    return clz32(~val);
}

/**
 * clz64 - count leading zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}

/**
 * clo64 - count leading ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int clo64(uint64_t val)
{
    return clz64(~val);
}

/**
 * ctz8 - count trailing zeros in an 8-bit value.
 * @val: The value to search
 *
 * Returns 8 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz8(uint8_t val)
{
    return val ? __builtin_ctz(val) : 8;
}

/**
 * ctz16 - count trailing zeros in a 16-bit value.
 * @val: The value to search
 *
 * Returns 16 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz16(uint16_t val)
{
    return val ? __builtin_ctz(val) : 16;
}

/**
 * ctz32 - count trailing zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz32(uint32_t val)
{
    return val ? __builtin_ctz(val) : 32;
}

/**
 * cto32 - count trailing ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int cto32(uint32_t val)
{
    return ctz32(~val);
}

/**
 * ctz64 - count trailing zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz64(uint64_t val)
{
    return val ? __builtin_ctzll(val) : 64;
}

/**
 * cto64 - count trailing ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int cto64(uint64_t val)
{
    return ctz64(~val);
}

/**
 * clrsb32 - count leading redundant sign bits in a 32-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-31].
 */
static inline int clrsb32(uint32_t val)
{
#if __has_builtin(__builtin_clrsb) || !defined(__clang__)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}

/**
 * clrsb64 - count leading redundant sign bits in a 64-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-63].
 */
static inline int clrsb64(uint64_t val)
{
#if __has_builtin(__builtin_clrsbll) || !defined(__clang__)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}
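
/*
 * Illustrative examples (not part of the upstream header):
 *
 *   clrsb32(-2);  // == 30  (0xfffffffe: 30 copies of the sign bit follow it)
 *   clrsb32(1);   // == 30  (0x00000001: 30 zero bits follow the zero sign bit)
 */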

/**
 * ctpop8 - count the population of one bits in an 8-bit value.
 * @val: The value to search
 */
static inline int ctpop8(uint8_t val)
{
    return __builtin_popcount(val);
}

/**
 * parity8 - return the parity (1 = odd) of an 8-bit value.
 * @val: The value to search
 */
static inline int parity8(uint8_t val)
{
    return __builtin_parity(val);
}

/**
 * ctpop16 - count the population of one bits in a 16-bit value.
 * @val: The value to search
 */
static inline int ctpop16(uint16_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop32 - count the population of one bits in a 32-bit value.
 * @val: The value to search
 */
static inline int ctpop32(uint32_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop64 - count the population of one bits in a 64-bit value.
 * @val: The value to search
 */
static inline int ctpop64(uint64_t val)
{
    return __builtin_popcountll(val);
}
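
/*
 * Illustrative examples (not part of the upstream header):
 *
 *   ctpop32(0x0000f0f0);  // == 8
 *   parity8(0x07);        // == 1  (three bits set, odd parity)
 */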

/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
#if __has_builtin(__builtin_bitreverse8)
    return __builtin_bitreverse8(x);
#else
    /* Assign the correct nibble position. */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
#endif
}

/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
#if __has_builtin(__builtin_bitreverse16)
    return __builtin_bitreverse16(x);
#else
    /* Assign the correct byte position. */
    x = bswap16(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
#endif
}

/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
#if __has_builtin(__builtin_bitreverse32)
    return __builtin_bitreverse32(x);
#else
    /* Assign the correct byte position. */
    x = bswap32(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
#endif
}

/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
#if __has_builtin(__builtin_bitreverse64)
    return __builtin_bitreverse64(x);
#else
    /* Assign the correct byte position. */
    x = bswap64(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
#endif
}
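
/*
 * Illustrative examples (not part of the upstream header):
 *
 *   revbit8(0x0f);     // == 0xf0
 *   revbit16(0x0001);  // == 0x8000
 */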

/**
 * Return the absolute value of a 64-bit integer as an unsigned 64-bit value
 */
static inline uint64_t uabs64(int64_t v)
{
    return v < 0 ? -v : v;
}
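
/*
 * Illustrative example (not part of the upstream header):
 *
 *   uabs64(-5);  // == 5
 */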

/**
 * sadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * sadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * ssub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * ssub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * smul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * smul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}
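
/*
 * Illustrative example (not part of the upstream header): the whole
 * family above wraps the GCC/clang __builtin_*_overflow checks.
 *
 *   int32_t sum;
 *   if (sadd32_overflow(INT32_MAX, 1, &sum)) {
 *       // overflow: sum holds the truncated result (INT32_MIN here)
 *   }
 */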

/*
 * Unsigned 128x64 multiplication.
 * Returns true if the result got truncated to 128 bits.
 * Otherwise, returns false and stores the product via plow and phigh.
 */
static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
#if defined(CONFIG_INT128)
    bool res;
    __uint128_t r;
    __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
    res = __builtin_mul_overflow(f, factor, &r);

    *plow = r;
    *phigh = r >> 64;

    return res;
#else
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t ahi;
    uint64_t blo, bhi;

    if (dhi == 0) {
        mulu64(plow, phigh, dlo, factor);
        return false;
    }

    mulu64(plow, &ahi, dlo, factor);
    mulu64(&blo, &bhi, dhi, factor);

    return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
#endif
}
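
/*
 * Illustrative example (not part of the upstream header):
 *
 *   uint64_t lo = 2, hi = 0;            // 128-bit value 2
 *   bool trunc = mulu128(&lo, &hi, 3);  // lo == 6, hi == 0, trunc == false
 */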

/**
 * uadd64_carry - addition with carry-in and carry-out
 * @x, @y: addends
 * @pcarry: in-out carry value
 *
 * Computes @x + @y + *@pcarry, placing the carry-out back
 * into *@pcarry and returning the 64-bit sum.
 */
static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
{
#if __has_builtin(__builtin_addcll)
    unsigned long long c = *pcarry;
    x = __builtin_addcll(x, y, c, &c);
    *pcarry = c & 1;
    return x;
#else
    bool c = *pcarry;
    /* This is clang's internal expansion of __builtin_addc. */
    c = uadd64_overflow(x, c, &x);
    c |= uadd64_overflow(x, y, &x);
    *pcarry = c;
    return x;
#endif
}

/**
 * usub64_borrow - subtraction with borrow-in and borrow-out
 * @x: Minuend
 * @y: Subtrahend
 * @pborrow: in-out borrow value
 *
 * Computes @x - @y - *@pborrow, placing the borrow-out back
 * into *@pborrow and returning the 64-bit difference.
 */
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
    unsigned long long b = *pborrow;
    x = __builtin_subcll(x, y, b, &b);
    *pborrow = b & 1;
    return x;
#else
    bool b = *pborrow;
    b = usub64_overflow(x, b, &x);
    b |= usub64_overflow(x, y, &x);
    *pborrow = b;
    return x;
#endif
}

/* Host type specific sizes of these routines.  */

#if ULONG_MAX == UINT32_MAX
# define clzl   clz32
# define ctzl   ctz32
# define clol   clo32
# define ctol   cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl   clz64
# define ctzl   ctz64
# define clol   clo64
# define ctol   cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif

static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}

/**
 * Return @value rounded down to the nearest power of two or zero.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        /* Avoid undefined shift by 64 */
        return 0;
    }
    return 0x8000000000000000ull >> clz64(value);
}

/*
 * Return @value rounded up to the nearest power of two modulo 2^64.
 * This is *zero* for @value > 2^63, so be careful.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /*
         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
         * Therefore, either @value == 0 or @value > 2^63.
         * If it's 0, return 1, else return 0.
         */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}
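
/*
 * Illustrative examples (not part of the upstream header):
 *
 *   is_power_of_2(64);  // == true
 *   pow2floor(100);     // == 64
 *   pow2ceil(100);      // == 128
 *   pow2ceil(0);        // == 1
 */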

static inline uint32_t pow2roundup32(uint32_t x)
{
    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
    return x + 1;
}
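
/*
 * Illustrative note (not part of the upstream header): unlike pow2ceil(),
 * this rounds an exact power of two up to the next one.
 *
 *   pow2roundup32(5);  // == 8
 *   pow2roundup32(4);  // == 8
 */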

/**
 * urshift - 128-bit unsigned right shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * The result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are reduced
 * modulo 128. In other words, the caller is responsible for verifying
 * or asserting both the shift range and the plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);

/**
 * ulshift - 128-bit unsigned left shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * The result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are reduced
 * modulo 128. In other words, the caller is responsible for verifying
 * or asserting both the shift range and the plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
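
/*
 * Illustrative example (not part of the upstream header):
 *
 *   uint64_t lo = 1, hi = 0;
 *   bool ovf = false;
 *   ulshift(&lo, &hi, 64, &ovf);  // lo == 0, hi == 1, ovf still false
 */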

/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
 * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
 *
 * Licensed under the GPLv2/LGPLv3
 */
static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
                                  uint64_t n0, uint64_t d)
{
#if defined(__x86_64__)
    uint64_t q;
    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
    return q;
#elif defined(__s390x__) && !defined(__clang__)
    /* Need to use a TImode type to get an even register pair for DLGR. */
    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
    *r = n >> 64;
    return n;
#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
    /* From Power ISA 2.06, programming note for divdeu. */
    uint64_t q1, q2, Q, r1, r2, R;
    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
        : "=&r"(q1), "=r"(q2)
        : "r"(n1), "r"(n0), "r"(d));
    r1 = -(q1 * d);        /* low part of (n1<<64) - (q1 * d) */
    r2 = n0 - (q2 * d);
    Q = q1 + q2;
    R = r1 + r2;
    if (R >= d || R < r2) { /* overflow implies R > d */
        Q += 1;
        R -= d;
    }
    *r = R;
    return Q;
#else
    uint64_t d0, d1, q0, q1, r1, r0, m;

    d0 = (uint32_t)d;
    d1 = d >> 32;

    r1 = n1 % d1;
    q1 = n1 / d1;
    m = q1 * d0;
    r1 = (r1 << 32) | (n0 >> 32);
    if (r1 < m) {
        q1 -= 1;
        r1 += d;
        if (r1 >= d) {
            if (r1 < m) {
                q1 -= 1;
                r1 += d;
            }
        }
    }
    r1 -= m;

    r0 = r1 % d1;
    q0 = r1 / d1;
    m = q0 * d0;
    r0 = (r0 << 32) | (uint32_t)n0;
    if (r0 < m) {
        q0 -= 1;
        r0 += d;
        if (r0 >= d) {
            if (r0 < m) {
                q0 -= 1;
                r0 += d;
            }
        }
    }
    r0 -= m;

    *r = r0;
    return (q1 << 32) | q0;
#endif
}
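
/*
 * Illustrative example (not part of the upstream header): divides the
 * 128-bit value n1:n0 by d. As with GMP's __udiv_qrnnd, the quotient
 * must fit in 64 bits (i.e. n1 < d) for the result to be meaningful.
 *
 *   uint64_t rem;
 *   uint64_t q = udiv_qrnnd(&rem, 1, 0, 3);
 *   // q == 0x5555555555555555, rem == 1   (2^64 / 3)
 */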

Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);
#endif