/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2007 Thiemo Seufer
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Portions of this work are licensed under the terms of the GNU GPL,
 * version 2 or later. See the COPYING file in the top-level directory.
 */
#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/bswap.h"
#include "qemu/int128.h"

#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}
static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}
/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}
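
/*
 * Worked example (illustrative): muldiv64(1ULL << 60, 1000, 250)
 * == 1ULL << 62.  The intermediate product 2^60 * 1000 needs about
 * 70 bits, so a plain 64-bit multiply would overflow, but the wide
 * intermediate keeps the result exact.
 */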
static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
    return ((__int128_t)a * b + c - 1) / c;
}
static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
                               uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}
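
/*
 * Worked example (illustrative): dividing 2^64 (*phigh == 1,
 * *plow == 0) by 3 leaves *phigh == 0, *plow == 0x5555555555555555
 * and returns remainder 1, since 3 * 0x5555555555555555 == 2^64 - 1.
 */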
static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
                              int64_t divisor)
{
    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
    __int128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
static inline uint64_t muldiv64_rounding(uint64_t a, uint32_t b, uint32_t c,
                                         bool round_up)
{
    union {
        uint64_t ll;
        struct {
#if HOST_BIG_ENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    if (round_up) {
        rl += c - 1;
    }
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += rl >> 32;
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}
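
/*
 * The split-word math above computes (a * b) / c by writing
 * a = a_hi * 2^32 + a_lo, so a * b = (a_hi * b) * 2^32 + a_lo * b.
 * Each 2^32-weighted half is divided by c, with the remainder of
 * the high half carried into the low half, so only 64-bit
 * arithmetic is needed on hosts without a 128-bit type.
 */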
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return muldiv64_rounding(a, b, c, false);
}
static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
    return muldiv64_rounding(a, b, c, true);
}
#endif
/**
 * clz8 - count leading zeros in an 8-bit value.
 * @val: The value to search
 *
 * Returns 8 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 *
 * Note that the GCC builtin will upcast its argument to an `unsigned int`
 * so this function subtracts off the number of prepended zeroes.
 */
static inline int clz8(uint8_t val)
{
    return val ? __builtin_clz(val) - 24 : 8;
}
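
/*
 * Example (illustrative): clz8(0x10) == 3.  The builtin sees the
 * promoted 32-bit value 0x00000010 and reports 27 leading zeros;
 * subtracting the 24 bits added by promotion gives 3.
 */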
/**
 * clz16 - count leading zeros in a 16-bit value.
 * @val: The value to search
 *
 * Returns 16 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 *
 * Note that the GCC builtin will upcast its argument to an `unsigned int`
 * so this function subtracts off the number of prepended zeroes.
 */
static inline int clz16(uint16_t val)
{
    return val ? __builtin_clz(val) - 16 : 16;
}
/**
 * clz32 - count leading zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz32(uint32_t val)
{
    return val ? __builtin_clz(val) : 32;
}
/**
 * clo32 - count leading ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int clo32(uint32_t val)
{
    return clz32(~val);
}
/**
 * clz64 - count leading zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}
/**
 * clo64 - count leading ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int clo64(uint64_t val)
{
    return clz64(~val);
}
/**
 * ctz8 - count trailing zeros in an 8-bit value.
 * @val: The value to search
 *
 * Returns 8 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz8(uint8_t val)
{
    return val ? __builtin_ctz(val) : 8;
}
/**
 * ctz16 - count trailing zeros in a 16-bit value.
 * @val: The value to search
 *
 * Returns 16 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz16(uint16_t val)
{
    return val ? __builtin_ctz(val) : 16;
}
/**
 * ctz32 - count trailing zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz32(uint32_t val)
{
    return val ? __builtin_ctz(val) : 32;
}
/**
 * cto32 - count trailing ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int cto32(uint32_t val)
{
    return ctz32(~val);
}
/**
 * ctz64 - count trailing zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz64(uint64_t val)
{
    return val ? __builtin_ctzll(val) : 64;
}
/**
 * cto64 - count trailing ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int cto64(uint64_t val)
{
    return ctz64(~val);
}
/**
 * clrsb32 - count leading redundant sign bits in a 32-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-31].
 */
static inline int clrsb32(uint32_t val)
{
#if __has_builtin(__builtin_clrsb) || !defined(__clang__)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}
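
/*
 * Example (illustrative): clrsb32(1) == 30 (a 0 sign bit followed by
 * 30 more zeros), and clrsb32(0) == clrsb32(-1) == 31.  The fallback
 * works because val ^ (val >> 1) has its highest set bit exactly
 * where the run of sign-equal bits ends.
 */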
/**
 * clrsb64 - count leading redundant sign bits in a 64-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-63].
 */
static inline int clrsb64(uint64_t val)
{
#if __has_builtin(__builtin_clrsbll) || !defined(__clang__)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}
/**
 * ctpop8 - count the population of one bits in an 8-bit value.
 * @val: The value to search
 */
static inline int ctpop8(uint8_t val)
{
    return __builtin_popcount(val);
}
/**
 * parity8 - return the parity (1 = odd) of an 8-bit value.
 * @val: The value to search
 */
static inline int parity8(uint8_t val)
{
    return __builtin_parity(val);
}
/**
 * ctpop16 - count the population of one bits in a 16-bit value.
 * @val: The value to search
 */
static inline int ctpop16(uint16_t val)
{
    return __builtin_popcount(val);
}
/**
 * ctpop32 - count the population of one bits in a 32-bit value.
 * @val: The value to search
 */
static inline int ctpop32(uint32_t val)
{
    return __builtin_popcount(val);
}
/**
 * ctpop64 - count the population of one bits in a 64-bit value.
 * @val: The value to search
 */
static inline int ctpop64(uint64_t val)
{
    return __builtin_popcountll(val);
}
/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
#if __has_builtin(__builtin_bitreverse8)
    return __builtin_bitreverse8(x);
#else
    /* Assign the correct nibble position. */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
#endif
}
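
/*
 * Example (illustrative): revbit8(0x12) == 0x48, i.e. 00010010
 * becomes 01001000.  The fallback first swaps the two nibbles,
 * then permutes the four bits within each nibble.
 */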
/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
#if __has_builtin(__builtin_bitreverse16)
    return __builtin_bitreverse16(x);
#else
    /* Assign the correct byte position. */
    x = bswap16(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
#endif
}
/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
#if __has_builtin(__builtin_bitreverse32)
    return __builtin_bitreverse32(x);
#else
    /* Assign the correct byte position. */
    x = bswap32(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
#endif
}
/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
#if __has_builtin(__builtin_bitreverse64)
    return __builtin_bitreverse64(x);
#else
    /* Assign the correct byte position. */
    x = bswap64(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
#endif
}
/**
 * Return the absolute value of a 64-bit integer as an unsigned 64-bit value
 */
static inline uint64_t uabs64(int64_t v)
{
    return v < 0 ? -v : v;
}
/**
 * sadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}
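
/*
 * Example (illustrative): sadd32_overflow(INT32_MAX, 1, &v) returns
 * true and stores the truncated (wrapped) value INT32_MIN in v.
 */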
/**
 * sadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}
/**
 * uadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}
/**
 * uadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}
/**
 * ssub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}
/**
 * ssub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}
/**
 * usub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}
/**
 * usub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}
/**
 * smul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}
/**
 * smul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}
/**
 * umul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}
/**
 * umul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}
/*
 * Unsigned 128x64 multiplication.
 * Returns true if the result got truncated to 128 bits.
 * Otherwise, returns false and the multiplication result via plow and phigh.
 */
static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
#if defined(CONFIG_INT128)
    bool res;
    __uint128_t r;
    __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
    res = __builtin_mul_overflow(f, factor, &r);

    *plow = r;
    *phigh = r >> 64;

    return res;
#else
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t ahi, bhi, blo;

    if (dhi == 0) {
        mulu64(plow, phigh, dlo, factor);
        return false;
    }

    mulu64(plow, &ahi, dlo, factor);
    mulu64(&blo, &bhi, dhi, factor);

    return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
#endif
}
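
/*
 * Example (illustrative): with *phigh == 0 and *plow == 1ULL << 63,
 * mulu128(&lo, &hi, 4) leaves hi == 2, lo == 0 and returns false,
 * since 2^63 * 4 == 2^65 still fits in 128 bits.
 */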
/**
 * uadd64_carry - addition with carry-in and carry-out
 * @x, @y: addends
 * @pcarry: in-out carry value
 *
 * Computes @x + @y + *@pcarry, placing the carry-out back
 * into *@pcarry and returning the 64-bit sum.
 */
static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
{
#if __has_builtin(__builtin_addcll)
    unsigned long long c = *pcarry;
    x = __builtin_addcll(x, y, c, &c);
    *pcarry = c & 1;
    return x;
#else
    bool c = *pcarry;
    /* This is clang's internal expansion of __builtin_addc. */
    c = uadd64_overflow(x, c, &x);
    c |= uadd64_overflow(x, y, &x);
    *pcarry = c;
    return x;
#endif
}
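
/*
 * Typical use (illustrative sketch): propagating the carry across a
 * multi-word addition, least significant limb first:
 *
 *     bool carry = false;
 *     for (size_t i = 0; i < n; i++) {
 *         out[i] = uadd64_carry(a[i], b[i], &carry);
 *     }
 */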
/**
 * usub64_borrow - subtraction with borrow-in and borrow-out
 * @x, @y: Minuend, subtrahend
 * @pborrow: in-out borrow value
 *
 * Computes @x - @y - *@pborrow, placing the borrow-out back
 * into *@pborrow and returning the 64-bit difference.
 */
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
    unsigned long long b = *pborrow;
    x = __builtin_subcll(x, y, b, &b);
    *pborrow = b & 1;
    return x;
#else
    bool b = *pborrow;
    b = usub64_overflow(x, b, &x);
    b |= usub64_overflow(x, y, &x);
    *pborrow = b;
    return x;
#endif
}
/* Host type specific sizes of these routines. */

#if ULONG_MAX == UINT32_MAX
# define clzl   clz32
# define ctzl   ctz32
# define clol   clo32
# define ctol   cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl   clz64
# define ctzl   ctz64
# define clol   clo64
# define ctol   cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif
static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}
/**
 * Return @value rounded down to the nearest power of two or zero.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        /* Avoid undefined shift by 64 */
        return 0;
    }
    return 0x8000000000000000ull >> clz64(value);
}
/**
 * Return @value rounded up to the nearest power of two modulo 2^64.
 * This is *zero* for @value > 2^63, so be careful.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /*
         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
         * Therefore, either @value == 0 or @value > 2^63.
         * If it's 0, return 1, else return 0.
         */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}
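
/*
 * Examples (illustrative): pow2ceil(1) == 1, pow2ceil(5) == 8,
 * pow2ceil(0) == 1, and pow2ceil((1ULL << 63) + 1) == 0 because
 * the mathematical answer, 2^64, wraps to zero.
 */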
static inline uint32_t pow2roundup32(uint32_t x)
{
    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
    return x + 1;
}
/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are
 * reduced modulo 128; the caller is responsible for verifying or
 * asserting both the shift range and the plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are
 * reduced modulo 128; the caller is responsible for verifying or
 * asserting both the shift range and the plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
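
/*
 * Example (illustrative): with lo == 1, hi == 0 and bool ovf == false,
 * ulshift(&lo, &hi, 64, &ovf) leaves lo == 0, hi == 1 and ovf still
 * false, since no set bit was shifted out of the 128-bit value.
 */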
/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
 * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
 *
 * Licensed under the GPLv2/LGPLv3
 */
static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
                                  uint64_t n0, uint64_t d)
{
#if defined(__x86_64__)
    uint64_t q;
    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
    return q;
#elif defined(__s390x__) && !defined(__clang__)
    /* Need to use a TImode type to get an even register pair for DLGR. */
    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
    *r = n >> 64;
    return n;
#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
    /* From Power ISA 2.06, programming note for divdeu. */
    uint64_t q1, q2, Q, r1, r2, R;
    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
        : "=&r"(q1), "=r"(q2)
        : "r"(n1), "r"(n0), "r"(d));
    r1 = -(q1 * d);         /* low part of (n1<<64) - (q1 * d) */
    r2 = n0 - (q2 * d);
    Q = q1 + q2;
    R = r1 + r2;
    if (R >= d || R < r2) { /* overflow implies R > d */
        Q += 1;
        R -= d;
    }
    *r = R;
    return Q;
#else
    /* Generic fallback: schoolbook division in 32-bit halves,
     * as in GMP's __udiv_qrnnd_c. */
    uint64_t d0, d1, q0, q1, r1, r0, m;

    d0 = (uint32_t)d;
    d1 = d >> 32;

    r1 = n1 % d1;
    q1 = n1 / d1;
    m = q1 * d0;
    r1 = (r1 << 32) | (n0 >> 32);
    if (r1 < m) {
        q1 -= 1;
        r1 += d;
        if (r1 >= d) {
            if (r1 < m) {
                q1 -= 1;
                r1 += d;
            }
        }
    }
    r1 -= m;

    r0 = r1 % d1;
    q0 = r1 / d1;
    m = q0 * d0;
    r0 = (r0 << 32) | (uint32_t)n0;
    if (r0 < m) {
        q0 -= 1;
        r0 += d;
        if (r0 >= d) {
            if (r0 < m) {
                q0 -= 1;
                r0 += d;
            }
        }
    }
    r0 -= m;

    *r = r0;
    return (q1 << 32) | q0;
#endif
}
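
/*
 * Note (illustrative): udiv_qrnnd() divides the 128-bit value
 * (n1 << 64) | n0 by d, returning the quotient and storing the
 * remainder in *r.  As with GMP's __udiv_qrnnd, callers should
 * ensure n1 < d so the quotient fits in 64 bits; the generic
 * fallback additionally assumes a normalized divisor (most
 * significant bit of d set), since it divides by d >> 32.
 */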
Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);

#endif