From: David Laight
Date: Wed, 5 Nov 2025 20:10:27 +0000 (+0000)
Subject: lib: mul_u64_u64_div_u64(): rename parameter 'c' to 'd'
X-Git-Tag: v6.19-rc1~70^2~93
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5944f875ac27cae8b831206aef011a444efa637d;p=thirdparty%2Fkernel%2Flinux.git

lib: mul_u64_u64_div_u64(): rename parameter 'c' to 'd'

Patch series "Implement mul_u64_u64_div_u64_roundup()", v5.

The pwm-stm32.c code wants a 'rounding up' version of
mul_u64_u64_div_u64().  This can be done simply by adding 'divisor - 1'
to the 128bit product.

Implement mul_u64_add_u64_div_u64(a, b, c, d) = (a * b + c)/d based on
the existing code.

Define mul_u64_u64_div_u64(a, b, d) as mul_u64_add_u64_div_u64(a, b, 0, d)
and mul_u64_u64_div_u64_roundup(a, b, d) as
mul_u64_add_u64_div_u64(a, b, d-1, d).  (A reference-model sketch of
these wrappers follows the diff below.)

Only x86-64 has an optimised (asm) version of the function; it avoids
the 'add c' when c is known to be zero.  In all other cases the extra
code will be noise compared to the software divide code.

The test module has been updated to test mul_u64_u64_div_u64_roundup()
and also enhanced to verify the C division code on x86-64 and the 32bit
division code on 64bit.

This patch (of 9):

Change the prototype from mul_u64_u64_div_u64(u64 a, u64 b, u64 c) to
mul_u64_u64_div_u64(u64 a, u64 b, u64 d).  Using 'd' for 'divisor'
makes more sense.  An upcoming change adds a 'c' parameter to calculate
(a * b + c)/d.

Link: https://lkml.kernel.org/r/20251105201035.64043-1-david.laight.linux@gmail.com
Link: https://lkml.kernel.org/r/20251105201035.64043-2-david.laight.linux@gmail.com
Signed-off-by: David Laight
Reviewed-by: Nicolas Pitre
Cc: Biju Das
Cc: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Jens Axboe
Cc: Li RongQing
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Uwe Kleine-König
Signed-off-by: Andrew Morton
---

diff --git a/lib/math/div64.c b/lib/math/div64.c
index bf77b9843175e..0ebff850fd4dd 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -184,10 +184,10 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 EXPORT_SYMBOL(iter_div_u64_rem);

 #ifndef mul_u64_u64_div_u64
-u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
 {
 	if (ilog2(a) + ilog2(b) <= 62)
-		return div64_u64(a * b, c);
+		return div64_u64(a * b, d);

 #if defined(__SIZEOF_INT128__)

@@ -212,37 +212,37 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)

 #endif

-	/* make sure c is not zero, trigger runtime exception otherwise */
-	if (unlikely(c == 0)) {
+	/* make sure d is not zero, trigger runtime exception otherwise */
+	if (unlikely(d == 0)) {
 		unsigned long zero = 0;

 		OPTIMIZER_HIDE_VAR(zero);
 		return ~0UL/zero;
 	}

-	int shift = __builtin_ctzll(c);
+	int shift = __builtin_ctzll(d);

 	/* try reducing the fraction in case the dividend becomes <= 64 bits */
 	if ((n_hi >> shift) == 0) {
 		u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;

-		return div64_u64(n, c >> shift);
+		return div64_u64(n, d >> shift);
 		/*
 		 * The remainder value if needed would be:
-		 *   res = div64_u64_rem(n, c >> shift, &rem);
+		 *   res = div64_u64_rem(n, d >> shift, &rem);
 		 *   rem = (rem << shift) + (n_lo - (n << shift));
 		 */
 	}

-	if (n_hi >= c) {
+	if (n_hi >= d) {
 		/* overflow: result is unrepresentable in a u64 */
 		return -1;
 	}

 	/* Do the full 128 by 64 bits division */

-	shift = __builtin_clzll(c);
-	c <<= shift;
+	shift = __builtin_clzll(d);
+	d <<= shift;

 	int p = 64 + shift;
 	u64 res = 0;
@@ -257,8 +257,8 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
 		n_hi <<= shift;
 		n_hi |= n_lo >> (64 - shift);
 		n_lo <<= shift;
-		if (carry || (n_hi >= c)) {
-			n_hi -= c;
+		if (carry || (n_hi >= d)) {
+			n_hi -= d;
 			res |= 1ULL << p;
 		}
 	} while (n_hi);
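
For reference, here is a minimal userspace model of the helpers this
series defines.  It is an illustrative sketch only, assuming a compiler
with __int128 support; the kernel implementation above must also work
without __int128 and signals overflow by returning -1.

#include <stdint.h>

typedef uint64_t u64;

/*
 * Reference model of (a * b + c) / d using a full 128-bit product.
 * The sum cannot overflow 128 bits: a * b <= (2^64 - 1)^2 and
 * c <= 2^64 - 1.  Note d == 0 is undefined behaviour here, whereas
 * the kernel version deliberately forces a divide exception.
 */
static u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
{
	return (u64)(((unsigned __int128)a * b + c) / d);
}

/* Round-down (truncating) variant: c == 0. */
static u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
{
	return mul_u64_add_u64_div_u64(a, b, 0, d);
}

/* Round-up variant: adding d - 1 bumps any non-zero remainder. */
static u64 mul_u64_u64_div_u64_roundup(u64 a, u64 b, u64 d)
{
	return mul_u64_add_u64_div_u64(a, b, d - 1, d);
}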
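
A quick spot-check of the rounding behaviour, assuming the model above
is in the same file:

#include <assert.h>

int main(void)
{
	/* 7 * 3 = 21; 21 / 4 truncates to 5 and rounds up to 6 */
	assert(mul_u64_u64_div_u64(7, 3, 4) == 5);
	assert(mul_u64_u64_div_u64_roundup(7, 3, 4) == 6);

	/* 8 * 3 = 24; the division is exact, so both variants give 6 */
	assert(mul_u64_u64_div_u64(8, 3, 4) == 6);
	assert(mul_u64_u64_div_u64_roundup(8, 3, 4) == 6);
	return 0;
}

This matches the series' definition: the roundup result exceeds the
truncating result by exactly one whenever a * b is not a multiple of d.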