From: Anna-Maria Behnsen
Date: Mon, 3 Mar 2025 11:11:17 +0000 (+0100)
Subject: x86/vdso: Prepare introduction of struct vdso_clock
X-Git-Tag: v6.15-rc1~202^2~6
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=bf0eff816e467f8decb9b3ac0218cc26060e359f;p=thirdparty%2Fkernel%2Flinux.git

x86/vdso: Prepare introduction of struct vdso_clock

To support multiple PTP clocks, the VDSO data structure needs to be
reworked. All clock specific data will end up in struct vdso_clock, and
struct vdso_time_data will contain an array of VDSO clocks. At the
moment, vdso_clock is simply a define which maps vdso_clock to
vdso_time_data.

To prepare for the rework of the data structures, replace the struct
vdso_time_data pointer with a struct vdso_clock pointer where
applicable.

No functional change.

Signed-off-by: Anna-Maria Behnsen
Signed-off-by: Nam Cao
Signed-off-by: Thomas Weißschuh
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/all/20250303-vdso-clock-v1-15-c1b5c69a166f@linutronix.de
---

diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index edec796832e08..9e52cc46e1da9 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -261,7 +261,7 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode,
 	return U64_MAX;
 }
 
-static inline bool arch_vdso_clocksource_ok(const struct vdso_time_data *vd)
+static inline bool arch_vdso_clocksource_ok(const struct vdso_clock *vc)
 {
 	return true;
 }
@@ -300,34 +300,34 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
  * declares everything with the MSB/Sign-bit set as invalid. Therefore the
  * effective mask is S64_MAX.
  */
-static __always_inline u64 vdso_calc_ns(const struct vdso_time_data *vd, u64 cycles, u64 base)
+static __always_inline u64 vdso_calc_ns(const struct vdso_clock *vc, u64 cycles, u64 base)
 {
-	u64 delta = cycles - vd->cycle_last;
+	u64 delta = cycles - vc->cycle_last;
 
 	/*
 	 * Negative motion and deltas which can cause multiplication
 	 * overflow require special treatment. This check covers both as
-	 * negative motion is guaranteed to be greater than @vd::max_cycles
+	 * negative motion is guaranteed to be greater than @vc::max_cycles
 	 * due to unsigned comparison.
 	 *
 	 * Due to the MSB/Sign-bit being used as invalid marker (see
 	 * arch_vdso_cycles_ok() above), the effective mask is S64_MAX, but that
 	 * case is also unlikely and will also take the unlikely path here.
 	 */
-	if (unlikely(delta > vd->max_cycles)) {
+	if (unlikely(delta > vc->max_cycles)) {
 		/*
 		 * Due to the above mentioned TSC wobbles, filter out
 		 * negative motion. Per the above masking, the effective
 		 * sign bit is now bit 62.
 		 */
 		if (delta & (1ULL << 62))
-			return base >> vd->shift;
+			return base >> vc->shift;
 
 		/* Handle multiplication overflow gracefully */
-		return mul_u64_u32_add_u64_shr(delta & S64_MAX, vd->mult, base, vd->shift);
+		return mul_u64_u32_add_u64_shr(delta & S64_MAX, vc->mult, base, vc->shift);
 	}
 
-	return ((delta * vd->mult) + base) >> vd->shift;
+	return ((delta * vc->mult) + base) >> vc->shift;
 }
 
 #define vdso_calc_ns vdso_calc_ns
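
As a rough sketch of the transition the changelog describes (not part of
this patch): the two snapshots below show the current transitional alias
and the intended end state. Only cycle_last, max_cycles, mult and shift
are taken from the fields used in vdso_calc_ns() above; VDSO_NR_CLOCKS
and all other members are hypothetical placeholders, and the two
snapshots are alternatives, not meant to be compiled together.

	/*
	 * Snapshot 1 - transitional state: "struct vdso_clock" is only
	 * a define which maps to struct vdso_time_data, so callers
	 * already converted to take a struct vdso_clock pointer keep
	 * compiling against the old single-structure layout.
	 */
	#define vdso_clock	vdso_time_data

	/*
	 * Snapshot 2 - intended end state (sketch): the clock specific
	 * data moves into struct vdso_clock and struct vdso_time_data
	 * carries an array of them, one slot per supported clock.
	 */
	#define VDSO_NR_CLOCKS	2		/* hypothetical count */

	struct vdso_clock {
		u64	cycle_last;	/* timestamp of the last update */
		u64	max_cycles;	/* limit for overflow/negative motion */
		u32	mult;		/* cycles to nanoseconds multiplier */
		u32	shift;		/* cycles to nanoseconds shift */
		/* ... further clock specific fields ... */
	};

	struct vdso_time_data {
		struct vdso_clock	clock_data[VDSO_NR_CLOCKS];
		/* ... clock independent fields ... */
	};

Converting the pointer type first, while the alias makes both names
refer to the same structure, means the later patch which actually splits
the structures only has to touch the data definitions, not every caller.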