vdso: Make delta calculation overflow safe
author Adrian Hunter <adrian.hunter@intel.com>
Mon, 25 Mar 2024 06:40:11 +0000 (08:40 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Mon, 8 Apr 2024 13:03:07 +0000 (15:03 +0200)
Kernel timekeeping is designed to keep the change in cycles (since the last
timer interrupt) below max_cycles, which prevents multiplication overflow
when converting cycles to nanoseconds. However, if timer interrupts stop,
the calculation will eventually overflow.
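
To make the failure mode concrete, here is a minimal standalone sketch
(userspace C, not kernel code; the mult/shift values are made-up examples,
not taken from any real clocksource) of the cycles-to-nanoseconds conversion
wrapping once the delta grows past the point where delta * mult no longer
fits in 64 bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up example: roughly a 1 GHz clock, ns = (cycles * mult) >> shift */
	uint32_t mult = 4194304, shift = 22;
	/* Largest delta whose 64-bit product with mult does not wrap */
	uint64_t limit = UINT64_MAX / mult;

	printf("ns at limit   : %llu\n",
	       (unsigned long long)((limit * mult) >> shift));
	printf("ns past limit : %llu (64-bit product wrapped)\n",
	       (unsigned long long)(((limit + 1) * mult) >> shift));
	return 0;
}

max_cycles is, by construction, at or below that raw limit, which is why
checking the delta against it is enough to rule out the wrap.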

Add protection against that, enabled by the config option
CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT: check the delta against max_cycles and
fall back to a slower, higher precision calculation when the limit is
exceeded.
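
The fallback goes through mul_u64_u32_add_u64_shr(), which computes
((a * mul) + b) >> shift without the intermediate 64-bit overflow. As a
rough sketch of what that means (assuming a compiler with native 128-bit
integers; widened_mul_add_shr is an illustrative name, not the kernel
helper, which also covers architectures without 128-bit support):

#include <stdint.h>

static inline uint64_t widened_mul_add_shr(uint64_t a, uint32_t mul,
					   uint64_t b, unsigned int shift)
{
	/*
	 * Widening to 128 bits keeps (a * mul) + b from overflowing before
	 * the shift; the result is then truncated to 64 bits, matching the
	 * kernel helper's u64 return type.
	 */
	return (uint64_t)((((unsigned __int128)a * mul) + b) >> shift);
}

A call like widened_mul_add_shr(delta, vd->mult, base, vd->shift) mirrors
the fallback in the hunk below.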

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-8-adrian.hunter@intel.com
lib/vdso/gettimeofday.c

index 9fa90e0794c95b7cf2499b3347f9c3becaecc90c..9c3a8d2440c926785163a060b6547454cb0ecb22 100644
 # define VDSO_DELTA_MASK(vd)   (vd->mask)
 #endif
 
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+{
+       return delta < vd->max_cycles;
+}
+#else
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+{
+       return true;
+}
+#endif
+
 #ifndef vdso_shift_ns
 static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
 {
@@ -28,7 +40,10 @@ static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles,
 {
        u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
 
-       return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+       if (likely(vdso_delta_ok(vd, delta)))
+               return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+
+       return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift);
 }
 #endif /* vdso_calc_ns */
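
For completeness, a standalone sanity check (userspace C, hypothetical
function names and made-up mult/shift/base values) of the property the
change relies on: below the 64-bit overflow threshold the plain multiply and
the widened fallback agree, so only deltas that fail the vdso_delta_ok()
check ever pay for the slower path, and for those the widened form still
yields a meaningful result while the plain product wraps. The likely()
annotation keeps the unchanged single multiply as the fast, common case.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Plain 64-bit path, mirroring the likely() branch in the hunk above */
static uint64_t ns_fast(uint64_t delta, uint32_t mult, uint64_t base,
			uint32_t shift)
{
	return ((delta * mult) + base) >> shift;
}

/* Widened fallback, same semantics as mul_u64_u32_add_u64_shr() */
static uint64_t ns_safe(uint64_t delta, uint32_t mult, uint64_t base,
			uint32_t shift)
{
	return (uint64_t)((((unsigned __int128)delta * mult) + base) >> shift);
}

int main(void)
{
	uint32_t mult = 4194304, shift = 22;	/* made-up example values */
	uint64_t base = 1234567;
	uint64_t limit = UINT64_MAX / mult;	/* 64-bit overflow threshold */

	/* Below the threshold the two paths agree ... */
	for (uint64_t d = 0; d < limit; d += limit / 1000)
		assert(ns_fast(d, mult, base, shift) ==
		       ns_safe(d, mult, base, shift));

	/* ... past it the plain product wraps while the widened form does not. */
	printf("fast: %llu  safe: %llu\n",
	       (unsigned long long)ns_fast(limit + 1, mult, base, shift),
	       (unsigned long long)ns_safe(limit + 1, mult, base, shift));
	return 0;
}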