git.ipfire.org Git - thirdparty/linux.git/commitdiff
vdso/helpers: Add helpers for seqlocks of single vdso_clock
author: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Tue, 1 Jul 2025 08:57:58 +0000 (10:57 +0200)
committer: Thomas Gleixner <tglx@linutronix.de>
Wed, 9 Jul 2025 09:52:34 +0000 (11:52 +0200)
Auxiliary clocks will have their vDSO data in a dedicated 'struct vdso_clock',
which needs to be synchronized independently.

Add a helper to synchronize a single vDSO clock.

[ tglx: Move the SMP memory barriers to the call sites and get rid of the
   confusing first/last arguments and conditional barriers ]

Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250701-vdso-auxclock-v1-4-df7d9f87b9b8@linutronix.de
include/vdso/helpers.h

index 0a98fed550ba66a84a620fbbd6aee3e3029b4772..1a5ee9d9052c38553c0cdf413e0030286f884d53 100644 (file)
@@ -28,17 +28,47 @@ static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
        return seq != start;
 }
 
-static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
+static __always_inline void vdso_write_seq_begin(struct vdso_clock *vc)
 {
-       struct vdso_clock *vc = vd->clock_data;
+       /*
+        * WRITE_ONCE() is required otherwise the compiler can validly tear
+        * updates to vc->seq and it is possible that the value seen by the
+        * reader is inconsistent.
+        */
+       WRITE_ONCE(vc->seq, vc->seq + 1);
+}
 
+static __always_inline void vdso_write_seq_end(struct vdso_clock *vc)
+{
        /*
         * WRITE_ONCE() is required otherwise the compiler can validly tear
-        * updates to vd[x].seq and it is possible that the value seen by the
+        * updates to vc->seq and it is possible that the value seen by the
         * reader is inconsistent.
         */
-       WRITE_ONCE(vc[CS_HRES_COARSE].seq, vc[CS_HRES_COARSE].seq + 1);
-       WRITE_ONCE(vc[CS_RAW].seq, vc[CS_RAW].seq + 1);
+       WRITE_ONCE(vc->seq, vc->seq + 1);
+}
+
+static __always_inline void vdso_write_begin_clock(struct vdso_clock *vc)
+{
+       vdso_write_seq_begin(vc);
+       /* Ensure the sequence invalidation is visible before data is modified */
+       smp_wmb();
+}
+
+static __always_inline void vdso_write_end_clock(struct vdso_clock *vc)
+{
+       /* Ensure the data update is visible before the sequence is set valid again */
+       smp_wmb();
+       vdso_write_seq_end(vc);
+}
+
+static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
+{
+       struct vdso_clock *vc = vd->clock_data;
+
+       vdso_write_seq_begin(&vc[CS_HRES_COARSE]);
+       vdso_write_seq_begin(&vc[CS_RAW]);
+       /* Ensure the sequence invalidation is visible before data is modified */
        smp_wmb();
 }
 
@@ -46,14 +76,10 @@ static __always_inline void vdso_write_end(struct vdso_time_data *vd)
 {
        struct vdso_clock *vc = vd->clock_data;
 
+       /* Ensure the data update is visible before the sequence is set valid again */
        smp_wmb();
-       /*
-        * WRITE_ONCE() is required otherwise the compiler can validly tear
-        * updates to vd[x].seq and it is possible that the value seen by the
-        * reader is inconsistent.
-        */
-       WRITE_ONCE(vc[CS_HRES_COARSE].seq, vc[CS_HRES_COARSE].seq + 1);
-       WRITE_ONCE(vc[CS_RAW].seq, vc[CS_RAW].seq + 1);
+       vdso_write_seq_end(&vc[CS_HRES_COARSE]);
+       vdso_write_seq_end(&vc[CS_RAW]);
 }
 
 #endif /* !__ASSEMBLY__ */