// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <vdso/datapage.h>
#include <vdso/helpers.h>

#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif
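/*
 * Note: an architecture can override the default helper above by
 * providing its own inline and defining the macro before this file is
 * included. As an illustrative sketch (not the actual x86 code), an
 * arch header could do roughly:
 *
 *	static __always_inline
 *	u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
 *	{
 *		// Clamp instead of masking: the counter may read slightly
 *		// behind cycle_last on another CPU, and masking a negative
 *		// delta would produce a huge bogus value.
 *		return cycles > last ? (cycles - last) * mult : 0;
 *	}
 *	#define vdso_calc_delta vdso_calc_delta
 */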
#ifndef vdso_shift_ns
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
{
	return ns >> shift;
}
#endif
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif

#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif
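/*
 * VDSO_CLOCKMODE_NONE indicates that the current clocksource has no
 * counter which can be read from userspace. When this check fails the
 * readers below return an error, which makes the outer wrappers fall
 * back to the regular syscall.
 */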
#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
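/*
 * The timens offsets applied above come from the task's time namespace
 * VVAR page: vdns->offset[clk] holds the per-clock (sec, nsec) shift
 * which converts the host's CLOCK_MONOTONIC/CLOCK_BOOTTIME view into
 * the namespace's view, while the real timekeeping data is fetched via
 * __arch_get_timens_vdso_data().
 */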
#else
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	/* Allows the high resolution parts to be compiled out */
	if (!__arch_vdso_hres_capable())
		return -1;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VDSO_CLOCKMODE_TIMENS. For tasks not affected by time
		 * namespaces this does not hurt performance: if vd->seq is
		 * odd, i.e. a concurrent update is in progress, the extra
		 * check for vd->clock_mode is just a few extra instructions
		 * while spin waiting for vd->seq to become even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
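/*
 * Normalisation example: if the loop leaves sec = 5 and ns = 2300000000,
 * __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns) returns 2 and sets
 * ns = 300000000, so the result is tv_sec = 7, tv_nsec = 300000000.
 * A non-zero return from do_hres() tells the callers to take the
 * syscall fallback instead.
 */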
#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	u32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	return -1;
}
#endif
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VDSO_CLOCK_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}
static __maybe_unused int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
			     struct __kernel_timespec *ts)
{
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}
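/*
 * Dispatch example: for clock = CLOCK_MONOTONIC_COARSE (6) the mask is
 * 1U << 6, which falls into VDSO_COARSE, so the request is served by
 * do_coarse() from the coarse timestamps. CLOCK_MONOTONIC_RAW falls
 * into VDSO_RAW and uses the CS_RAW data page via do_hres(). The
 * VDSO_HRES, VDSO_COARSE and VDSO_RAW masks are defined in
 * vdso/datapage.h.
 */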
static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
			   struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(vd, clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}
static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
}
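/*
 * Illustrative only: an architecture's vDSO typically exports thin
 * wrappers around these helpers, along the lines of
 *
 *	int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 *	{
 *		return __cvdso_clock_gettime(clock, ts);
 *	}
 *
 * The exported symbol names, the calling convention and the syscall
 * fallback implementation are arch specific.
 */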
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
			     struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}

static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
			  struct __kernel_old_timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}
static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
}
#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}

static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	return __cvdso_time_data(__arch_get_vdso_data(), time);
}
#endif /* VDSO_HAS_TIME */
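/*
 * time() only needs second granularity, so __cvdso_time_data() uses a
 * single READ_ONCE of the coarse CLOCK_REALTIME seconds instead of a
 * full seqcount protected read.
 */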
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
				struct __kernel_timespec *res)
{
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}

	return 0;
}
static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
			      struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(vd, clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}
static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
				 struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}

	return ret;
}

static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
						clock, res);
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */