// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
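/*
 * For example (a sketch; the exact image name and path depend on the
 * build):
 *
 *	readelf -r arch/x86/entry/vdso/vdso64.so.dbg
 *
 * should report no relocation entries against this code.
 */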
#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>
#define gtod (&VVAR(vsyscall_gtod_data))
extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);
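/*
 * These are the entry points the vDSO image exports. Userspace locates the
 * image via the AT_SYSINFO_EHDR auxiliary vector entry (getauxval(3)) and
 * resolves the __vdso_* symbols from its dynamic symbol table; libc
 * typically does this at startup, so ordinary clock_gettime() calls can
 * take the fast paths below without entering the kernel.
 */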
#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page[PAGE_SIZE]
	__attribute__((visibility("hidden")));
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
extern u8 hvclock_page[PAGE_SIZE]
	__attribute__((visibility("hidden")));
#endif
#ifdef CONFIG_X86_64

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*ts) :
	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
	     "rcx", "r11");
	return ret;
}
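/*
 * The asm above follows the x86-64 syscall ABI: the syscall number goes in
 * %rax ("0" ties it to the "=a" output), the first two arguments in %rdi
 * ("D") and %rsi ("S"), and the return value comes back in %rax. The
 * SYSCALL instruction itself clobbers %rcx and %r11, hence the clobber
 * list.
 */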
#else

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*ts)
		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
		: "edx");
	return ret;
}
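/*
 * On 32-bit, the syscall number goes in %eax and the first two arguments
 * in %ebx and %ecx. %ebx doubles as the GOT pointer in PIC code, so it
 * cannot appear in the constraint list; it is saved to %edx and restored
 * by hand around the __kernel_vsyscall call above.
 */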
#endif

#ifdef CONFIG_PARAVIRT_CLOCK
static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}
static notrace u64 vread_pvclock(void)
{
	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
	u32 version;
	u64 ret;
	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated if the underlying CPU changes or that the
	 * version is increased whenever the underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs.  This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */
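	/*
	 * A sketch of the conversion __pvclock_read_cycles() performs below
	 * (per the pvclock ABI; tsc_shift may be negative, in which case
	 * the shift goes right instead of left):
	 *
	 *	delta = rdtsc_ordered() - pvti->tsc_timestamp;
	 *	ns    = pvti->system_time +
	 *		(((delta << pvti->tsc_shift) *
	 *		  pvti->tsc_to_system_mul) >> 32);
	 */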
	do {
		version = pvclock_read_begin(pvti);

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
			return U64_MAX;

		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	return ret;
}
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
static notrace u64 vread_hvclock(void)
{
	const struct ms_hyperv_tsc_page *tsc_pg =
		(const struct ms_hyperv_tsc_page *)&hvclock_page;

	return hv_read_tsc_page(tsc_pg);
}
#endif
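/*
 * hv_read_tsc_page() above does, roughly, a sequence-checked read of the
 * Hyper-V reference TSC page (a sketch of the scaling, not the exact
 * helper):
 *
 *	time = ((unsigned __int128)rdtsc() * tsc_pg->tsc_scale >> 64)
 *		+ tsc_pg->tsc_offset;
 */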
notrace static inline u64 vgetcyc(int mode)
{
	if (mode == VCLOCK_TSC)
		return (u64)rdtsc_ordered();
	/*
	 * For any memory-mapped vclock type, we need to make sure that gcc
	 * doesn't cleverly hoist a load before the mode check.  Otherwise we
	 * might end up touching the memory-mapped page even if the vclock in
	 * question isn't enabled, which will segfault.  Hence the barriers.
	 */
#ifdef CONFIG_PARAVIRT_CLOCK
	if (mode == VCLOCK_PVCLOCK) {
		barrier();
		return vread_pvclock();
	}
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
	if (mode == VCLOCK_HVCLOCK) {
		barrier();
		return vread_hvclock();
	}
#endif
	return U64_MAX;
}
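/*
 * Note the error convention: U64_MAX (negative when viewed as s64) means
 * "no usable vclock", which do_hres() below turns into a fallback syscall.
 */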
notrace static int do_hres(clockid_t clk, struct timespec *ts)
{
	struct vgtod_ts *base = &gtod->basetime[clk];
	u64 cycles, last, sec, ns;
	unsigned int seq;

	do {
		seq = gtod_read_begin(gtod);
		cycles = vgetcyc(gtod->vclock_mode);
		ns = base->nsec;
		last = gtod->cycle_last;
		if (unlikely((s64)cycles < 0))
			return vdso_fallback_gettime(clk, ts);
		if (cycles > last)
			ns += (cycles - last) * gtod->mult;
		ns >>= gtod->shift;
		sec = base->sec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
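/*
 * __iter_div_u64_rem() divides by repeated subtraction, so its cost is
 * proportional to the quotient. Outside the retry loop, ns is bounded to
 * roughly a second plus one update interval, so only an iteration or two
 * is needed; a torn read inside the loop could make ns arbitrarily large
 * and the division arbitrarily slow.
 */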
notrace static void do_coarse(clockid_t clk, struct timespec *ts)
{
	struct vgtod_ts *base = &gtod->basetime[clk];
	unsigned int seq;

	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = base->sec;
		ts->tv_nsec = base->nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	unsigned int msk;

	/* Sort out negative (CPU/FD) and invalid clocks */
	if (unlikely((unsigned int) clock >= MAX_CLOCKS))
		return vdso_fallback_gettime(clock, ts);

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VGTOD_HRES)) {
		return do_hres(clock, ts);
	} else if (msk & VGTOD_COARSE) {
		do_coarse(clock, ts);
		return 0;
	}
	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		struct timespec *ts = (struct timespec *) tv;

		do_hres(CLOCK_REALTIME, ts);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = READ_ONCE(gtod->basetime[CLOCK_REALTIME].sec);

	if (t)
		*t = result;
	return result;
}
time_t time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));