1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Userspace implementations of gettimeofday() and friends.
5 * Copyright (C) 2012 ARM Limited
7 * Author: Will Deacon <will.deacon@arm.com>
10 #include <linux/linkage.h>
11 #include <asm/asm-offsets.h>
12 #include <asm/unistd.h>
/* NSEC_PER_SEC = 1000000000 = 0x3b9aca00, split into 16-bit halves for mov+movk. */
14 #define NSEC_PER_SEC_LO16	0xca00
15 #define NSEC_PER_SEC_HI16	0x3b9a
23 * Conventions for macro arguments:
24 * - An argument is write-only if its name starts with "res".
25 * - All other arguments are read-only, unless otherwise specified.
/* Spin while the seqlock count is odd (bit 0 set => concurrent update in progress). */
29 9999:	ldr	seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
30 	tbnz	seqcnt, #0, 9999b
/*
 * Re-read the sequence count after the data reads; presumably branches to
 * \fail when it no longer matches the count read at acquire time (the
 * compare/branch is in elided lines — confirm).
 */
34 .macro seqcnt_check fail
36 	ldr	w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
/*
 * Load the use_syscall flag from the data page; the branch to \fail when
 * the vdso clocksource is unusable is in elided lines — confirm.
 */
41 .macro syscall_check fail
42 	ldr	w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
/* \res = NSEC_PER_SEC (0x3b9aca00 = 1000000000), built in two 16-bit moves. */
46 .macro get_nsec_per_sec res
47 	mov	\res, #NSEC_PER_SEC_LO16
48 	movk	\res, #NSEC_PER_SEC_HI16, lsl #16
52  * Returns the clock delta, in nanoseconds left-shifted by the clock
55 .macro get_clock_shifted_nsec res, cycle_last, mult
56 	/* Read the virtual counter. */
59 	/* Calculate cycle delta and convert to ns. */
60 	sub	\res, x_tmp, \cycle_last
61 	/* We can only guarantee 56 bits of precision. */
62 	movn	x_tmp, #0xff00, lsl #48
	/* x_tmp = ~(0xff00 << 48): a mask keeping the low 56 bits (applied in elided lines). */
66  * Fake address dependency from the value computed from the counter
67  * register to subsequent data page accesses so that the sequence
68  * locking also orders the read of the counter.
	/*
	 * NOTE(review): this add must leave vdso_data unchanged, so x_tmp is
	 * expected to be zero at this point (zeroed in elided lines) — confirm.
	 */
71 	add	vdso_data, vdso_data, x_tmp
75  * Returns in res_{sec,nsec} the REALTIME timespec, based on the
76  * "wall time" (xtime) and the clock_mono delta.
78 .macro get_ts_realtime res_sec, res_nsec, \
79 clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
	/* res_nsec = clock_nsec + xtime_nsec; may exceed one second's worth. */
80 	add	\res_nsec, \clock_nsec, \xtime_nsec
	/* x_tmp = number of whole seconds of overflow; fold into res_sec. */
81 	udiv	x_tmp, \res_nsec, \nsec_to_sec
82 	add	\res_sec, \xtime_sec, x_tmp
	/* res_nsec -= x_tmp * nsec_to_sec, i.e. res_nsec %= nsec_to_sec. */
83 	msub	\res_nsec, x_tmp, \nsec_to_sec, \res_nsec
87  * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
88  * used for CLOCK_MONOTONIC_RAW.
90 .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
	/* Divide/modulo: res_sec = clock_nsec / nsec_to_sec; res_nsec = remainder. */
91 	udiv	\res_sec, \clock_nsec, \nsec_to_sec
92 	msub	\res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
95 /* sec and nsec are modified in place. */
96 .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
	/* Component-wise addition of the two timespecs. */
98 	add	\sec, \sec, \ts_sec
99 	add	\nsec, \nsec, \ts_nsec
101 	/* Normalise the new timespec. */
102 	cmp	\nsec, \nsec_to_sec
	/*
	 * NOTE(review): both corrections appear below — subtract when
	 * nsec >= nsec_to_sec, add when nsec went negative. The conditional
	 * branches and the matching \sec adjustments are in elided lines;
	 * confirm there.
	 */
104 	sub	\nsec, \nsec, \nsec_to_sec
109 	add	\nsec, \nsec, \nsec_to_sec
/*
 * Store the computed timespec {x10, x11} to the user buffer in x1 and
 * return. \shift is not used in the lines visible here — presumably it
 * controls un-shifting of the nsec value in elided lines; confirm.
 */
114 .macro clock_gettime_return, shift=0
118 	stp	x10, x11, [x1, #TSPEC_TV_SEC]
/*
 * Emit a jump-table entry for clock id \index, asserting at assembly time
 * that it sits at exactly \jumptable + 4 * \index (one instruction per slot).
 */
123 .macro jump_slot jumptable, index, label
124 	.if (. - \jumptable) != 4 * (\index)
125 	.error "Jump slot index mismatch"
132 /* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
133 ENTRY(__kernel_gettimeofday)
135 	adr	vdso_data, _vdso_data
136 	/* If tv is NULL, skip to the timezone code. */
139 	/* Compute the time of day. */
	/* Fall back to the syscall at 4f when the vdso clocksource is unusable. */
141 	syscall_check fail=4f
142 	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
143 	/* w11 = cs_mono_mult, w12 = cs_shift */
144 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
	/* x13 = xtime seconds, x14 = xtime nanoseconds (presumably left-shifted
	 * by cs_shift, as in clock_gettime below — confirm). */
145 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
147 	get_nsec_per_sec res=x9
	/* x15 = shifted-ns delta since cycle_last. */
150 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
152 	get_ts_realtime res_sec=x10, res_nsec=x11, \
153 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
155 	/* Convert ns to us. */
	/* Store tv_sec/tv_usec into *tv. */
159 	stp	x10, x11, [x0, #TVAL_TV_SEC]
161 	/* If tz is NULL, return 0. */
	/* Copy tz_minuteswest/tz_dsttime from the data page into *tz. */
163 	ldp	w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
164 	stp	w4, w5, [x1, #TZ_MINWEST]
169 	/* Syscall fallback. */
170 	mov	x8, #__NR_gettimeofday
174 ENDPROC(__kernel_gettimeofday)
176 #define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE
178 /* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
179 ENTRY(__kernel_clock_gettime)
	/* Clock ids above JUMPSLOT_MAX are handled via the syscall fallback. */
181 	cmp	w0, #JUMPSLOT_MAX
183 	adr	vdso_data, _vdso_data
	/* x_tmp = &jumptable[clock_id]; each slot is one 4-byte instruction. */
185 	add	x_tmp, x_tmp, w0, uxtw #2
190 	jump_slot jumptable, CLOCK_REALTIME, realtime
191 	jump_slot jumptable, CLOCK_MONOTONIC, monotonic
194 	jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
195 	jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
196 	jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
	/* Assembly-time check: exactly one slot per clock id up to JUMPSLOT_MAX. */
198 	.if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
199 	.error "Wrong jumptable size"
	/* CLOCK_REALTIME: counter delta + xtime, all in shifted nsecs. */
205 	syscall_check fail=syscall
206 	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
207 	/* w11 = cs_mono_mult, w12 = cs_shift */
208 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
209 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
211 	/* All computations are done with left-shifted nsecs. */
212 	get_nsec_per_sec res=x9
215 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
	/* Retry from the top of this path if the seqlock changed underneath us. */
216 	seqcnt_check fail=realtime
217 	get_ts_realtime res_sec=x10, res_nsec=x11, \
218 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
219 	clock_gettime_return, shift=1
	/* CLOCK_MONOTONIC: realtime result plus the wall-to-monotonic offset. */
224 	syscall_check fail=syscall
225 	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
226 	/* w11 = cs_mono_mult, w12 = cs_shift */
227 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
228 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	/* x3/x4 = wall-to-monotonic offset (sec/nsec). */
229 	ldp	x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
231 	/* All computations are done with left-shifted nsecs. */
233 	get_nsec_per_sec res=x9
236 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
237 	seqcnt_check fail=monotonic
238 	get_ts_realtime res_sec=x10, res_nsec=x11, \
239 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
241 	add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
242 	clock_gettime_return, shift=1
	/* CLOCK_MONOTONIC_RAW: raw counter delta plus the accumulated raw time. */
247 	syscall_check fail=syscall
248 	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
249 	/* w11 = cs_raw_mult, w12 = cs_shift */
	/* NOTE(review): register order swapped vs. the mono paths — this ldp
	 * presumably reads {cs_shift, cs_raw_mult} as adjacent fields; confirm
	 * against the asm-offsets layout. */
250 	ldp	w12, w11, [vdso_data, #VDSO_CS_SHIFT]
251 	ldp	x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
253 	/* All computations are done with left-shifted nsecs. */
254 	get_nsec_per_sec res=x9
257 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
258 	seqcnt_check fail=monotonic_raw
259 	get_ts_clock_raw res_sec=x10, res_nsec=x11, \
260 		clock_nsec=x15, nsec_to_sec=x9
262 	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
263 	clock_gettime_return, shift=1
	/* CLOCK_REALTIME_COARSE: just the cached coarse timespec, no counter read. */
268 	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
269 	seqcnt_check fail=realtime_coarse
	/* CLOCK_MONOTONIC_COARSE: coarse timespec plus wall-to-monotonic offset. */
275 	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
276 	ldp	x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
277 	seqcnt_check fail=monotonic_coarse
279 	/* Computations are done in (non-shifted) nsecs. */
280 	get_nsec_per_sec res=x9
281 	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
285 syscall: /* Syscall fallback. */
286 	mov	x8, #__NR_clock_gettime
290 ENDPROC(__kernel_clock_gettime)
292 /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
293 ENTRY(__kernel_clock_getres)
	/*
	 * Hi-res clocks (REALTIME, MONOTONIC, MONOTONIC_RAW) share one
	 * resolution. The ccmp chain sets Z if w0 matches any of the three:
	 * when an earlier compare already matched (eq), ccmp skips the compare
	 * and installs flags #0x4 (Z set).
	 */
295 	cmp	w0, #CLOCK_REALTIME
296 	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
297 	ccmp	w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
300 	adr	vdso_data, _vdso_data
301 	ldr	w2, [vdso_data, #CLOCK_REALTIME_RES]
	/* Coarse clocks share the coarse-resolution constant emitted below. */
304 	cmp	w0, #CLOCK_REALTIME_COARSE
305 	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
312 3:	/* res == NULL. */
316 4:	/* Syscall fallback. */
317 	mov	x8, #__NR_clock_getres
	/* Local literal: coarse clock resolution. */
321 	.quad	CLOCK_COARSE_RES
323 ENDPROC(__kernel_clock_getres)