/*
 * arch/arm64/kernel/vdso/gettimeofday.S
 * (obtained from thirdparty/linux.git via the git.ipfire.org web view;
 *  SPDX conversion: "treewide: Replace GPLv2 boilerplate/reference with
 *  SPDX - rule 234")
 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
9
10 #include <linux/linkage.h>
11 #include <asm/asm-offsets.h>
12 #include <asm/unistd.h>
13
14 #define NSEC_PER_SEC_LO16 0xca00
15 #define NSEC_PER_SEC_HI16 0x3b9a
16
17 vdso_data .req x6
18 seqcnt .req w7
19 w_tmp .req w8
20 x_tmp .req x8
21
22 /*
23 * Conventions for macro arguments:
24 * - An argument is write-only if its name starts with "res".
25 * - All other arguments are read-only, unless otherwise specified.
26 */
27
28 .macro seqcnt_acquire
29 9999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
30 tbnz seqcnt, #0, 9999b
31 dmb ishld
32 .endm
33
34 .macro seqcnt_check fail
35 dmb ishld
36 ldr w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
37 cmp w_tmp, seqcnt
38 b.ne \fail
39 .endm
40
41 .macro syscall_check fail
42 ldr w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
43 cbnz w_tmp, \fail
44 .endm
45
46 .macro get_nsec_per_sec res
47 mov \res, #NSEC_PER_SEC_LO16
48 movk \res, #NSEC_PER_SEC_HI16, lsl #16
49 .endm
50
51 /*
52 * Returns the clock delta, in nanoseconds left-shifted by the clock
53 * shift.
54 */
55 .macro get_clock_shifted_nsec res, cycle_last, mult
56 /* Read the virtual counter. */
57 isb
58 mrs x_tmp, cntvct_el0
59 /* Calculate cycle delta and convert to ns. */
60 sub \res, x_tmp, \cycle_last
61 /* We can only guarantee 56 bits of precision. */
62 movn x_tmp, #0xff00, lsl #48
63 and \res, x_tmp, \res
64 mul \res, \res, \mult
65 /*
66 * Fake address dependency from the value computed from the counter
67 * register to subsequent data page accesses so that the sequence
68 * locking also orders the read of the counter.
69 */
70 and x_tmp, \res, xzr
71 add vdso_data, vdso_data, x_tmp
72 .endm
73
74 /*
75 * Returns in res_{sec,nsec} the REALTIME timespec, based on the
76 * "wall time" (xtime) and the clock_mono delta.
77 */
78 .macro get_ts_realtime res_sec, res_nsec, \
79 clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
80 add \res_nsec, \clock_nsec, \xtime_nsec
81 udiv x_tmp, \res_nsec, \nsec_to_sec
82 add \res_sec, \xtime_sec, x_tmp
83 msub \res_nsec, x_tmp, \nsec_to_sec, \res_nsec
84 .endm
85
86 /*
87 * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
88 * used for CLOCK_MONOTONIC_RAW.
89 */
90 .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
91 udiv \res_sec, \clock_nsec, \nsec_to_sec
92 msub \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
93 .endm
94
95 /* sec and nsec are modified in place. */
96 .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
97 /* Add timespec. */
98 add \sec, \sec, \ts_sec
99 add \nsec, \nsec, \ts_nsec
100
101 /* Normalise the new timespec. */
102 cmp \nsec, \nsec_to_sec
103 b.lt 9999f
104 sub \nsec, \nsec, \nsec_to_sec
105 add \sec, \sec, #1
106 9999:
107 cmp \nsec, #0
108 b.ge 9998f
109 add \nsec, \nsec, \nsec_to_sec
110 sub \sec, \sec, #1
111 9998:
112 .endm
113
114 .macro clock_gettime_return, shift=0
115 .if \shift == 1
116 lsr x11, x11, x12
117 .endif
118 stp x10, x11, [x1, #TSPEC_TV_SEC]
119 mov x0, xzr
120 ret
121 .endm
122
123 .macro jump_slot jumptable, index, label
124 .if (. - \jumptable) != 4 * (\index)
125 .error "Jump slot index mismatch"
126 .endif
127 b \label
128 .endm
129
130 .text
131
132 /* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
133 ENTRY(__kernel_gettimeofday)
134 .cfi_startproc
135 adr vdso_data, _vdso_data
136 /* If tv is NULL, skip to the timezone code. */
137 cbz x0, 2f
138
139 /* Compute the time of day. */
140 1: seqcnt_acquire
141 syscall_check fail=4f
142 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
143 /* w11 = cs_mono_mult, w12 = cs_shift */
144 ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
145 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
146
147 get_nsec_per_sec res=x9
148 lsl x9, x9, x12
149
150 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
151 seqcnt_check fail=1b
152 get_ts_realtime res_sec=x10, res_nsec=x11, \
153 clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
154
155 /* Convert ns to us. */
156 mov x13, #1000
157 lsl x13, x13, x12
158 udiv x11, x11, x13
159 stp x10, x11, [x0, #TVAL_TV_SEC]
160 2:
161 /* If tz is NULL, return 0. */
162 cbz x1, 3f
163 ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
164 stp w4, w5, [x1, #TZ_MINWEST]
165 3:
166 mov x0, xzr
167 ret
168 4:
169 /* Syscall fallback. */
170 mov x8, #__NR_gettimeofday
171 svc #0
172 ret
173 .cfi_endproc
174 ENDPROC(__kernel_gettimeofday)
175
176 #define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE
177
178 /* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
179 ENTRY(__kernel_clock_gettime)
180 .cfi_startproc
181 cmp w0, #JUMPSLOT_MAX
182 b.hi syscall
183 adr vdso_data, _vdso_data
184 adr x_tmp, jumptable
185 add x_tmp, x_tmp, w0, uxtw #2
186 br x_tmp
187
188 ALIGN
189 jumptable:
190 jump_slot jumptable, CLOCK_REALTIME, realtime
191 jump_slot jumptable, CLOCK_MONOTONIC, monotonic
192 b syscall
193 b syscall
194 jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
195 jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
196 jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
197
198 .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
199 .error "Wrong jumptable size"
200 .endif
201
202 ALIGN
203 realtime:
204 seqcnt_acquire
205 syscall_check fail=syscall
206 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
207 /* w11 = cs_mono_mult, w12 = cs_shift */
208 ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
209 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
210
211 /* All computations are done with left-shifted nsecs. */
212 get_nsec_per_sec res=x9
213 lsl x9, x9, x12
214
215 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
216 seqcnt_check fail=realtime
217 get_ts_realtime res_sec=x10, res_nsec=x11, \
218 clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
219 clock_gettime_return, shift=1
220
221 ALIGN
222 monotonic:
223 seqcnt_acquire
224 syscall_check fail=syscall
225 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
226 /* w11 = cs_mono_mult, w12 = cs_shift */
227 ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
228 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
229 ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
230
231 /* All computations are done with left-shifted nsecs. */
232 lsl x4, x4, x12
233 get_nsec_per_sec res=x9
234 lsl x9, x9, x12
235
236 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
237 seqcnt_check fail=monotonic
238 get_ts_realtime res_sec=x10, res_nsec=x11, \
239 clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
240
241 add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
242 clock_gettime_return, shift=1
243
244 ALIGN
245 monotonic_raw:
246 seqcnt_acquire
247 syscall_check fail=syscall
248 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
249 /* w11 = cs_raw_mult, w12 = cs_shift */
250 ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
251 ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
252
253 /* All computations are done with left-shifted nsecs. */
254 get_nsec_per_sec res=x9
255 lsl x9, x9, x12
256
257 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
258 seqcnt_check fail=monotonic_raw
259 get_ts_clock_raw res_sec=x10, res_nsec=x11, \
260 clock_nsec=x15, nsec_to_sec=x9
261
262 add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
263 clock_gettime_return, shift=1
264
265 ALIGN
266 realtime_coarse:
267 seqcnt_acquire
268 ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
269 seqcnt_check fail=realtime_coarse
270 clock_gettime_return
271
272 ALIGN
273 monotonic_coarse:
274 seqcnt_acquire
275 ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
276 ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
277 seqcnt_check fail=monotonic_coarse
278
279 /* Computations are done in (non-shifted) nsecs. */
280 get_nsec_per_sec res=x9
281 add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
282 clock_gettime_return
283
284 ALIGN
285 syscall: /* Syscall fallback. */
286 mov x8, #__NR_clock_gettime
287 svc #0
288 ret
289 .cfi_endproc
290 ENDPROC(__kernel_clock_gettime)
291
292 /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
293 ENTRY(__kernel_clock_getres)
294 .cfi_startproc
295 cmp w0, #CLOCK_REALTIME
296 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
297 ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
298 b.ne 1f
299
300 adr vdso_data, _vdso_data
301 ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
302 b 2f
303 1:
304 cmp w0, #CLOCK_REALTIME_COARSE
305 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
306 b.ne 4f
307 ldr x2, 5f
308 2:
309 cbz x1, 3f
310 stp xzr, x2, [x1]
311
312 3: /* res == NULL. */
313 mov w0, wzr
314 ret
315
316 4: /* Syscall fallback. */
317 mov x8, #__NR_clock_getres
318 svc #0
319 ret
320 5:
321 .quad CLOCK_COARSE_RES
322 .cfi_endproc
323 ENDPROC(__kernel_clock_getres)