]>
Commit | Line | Data |
---|---|---|
2874c5fd | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
a7f290da BH |
2 | /* |
3 | * Userland implementation of gettimeofday() for 32 bits processes in a | |
4 | * ppc64 kernel for use in the vDSO | |
5 | * | |
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org,
7 | * IBM Corp. | |
a7f290da | 8 | */ |
a7f290da BH |
9 | #include <asm/processor.h> |
10 | #include <asm/ppc_asm.h> | |
11 | #include <asm/vdso.h> | |
ec0895f0 | 12 | #include <asm/vdso_datapage.h> |
a7f290da BH |
13 | #include <asm/asm-offsets.h> |
14 | #include <asm/unistd.h> | |
15 | ||
597bc5c0 PM |
16 | /* Offset for the low 32-bit part of a field of long type */ |
17 | #ifdef CONFIG_PPC64 | |
18 | #define LOPART 4 | |
19 | #else | |
20 | #define LOPART 0 | |
21 | #endif | |
22 | ||
a7f290da BH |
	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 * Fast userspace path: reads time from the vDSO data page instead of
 * entering the kernel.  tv and/or tz may be NULL.
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr.	r10,r3			/* r10 saves tv (and sets cr0 for the NULL test) */
	mr	r11,r4			/* r11 saves tz */
	get_datapage	r9, r0		/* r9 = vDSO data page pointer */
	beq	3f			/* tv == NULL: skip the time computation */
	LOAD_REG_IMMEDIATE(r7, 1000000)	/* load up USEC_PER_SEC */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	mtlr	r12
	crclr	cr0*4+so		/* clear cr0.SO: signal success to the syscall ABI */
	li	r3,0			/* return 0 */
	beqlr				/* tz == NULL: done */

	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz from the data page */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
58 | ||
/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 * Handles CLOCK_REALTIME, CLOCK_MONOTONIC and their _COARSE variants in
 * userspace; any other clock id falls back to the real syscall.
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs.  After this sequence:
	 * cr0.eq = (REALTIME || MONOTONIC || REALTIME_COARSE || MONOTONIC_COARSE)
	 * cr1.eq = MONOTONIC, cr5.eq = (either coarse), cr6.eq = MONOTONIC_COARSE
	 */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq

	cmpli	cr5,r3,CLOCK_REALTIME_COARSE
	cmpli	cr6,r3,CLOCK_MONOTONIC_COARSE
	cror	cr5*4+eq,cr5*4+eq,cr6*4+eq

	cror	cr0*4+eq,cr0*4+eq,cr5*4+eq
	bne	cr0, .Lgettime_fallback

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	get_datapage	r9, r0		/* r9 = vDSO data page pointer */
	LOAD_REG_IMMEDIATE(r7, NSEC_PER_SEC)	/* load up NSEC_PER_SEC */
	beq	cr5, .Lcoarse_clocks
.Lprecise_clocks:
	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1, .Lfinish		/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* now we must fixup using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_tspec.
	 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
	 * can be used, r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter.  (xor r0,r0,r0 yields 0
	 * but forces the load of r5/r6 to complete before the re-read.)
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	.Lprecise_clocks	/* seqcount changed: retry whole read */
	b	.Lfinish_monotonic

	/*
	 * For coarse clocks we get data directly from the vdso data page, so
	 * we don't need to call __do_get_tspec, but we still need to do the
	 * counter trick.
	 */
.Lcoarse_clocks:
	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	.Lcoarse_clocks
	add	r9,r9,r0		/* r0 is already 0; fake dependency on the count */

	/*
	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
	 * too
	 */
	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)
	lwz	r4,STAMP_XTIME_NSEC+LOPART(r9)
	bne	cr6,1f			/* not MONOTONIC_COARSE: skip wtom load */

	/* CLOCK_MONOTONIC_COARSE */
	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* check if counter has updated (same fake-dependency trick) */
	or	r0,r6,r5
1:	or	r0,r0,r3
	or	r0,r0,r4
	xor	r0,r0,r0
	add	r3,r3,r0
	lwz	r0,CFG_TB_UPDATE_COUNT+LOPART(r9)
	cmpl	cr0,r0,r8		/* check if updated */
	bne-	.Lcoarse_clocks

	/* Counter has not updated, so continue calculating proper values for
	 * sec and nsec if monotonic coarse, or just return with the proper
	 * values for realtime.
	 */
	bne	cr6, .Lfinish

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all ?
	 *
	 * Adds the wall-to-monotonic offset (r5,r6) to sec/nsec (r3,r4)
	 * and normalizes nsec into [0, NSEC_PER_SEC).
	 */
.Lfinish_monotonic:
	add	r3,r3,r5
	add	r4,r4,r6
	cmpw	cr0,r4,r7		/* nsec >= NSEC_PER_SEC ? */
	cmpwi	cr1,r4,0		/* nsec < 0 ? */
	blt	1f
	subf	r4,r7,r4		/* carry one second up */
	addi	r3,r3,1
1:	bge	cr1, .Lfinish
	addi	r3,r3,-1		/* borrow one second down */
	add	r4,r4,r7

.Lfinish:
	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so		/* clear cr0.SO: signal success */
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
.Lgettime_fallback:
	li	r0,__NR_clock_gettime
  .cfi_restore lr
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)
188 | ||
189 | ||
/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 * Coarse clocks report KTIME_LOW_RES; other supported clocks report the
 * hrtimer resolution from the vDSO data page.  res may be NULL.
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmplwi	cr0, r3, CLOCK_MAX
	cmpwi	cr1, r3, CLOCK_REALTIME_COARSE
	cmpwi	cr7, r3, CLOCK_MONOTONIC_COARSE
	bgt	cr0, 99f		/* id > CLOCK_MAX: fall back to syscall */
	LOAD_REG_IMMEDIATE(r5, KTIME_LOW_RES)
	beq	cr1, 1f			/* coarse clocks keep KTIME_LOW_RES in r5 */
	beq	cr7, 1f

	mflr	r12
  .cfi_register lr,r12
	get_datapage	r3, r0		/* r3 = vDSO data page pointer */
	lwz	r5, CLOCK_HRTIMER_RES(r3)	/* r5 = hrtimer resolution (ns) */
	mtlr	r12
1:	li	r3,0			/* return 0 */
	cmpli	cr0,r4,0		/* check if res is NULL */
	crclr	cr0*4+so		/* clear cr0.SO: signal success */
	beqlr
	stw	r3,TSPC32_TV_SEC(r4)	/* res->tv_sec = 0 */
	stw	r5,TSPC32_TV_NSEC(r4)	/* res->tv_nsec = resolution */
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
229 | ||
230 | ||
fcb41a20 AZ |
/*
 * Exact prototype of time()
 *
 * time_t time(time_t *t);
 *
 * Reads the cached xtime seconds straight from the vDSO data page;
 * no counter/retry loop is needed since a single word load is atomic.
 */
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	get_datapage	r9, r0		/* r9 = vDSO data page pointer */

	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)	/* return value: current seconds */

	cmplwi	r11,0			/* check if t is NULL */
	mtlr	r12
	crclr	cr0*4+so		/* clear cr0.SO: signal success */
	beqlr
	stw	r3,0(r11)		/* store result at *t */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)
255 | ||
a7f290da | 256 | /* |
8fd63a9e PM |
257 | * This is the core of clock_gettime() and gettimeofday(), |
258 | * it returns the current time in r3 (seconds) and r4. | |
259 | * On entry, r7 gives the resolution of r4, either USEC_PER_SEC | |
260 | * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds. | |
597bc5c0 | 261 | * It expects the datapage ptr in r9 and doesn't clobber it. |
8fd63a9e | 262 | * It clobbers r0, r5 and r6. |
597bc5c0 PM |
263 | * On return, r8 contains the counter value that can be reused. |
264 | * This clobbers cr0 but not any other cr field. | |
265 | */ | |
266 | __do_get_tspec: | |
267 | .cfi_startproc | |
268 | /* Check for update count & load values. We use the low | |
269 | * order 32 bits of the update count | |
270 | */ | |
271 | 1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9) | |
272 | andi. r0,r8,1 /* pending update ? loop */ | |
273 | bne- 1b | |
274 | xor r0,r8,r8 /* create dependency */ | |
275 | add r9,r9,r0 | |
276 | ||
277 | /* Load orig stamp (offset to TB) */ | |
278 | lwz r5,CFG_TB_ORIG_STAMP(r9) | |
279 | lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) | |
280 | ||
281 | /* Get a stable TB value */ | |
72e4b2cd CL |
282 | 2: MFTBU(r3) |
283 | MFTBL(r4) | |
284 | MFTBU(r0) | |
8fd63a9e | 285 | cmplw cr0,r3,r0 |
597bc5c0 PM |
286 | bne- 2b |
287 | ||
288 | /* Subtract tb orig stamp and shift left 12 bits. | |
289 | */ | |
8fd63a9e | 290 | subfc r4,r6,r4 |
597bc5c0 PM |
291 | subfe r0,r5,r3 |
292 | slwi r0,r0,12 | |
8fd63a9e PM |
293 | rlwimi. r0,r4,12,20,31 |
294 | slwi r4,r4,12 | |
597bc5c0 | 295 | |
8fd63a9e PM |
296 | /* |
297 | * Load scale factor & do multiplication. | |
298 | * We only use the high 32 bits of the tb_to_xs value. | |
299 | * Even with a 1GHz timebase clock, the high 32 bits of | |
300 | * tb_to_xs will be at least 4 million, so the error from | |
301 | * ignoring the low 32 bits will be no more than 0.25ppm. | |
302 | * The error will just make the clock run very very slightly | |
303 | * slow until the next time the kernel updates the VDSO data, | |
304 | * at which point the clock will catch up to the kernel's value, | |
305 | * so there is no long-term error accumulation. | |
306 | */ | |
597bc5c0 | 307 | lwz r5,CFG_TB_TO_XS(r9) /* load values */ |
8fd63a9e | 308 | mulhwu r4,r4,r5 |
597bc5c0 PM |
309 | li r3,0 |
310 | ||
311 | beq+ 4f /* skip high part computation if 0 */ | |
312 | mulhwu r3,r0,r5 | |
8fd63a9e | 313 | mullw r5,r0,r5 |
597bc5c0 PM |
314 | addc r4,r4,r5 |
315 | addze r3,r3 | |
8fd63a9e PM |
316 | 4: |
317 | /* At this point, we have seconds since the xtime stamp | |
318 | * as a 32.32 fixed-point number in r3 and r4. | |
319 | * Load & add the xtime stamp. | |
597bc5c0 | 320 | */ |
1c11ca7a | 321 | lwz r5,STAMP_XTIME_SEC+LOPART(r9) |
8fd63a9e PM |
322 | lwz r6,STAMP_SEC_FRAC(r9) |
323 | addc r4,r4,r6 | |
597bc5c0 PM |
324 | adde r3,r3,r5 |
325 | ||
8fd63a9e PM |
326 | /* We create a fake dependency on the result in r3/r4 |
327 | * and re-check the counter | |
597bc5c0 PM |
328 | */ |
329 | or r6,r4,r3 | |
330 | xor r0,r6,r6 | |
331 | add r9,r9,r0 | |
332 | lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9) | |
8fd63a9e | 333 | cmplw cr0,r8,r0 /* check if updated */ |
597bc5c0 PM |
334 | bne- 1b |
335 | ||
8fd63a9e | 336 | mulhwu r4,r4,r7 /* convert to micro or nanoseconds */ |
597bc5c0 PM |
337 | |
338 | blr | |
339 | .cfi_endproc |