// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>
#include <linux/sched/cputime.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"

static u64 itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

static struct clocksource clocksource_itc = {
        .name           = "itc",
        .rating         = 350,
        .read           = itc_get_cycles,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern u64 cycle_to_nsec(u64 cyc);
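
/*
 * Editor's note (assumption; cycle_to_nsec() is defined elsewhere in
 * the tree): it converts ITC cycles to nanoseconds using the
 * nsec_per_cyc fixed-point factor computed in ia64_init_itm() below,
 * roughly:
 *
 *      nsec = (cyc * local_cpu_data->nsec_per_cyc)
 *                      >> IA64_NSEC_PER_CYC_SHIFT;
 */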

void vtime_flush(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 delta;

        if (ti->utime)
                account_user_time(tsk, cycle_to_nsec(ti->utime));

        if (ti->gtime)
                account_guest_time(tsk, cycle_to_nsec(ti->gtime));

        if (ti->idle_time)
                account_idle_time(cycle_to_nsec(ti->idle_time));

        if (ti->stime) {
                delta = cycle_to_nsec(ti->stime);
                account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
        }

        if (ti->hardirq_time) {
                delta = cycle_to_nsec(ti->hardirq_time);
                account_system_index_time(tsk, delta, CPUTIME_IRQ);
        }

        if (ti->softirq_time) {
                delta = cycle_to_nsec(ti->softirq_time);
                account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
        }

        ti->utime = 0;
        ti->gtime = 0;
        ti->idle_time = 0;
        ti->stime = 0;
        ti->hardirq_time = 0;
        ti->softirq_time = 0;
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting for
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
        struct thread_info *pi = task_thread_info(prev);
        struct thread_info *ni = task_thread_info(current);

        ni->ac_stamp = pi->ac_stamp;
        ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts disabled (see the
 * WARN_ON_ONCE() below).
 */
static __u64 vtime_delta(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        __u64 now, delta_stime;

        WARN_ON_ONCE(!irqs_disabled());

        now = ia64_get_itc();
        delta_stime = now - ti->ac_stamp;
        ti->ac_stamp = now;

        return delta_stime;
}

void vtime_account_system(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        __u64 stime = vtime_delta(tsk);

        if ((tsk->flags & PF_VCPU) && !irq_count())
                ti->gtime += stime;
        else if (hardirq_count())
                ti->hardirq_time += stime;
        else if (in_serving_softirq())
                ti->softirq_time += stime;
        else
                ti->stime += stime;
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);

        ti->idle_time += vtime_delta(tsk);
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
        unsigned long new_itm;

        if (cpu_is_offline(smp_processor_id())) {
                return IRQ_HANDLED;
        }

        platform_timer_interrupt(irq, dev_id);

        new_itm = local_cpu_data->itm_next;

        if (!time_after(ia64_get_itc(), new_itm))
                printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
                       ia64_get_itc(), new_itm);

        profile_tick(CPU_PROFILING);

        while (1) {
                update_process_times(user_mode(get_irq_regs()));

                new_itm += local_cpu_data->itm_delta;

                if (smp_processor_id() == time_keeper_id)
                        xtime_update(1);

                local_cpu_data->itm_next = new_itm;

                if (time_after(new_itm, ia64_get_itc()))
                        break;

                /*
                 * Allow IPIs to interrupt the timer loop.
                 */
                local_irq_enable();
                local_irq_disable();
        }

        do {
                /*
                 * If we're too close to the next clock tick for
                 * comfort, we increase the safety margin by
                 * intentionally dropping the next tick(s).  We do NOT
                 * update itm.next because that would force us to call
                 * xtime_update() which in turn would let our clock run
                 * too fast (with the potentially devastating effect
                 * of losing the monotonicity of time).
                 */
                while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
                        new_itm += local_cpu_data->itm_delta;
                ia64_set_itm(new_itm);
                /* double check, in case we got hit by a (slow) PMI: */
        } while (time_after_eq(ia64_get_itc(), new_itm));
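
        /*
         * Worked example (editor's sketch): with itm_delta = D, suppose
         * the catch-up loop above left new_itm only D/4 ahead of the
         * ITC.  The margin loop then advances new_itm by another D, so
         * the next interrupt fires at least D/2 in the future; the
         * intermediate tick is deliberately dropped, as explained above.
         */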
        return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
        int cpu = smp_processor_id();
        unsigned long shift = 0, delta;

        /* arrange for the cycle counter to generate a timer interrupt: */
        ia64_set_itv(IA64_TIMER_VECTOR);

        delta = local_cpu_data->itm_delta;
        /*
         * Stagger the timer tick for each CPU so they don't occur all at (almost) the
         * same time:
         */
        if (cpu) {
                unsigned long hi = 1UL << ia64_fls(cpu);
                shift = (2*(cpu - hi) + 1) * delta/hi/2;
        }
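        /*
         * Worked example (editor's sketch): ia64_fls() returns the
         * position of the highest set bit, so CPUs 1..3 get hi = 1, 2, 2
         * and shift = delta/2, delta/4, 3*delta/4 respectively: each
         * power-of-two group of CPUs is spread at odd multiples of
         * delta/(2*hi) across the tick interval.
         */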
        local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
        ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
        nojitter = 1;
        printk("Jitter checking for ITC timers disabled\n");
        return 1;
}

__setup("nojitter", nojitter_setup);
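
/*
 * Usage (editor's note): "nojitter" is registered by the __setup() hook
 * above, i.e. booting with "nojitter" on the kernel command line skips
 * the cmpxchg based jitter compensation in itc_get_cycles().
 */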

void ia64_init_itm(void)
{
        unsigned long platform_base_freq, itc_freq;
        struct pal_freq_ratio itc_ratio, proc_ratio;
        long status, platform_base_drift, itc_drift;

        /*
         * According to SAL v2.6, we need to use a SAL call to determine the platform base
         * frequency and then a PAL call to determine the frequency ratio between the ITC
         * and the base frequency.
         */
        status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
                                    &platform_base_freq, &platform_base_drift);
        if (status != 0) {
                printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
        } else {
                status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
                if (status != 0)
                        printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
        }
        if (status != 0) {
                /* invent "random" values */
                printk(KERN_ERR
                       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
                platform_base_freq = 100000000;
                platform_base_drift = -1;       /* no drift info */
                itc_ratio.num = 3;
                itc_ratio.den = 1;
        }
        if (platform_base_freq < 40000000) {
                printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
                       platform_base_freq);
                platform_base_freq = 75000000;
                platform_base_drift = -1;
        }
        if (!proc_ratio.den)
                proc_ratio.den = 1;     /* avoid division by zero */
        if (!itc_ratio.den)
                itc_ratio.den = 1;      /* avoid division by zero */

        itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

        local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
        printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
               "ITC freq=%lu.%03luMHz", smp_processor_id(),
               platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
               itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

        if (platform_base_drift != -1) {
                itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
                printk("+/-%ldppm\n", itc_drift);
        } else {
                itc_drift = -1;
                printk("\n");
        }

        local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
        local_cpu_data->itc_freq = itc_freq;
        local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
        local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
                                        + itc_freq/2)/itc_freq;
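
        /*
         * Worked example (editor's sketch, assuming HZ = 250): with the
         * fallback values above (base = 100MHz, ITC ratio = 3/1),
         *
         *      itc_freq     = 100000000 * 3 / 1    = 300000000
         *      itm_delta    = (300000000 + 125)/250 = 1200000 cycles/tick
         *      cyc_per_usec = 300
         *
         * and nsec_per_cyc holds the fixed-point inverse, ~3.333ns per
         * cycle scaled by 2^IA64_NSEC_PER_CYC_SHIFT.
         */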

        if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
                /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
                 * Jitter compensation requires a cmpxchg which may limit
                 * the scalability of the syscalls for retrieving time.
                 * The ITC synchronization is usually successful to within a few
                 * ITC ticks but this is not a sure thing. If you need to improve
                 * timer performance in SMP situations then boot the kernel with the
                 * "nojitter" option. However, doing so may result in time fluctuating (maybe
                 * even going backward) if the ITC offsets between the individual CPUs
                 * are too large.
                 */
                if (!nojitter)
                        itc_jitter_data.itc_jitter = 1;
#endif
        } else
                /*
                 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
                 * ITC values may fluctuate significantly between processors.
                 * Clock should not be used for hrtimers. Mark itc as only
                 * useful for boot and testing.
                 *
                 * Note that jitter compensation is off! There is no point in
                 * compensating for jitter, since the ITC offsets may be large
                 * and may change over time.
                 *
                 * The only way to fix this would be to repeatedly sync the
                 * ITCs. Until that time we have to avoid ITC.
                 */
                clocksource_itc.rating = 50;

        /* avoid a softlockup message when a cpu is unplugged and plugged in again */
        touch_softlockup_watchdog();

        /* Setup the CPU local timer tick */
        ia64_cpu_local_tick();

        if (!itc_clocksource) {
                clocksource_register_hz(&clocksource_itc,
                                        local_cpu_data->itc_freq);
                itc_clocksource = &clocksource_itc;
        }
}

static u64 itc_get_cycles(struct clocksource *cs)
{
        unsigned long lcycle, now, ret;

        if (!itc_jitter_data.itc_jitter)
                return get_cycles();

        lcycle = itc_jitter_data.itc_lastcycle;
        now = get_cycles();
        if (lcycle && time_after(lcycle, now))
                return lcycle;

        /*
         * Keep track of the last timer value returned.
         * In an SMP environment a CPU may lose the cmpxchg race; if so,
         * cmpxchg returns the value that the winner of the race stored,
         * so return that value instead.
         */
        ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
        if (unlikely(ret != lcycle))
                return ret;

        return now;
}

static struct irqaction timer_irqaction = {
        .handler = timer_interrupt,
        .flags   = IRQF_IRQPOLL,
        .name    = "timer"
};

void read_persistent_clock64(struct timespec64 *ts)
{
        efi_gettimeofday(ts);
}

void __init
time_init (void)
{
        register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
        ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
        unsigned long start = ia64_get_itc();
        unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

        while (time_before(ia64_get_itc(), end))
                cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
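
/*
 * Editor's note: the indirection above lets a platform whose ITC is
 * unsuitable for delay loops install its own routine, e.g. (sketch;
 * my_platform_udelay() is hypothetical):
 *
 *      ia64_udelay = &my_platform_udelay;
 */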

void
udelay (unsigned long usecs)
{
        (*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timekeeper *tk)
{
        write_seqcount_begin(&fsyscall_gtod_data.seq);

        /* copy vsyscall data */
        fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
        fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
        fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
        fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
        fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;

        fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
        fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;

        fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
                + tk->wall_to_monotonic.tv_sec;
        fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
                + ((u64)tk->wall_to_monotonic.tv_nsec
                        << tk->tkr_mono.shift);

        /* normalize */
        while (fsyscall_gtod_data.monotonic_time.snsec >=
                        (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                fsyscall_gtod_data.monotonic_time.snsec -=
                        ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
                fsyscall_gtod_data.monotonic_time.sec++;
        }

        write_seqcount_end(&fsyscall_gtod_data.seq);
}
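
/*
 * Editor's sketch of how a consumer turns the data above into a
 * timestamp (the real consumer is the fsyscall gettimeofday path),
 * assuming the kernel's standard mult/shift conversion:
 *
 *      cycles = (read_clock() - clk_cycle_last) & clk_mask;
 *      nsec   = (wall_time.snsec + cycles * clk_mult) >> clk_shift;
 *      sec    = wall_time.sec;
 *
 * i.e. the ".snsec" fields hold nanoseconds shifted left by clk_shift,
 * which is why wall_to_monotonic.tv_nsec is shifted before being added
 * in above.
 */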