arch/x86/xen/time.c
/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

#define XEN_SHIFT 22

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP      100000
#define NS_PER_TICK     (1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
static DEFINE_PER_CPU(u64, xen_residual_blocked);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
        u64 ret;

        if (BITS_PER_LONG < 64) {
                u32 *p32 = (u32 *)p;
                u32 h, l;

                /*
                 * Read high then low, and then make sure high is
                 * still the same; this will only loop if low wraps
                 * and carries into high.
                 * XXX some clean way to make this endian-proof?
                 */
                do {
                        h = p32[1];
                        barrier();
                        l = p32[0];
                        barrier();
                } while (p32[1] != h);

                ret = (((u64)h) << 32) | l;
        } else
                ret = *p;

        return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
        u64 state_time;
        struct vcpu_runstate_info *state;

        BUG_ON(preemptible());

        state = &__get_cpu_var(xen_runstate);

        /*
         * The runstate info is always updated by the hypervisor on
         * the current CPU, so there's no need to use anything
         * stronger than a compiler barrier when fetching it.
         */
        do {
                state_time = get64(&state->state_entry_time);
                barrier();
                *res = *state;
                barrier();
        } while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
        return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

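/* Point Xen at this CPU's xen_runstate area so it is kept up to date. */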
void xen_setup_runstate_info(int cpu)
{
        struct vcpu_register_runstate_memory_area area;

        area.addr.v = &per_cpu(xen_runstate, cpu);

        if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                               cpu, &area))
                BUG();
}

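/*
 * Convert the runstate deltas since the last snapshot into stolen and
 * idle ticks for the core accounting code.
 */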
static void do_stolen_accounting(void)
{
        struct vcpu_runstate_info state;
        struct vcpu_runstate_info *snap;
        s64 blocked, runnable, offline, stolen;
        cputime_t ticks;

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        snap = &__get_cpu_var(xen_runstate_snapshot);

        /* work out how much time the VCPU has not been runn*ing* */
        blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
        runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
        offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

        *snap = state;

        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
        stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);

        if (stolen < 0)
                stolen = 0;

        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
        __get_cpu_var(xen_residual_stolen) = stolen;
        account_steal_ticks(ticks);

        /* Add the appropriate number of ticks of blocked time,
           including any left-overs from last time. */
        blocked += __get_cpu_var(xen_residual_blocked);

        if (blocked < 0)
                blocked = 0;

        ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
        __get_cpu_var(xen_residual_blocked) = blocked;
        account_idle_ticks(ticks);
}

/*
 * Xen sched_clock implementation. Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
unsigned long long xen_sched_clock(void)
{
        struct vcpu_runstate_info state;
        cycle_t now;
        u64 ret;
        s64 offset;

        /*
         * Ideally sched_clock should be called on a per-cpu basis
         * anyway, so preempt should already be disabled, but that's
         * not current practice at the moment.
         */
        preempt_disable();

        now = xen_clocksource_read();

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        offset = now - state.state_entry_time;
        if (offset < 0)
                offset = 0;

        ret = state.time[RUNSTATE_blocked] +
                state.time[RUNSTATE_running] +
                offset;

        preempt_enable();

        return ret;
}


/* Get the TSC speed from Xen */
unsigned long xen_tsc_khz(void)
{
        struct pvclock_vcpu_time_info *info =
                &HYPERVISOR_shared_info->vcpu_info[0].time;

        return pvclock_tsc_khz(info);
}

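/* Read the current system time from this vcpu's pvclock time info. */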
cycle_t xen_clocksource_read(void)
{
        struct pvclock_vcpu_time_info *src;
        cycle_t ret;

        src = &get_cpu_var(xen_vcpu)->time;
        ret = pvclock_clocksource_read(src);
        put_cpu_var(xen_vcpu);
        return ret;
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
        return xen_clocksource_read();
}

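/* Read the hypervisor's idea of wallclock time into *ts. */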
static void xen_read_wallclock(struct timespec *ts)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        struct pvclock_wall_clock *wall_clock = &(s->wc);
        struct pvclock_vcpu_time_info *vcpu_time;

        vcpu_time = &get_cpu_var(xen_vcpu)->time;
        pvclock_read_wallclock(wall_clock, vcpu_time, ts);
        put_cpu_var(xen_vcpu);
}

unsigned long xen_get_wallclock(void)
{
        struct timespec ts;

        xen_read_wallclock(&ts);
        return ts.tv_sec;
}

int xen_set_wallclock(unsigned long now)
{
        /* do nothing for domU */
        return -1;
}

static struct clocksource xen_clocksource __read_mostly = {
        .name = "xen",
        .rating = 400,
        .read = xen_clocksource_get_cycles,
        .mask = ~0,
        .mult = 1<<XEN_SHIFT,           /* time directly in nanoseconds */
        .shift = XEN_SHIFT,
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4. This version of the hypervisor provides a
   single-shot timer with nanosecond resolution. However, sharing the
   same event channel is a 100Hz tick which is delivered while the
   vcpu is running. We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time. It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (ie, at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off. The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/


/*
  Get a hypervisor absolute time. In theory we could maintain an
  offset between the kernel's time and the hypervisor's time, and
  apply that to a kernel's absolute timeout. Unfortunately the
  hypervisor and kernel times can drift even if the kernel is using
  the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
        return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                /* unsupported */
                WARN_ON(1);
                break;

        case CLOCK_EVT_MODE_ONESHOT:
        case CLOCK_EVT_MODE_RESUME:
                break;

        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                HYPERVISOR_set_timer_op(0);  /* cancel timeout */
                break;
        }
}

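/* Program a one-shot timeout using the timer_op hypercall. */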
static int xen_timerop_set_next_event(unsigned long delta,
                                      struct clock_event_device *evt)
{
        WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

        if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
                BUG();

        /* We may have missed the deadline, but there's no real way of
           knowing for sure. If the event was in the past, then we'll
           get an immediate interrupt. */

        return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
        .name = "xen",
        .features = CLOCK_EVT_FEAT_ONESHOT,

        .max_delta_ns = 0xffffffff,
        .min_delta_ns = TIMER_SLOP,

        .mult = 1,
        .shift = 0,
        .rating = 500,

        .set_mode = xen_timerop_set_mode,
        .set_next_event = xen_timerop_set_next_event,
};



static void xen_vcpuop_set_mode(enum clock_event_mode mode,
                                struct clock_event_device *evt)
{
        int cpu = smp_processor_id();

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                WARN_ON(1);     /* unsupported */
                break;

        case CLOCK_EVT_MODE_ONESHOT:
                if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
                        BUG();
                break;

        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
                    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
                        BUG();
                break;
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

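/* Program a one-shot timeout using the per-vcpu singleshot timer. */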
static int xen_vcpuop_set_next_event(unsigned long delta,
                                     struct clock_event_device *evt)
{
        int cpu = smp_processor_id();
        struct vcpu_set_singleshot_timer single;
        int ret;

        WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

        single.timeout_abs_ns = get_abs_timeout(delta);
        single.flags = VCPU_SSHOTTMR_future;

        ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

        BUG_ON(ret != 0 && ret != -ETIME);

        return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
        .name = "xen",
        .features = CLOCK_EVT_FEAT_ONESHOT,

        .max_delta_ns = 0xffffffff,
        .min_delta_ns = TIMER_SLOP,

        .mult = 1,
        .shift = 0,
        .rating = 500,

        .set_mode = xen_vcpuop_set_mode,
        .set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
        &xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);

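/*
 * Per-cpu timer interrupt: run the clockevent handler and update
 * stolen-time accounting.
 */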
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
        irqreturn_t ret;

        ret = IRQ_NONE;
        if (evt->event_handler) {
                evt->event_handler(evt);
                ret = IRQ_HANDLED;
        }

        do_stolen_accounting();

        return ret;
}

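/* Bind VIRQ_TIMER for this CPU and set up its clock_event_device. */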
void xen_setup_timer(int cpu)
{
        const char *name;
        struct clock_event_device *evt;
        int irq;

        printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

        name = kasprintf(GFP_KERNEL, "timer%d", cpu);
        if (!name)
                name = "<timer kasprintf failed>";

        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
                                      name, NULL);

        evt = &per_cpu(xen_clock_events, cpu);
        memcpy(evt, xen_clockevent, sizeof(*evt));

        evt->cpumask = cpumask_of(cpu);
        evt->irq = irq;
}

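/* Tear down the timer interrupt for a (non-boot) CPU going offline. */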
void xen_teardown_timer(int cpu)
{
        struct clock_event_device *evt;
        BUG_ON(cpu == 0);
        evt = &per_cpu(xen_clock_events, cpu);
        unbind_from_irqhandler(evt->irq, NULL);
}

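/* Register this CPU's clockevent device; must be called on that CPU. */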
void xen_setup_cpu_clockevents(void)
{
        BUG_ON(preemptible());

        clockevents_register_device(&__get_cpu_var(xen_clock_events));
}

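/*
 * After resume, make sure the periodic tick is stopped again on every
 * online vcpu when the vcpu_op timer interface is in use.
 */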
void xen_timer_resume(void)
{
        int cpu;

        if (xen_clockevent != &xen_vcpuop_clockevent)
                return;

        for_each_online_cpu(cpu) {
                if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
                        BUG();
        }
}

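/*
 * Boot-time setup: register the clocksource, choose a clockevent
 * implementation and set the initial wallclock time.
 */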
__init void xen_time_init(void)
{
        int cpu = smp_processor_id();

        clocksource_register(&xen_clocksource);

        if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
                /* Successfully turned off 100Hz tick, so we have the
                   vcpuop-based timer interface */
                printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
                xen_clockevent = &xen_vcpuop_clockevent;
        }

        /* Set initial system time with full resolution */
        xen_read_wallclock(&xtime);
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        setup_force_cpu_cap(X86_FEATURE_TSC);

        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_setup_cpu_clockevents();
}