// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
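
/*
 * Default PPI numbers for each timer, copied into each VM by
 * kvm_timer_init_vm() and kept until userspace overrides them via the
 * KVM_ARM_VCPU_TIMER_IRQ_* attributes.
 */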
static const u8 default_ppi[] = {
	[TIMER_PTIMER]  = 30,
	[TIMER_VTIMER]  = 27,
	[TIMER_HPTIMER] = 26,
	[TIMER_HVTIMER] = 28,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);
static bool kvm_arch_timer_get_input_level(int vintid);

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static int nr_timers(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_nv(vcpu))
		return NR_KVM_EL0_TIMERS;

	return NR_KVM_TIMERS;
}

u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	case TIMER_HVTIMER:
		return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2);
	case TIMER_HPTIMER:
		return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	case TIMER_HVTIMER:
		return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2);
	case TIMER_HPTIMER:
		return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	u64 offset = 0;

	if (!ctxt)
		return 0;

	if (ctxt->offset.vm_offset)
		offset += *ctxt->offset.vm_offset;
	if (ctxt->offset.vcpu_offset)
		offset += *ctxt->offset.vcpu_offset;

	return offset;
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	if (!ctxt->offset.vm_offset) {
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
		return;
	}

	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}
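
/*
 * Select which timers are backed by hardware ("direct") and which must be
 * emulated with an hrtimer ("emul") for the vcpu's current context. With
 * NV, the EL2 and EL1 views swap roles depending on whether the vcpu is
 * currently running in (virtual) EL2.
 */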
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (vcpu_has_nv(vcpu)) {
		if (is_hyp_ctxt(vcpu)) {
			map->direct_vtimer = vcpu_hvtimer(vcpu);
			map->direct_ptimer = vcpu_hptimer(vcpu);
			map->emul_vtimer = vcpu_vtimer(vcpu);
			map->emul_ptimer = vcpu_ptimer(vcpu);
		} else {
			map->direct_vtimer = vcpu_vtimer(vcpu);
			map->direct_ptimer = vcpu_ptimer(vcpu);
			map->emul_vtimer = vcpu_hvtimer(vcpu);
			map->emul_ptimer = vcpu_hptimer(vcpu);
		}
	} else if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_vtimer = NULL;
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_vtimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}
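
/*
 * Convert the gap between the guest counter and @val into host ns using
 * the timecounter, returning 0 when @val is already in the past.
 */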
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timer_ctx->ns_frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		((timer_get_ctl(timer_ctx) &
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}

static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
	struct arch_timer_context *ctx;

	ctx = is_hyp_ctxt(vcpu) ? vcpu_hvtimer(vcpu) : vcpu_vtimer(vcpu);

	return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}
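
/*
 * Compute the timer output level: if the state is loaded on the CPU, trust
 * the hardware ISTATUS bit; otherwise compare the saved compare value
 * against the current guest counter.
 */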
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
		case TIMER_HVTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
		case TIMER_HPTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
			(cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
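
/*
 * Update the emulated line level and, with an in-kernel irqchip, inject
 * the new level into the vgic. With a userspace irqchip the level is
 * published via kvm_timer_update_run() instead.
 */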
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					  timer_irq(timer_ctx),
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static void set_cntpoff(u64 cntpoff)
{
	if (has_cntpoff())
		write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
}
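
/*
 * Snapshot the state of a loaded timer into the vcpu context and disable
 * the hardware timer, so that it cannot fire while the vcpu is scheduled
 * out. Runs with interrupts disabled.
 */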
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	u64 cval;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		/*
		 * The kernel may decide to run userspace after
		 * calling vcpu_put, so we reset cntvoff to 0 to
		 * ensure a consistent read between user accesses to
		 * the virtual counter and kernel access to the
		 * physical counter of non-VHE case.
		 *
		 * For VHE, the virtual counter uses a fixed virtual
		 * offset of zero, so no need to zero CNTVOFF_EL2
		 * register, but this is actually useful when switching
		 * between EL1/vEL2 with NV.
		 *
		 * Do it unconditionally, as this is either unavoidable
		 * or dirt cheap.
		 */
		set_cntvoff(0);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		cval = read_sysreg_el0(SYS_CNTP_CVAL);

		cval -= timer_get_offset(ctx);

		timer_set_cval(ctx, cval);

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		set_cntpoff(0);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_vtimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}
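
/*
 * Mirror image of timer_save_state(): reprogram the hardware timer from
 * the saved context and mark it loaded. Runs with interrupts disabled.
 */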
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	u64 cval, offset;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		set_cntvoff(timer_get_offset(ctx));
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		cval = timer_get_cval(ctx);
		offset = timer_get_offset(ctx);
		set_cntpoff(offset);
		cval += offset;
		write_sysreg_el0(cval, SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

/* If _pred is true, set bit in _set, otherwise set it in _clr */
#define assign_clear_set_bit(_pred, _bit, _clr, _set)			\
	do {								\
		if (_pred)						\
			(_set) |= (_bit);				\
		else							\
			(_clr) |= (_bit);				\
	} while (0)

static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
					      struct timer_map *map)
{
	int hw, ret;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	/*
	 * We only ever unmap the vtimer irq on a VHE system that runs nested
	 * virtualization, in which case we have both a valid emul_vtimer,
	 * emul_ptimer, direct_vtimer, and direct_ptimer.
	 *
	 * Since this is called from kvm_timer_vcpu_load(), a change between
	 * vEL2 and vEL1/0 will have just happened, and the timer_map will
	 * represent this, and therefore we switch the emul/direct mappings
	 * below.
	 */
	hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer));
	if (hw < 0) {
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));

		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_vtimer->host_timer_irq,
					    timer_irq(map->direct_vtimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_ptimer->host_timer_irq,
					    timer_irq(map->direct_ptimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);

		/*
		 * The virtual offset behaviour is "interesting", as it
		 * always applies when HCR_EL2.E2H==0, but only when
		 * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
		 * track E2H when putting the HV timer in "direct" mode.
		 */
		if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
			struct arch_timer_offset *offs = &map->direct_vtimer->offset;

			if (vcpu_el2_e2h_is_set(vcpu))
				offs->vcpu_offset = NULL;
			else
				offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		}
	}
}

static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	bool tpt, tpc;
	u64 clr, set;

	/*
	 * No trapping gets configured here with nVHE. See
	 * __timer_enable_traps(), which is where the stuff happens.
	 */
	if (!has_vhe())
		return;

	/*
	 * Our default policy is not to trap anything. As we progress
	 * within this function, reality kicks in and we start adding
	 * traps based on emulation requirements.
	 */
	tpt = tpc = false;

	/*
	 * We have two possibilities to deal with a physical offset:
	 *
	 * - Either we have CNTPOFF (yay!) or the offset is 0:
	 *   we let the guest freely access the HW
	 *
	 * - or neither of these conditions apply:
	 *   we trap accesses to the HW, but still use it
	 *   after correcting the physical offset
	 */
	if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
		tpt = tpc = true;

	/*
	 * Apply the enable bits that the guest hypervisor has requested for
	 * its own guest. We can only add traps that wouldn't have been set
	 * above.
	 */
	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);

		/* Use the VHE format for mental sanity */
		if (!vcpu_el2_e2h_is_set(vcpu))
			val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;
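		/*
		 * With E2H==0, EL1PCTEN and EL1PCEN live in bits [1:0] of
		 * CNTHCTL_EL2; the shift by 10 moves them to bits [11:10],
		 * where the E2H==1 layout keeps the equivalent enable bits,
		 * so the tests below work for both register formats.
		 */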

		tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
		tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
	}

	/*
	 * Now that we have collected our requirements, compute the
	 * trap and enable bits.
	 */
	set = 0;
	clr = 0;

	assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
	assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);

	/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
	sysreg_clear_set(cnthctl_el2, clr, set);
}
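
/*
 * Load path, called from vcpu_load(): restore the direct timers to the
 * hardware, refresh the emulated ones, and set up the EL1 timer traps.
 */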
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		if (vcpu_has_nv(vcpu))
			kvm_timer_vcpu_load_nested_switch(vcpu, &map);

		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_vtimer)
		timer_emulate(map.emul_vtimer);
	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);

	timer_set_traps(vcpu, &map);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}
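
/*
 * Put path, called from vcpu_put(): save the direct timers back into the
 * vcpu context, cancel pending emulation, and hand over to the background
 * timer if the vcpu is blocking.
 */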
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	for (int i = 0; i < nr_timers(vcpu); i++)
		timer_set_ctl(vcpu_get_timer(vcpu, i), 0);

	/*
	 * A vcpu running at EL2 is in charge of the offset applied to
	 * the virtual timer, so use the physical VM offset, and point
	 * the vcpu offset to CNTVOFF_EL2.
	 */
	if (vcpu_has_nv(vcpu)) {
		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;

		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
	}

	if (timer->enabled) {
		for (int i = 0; i < nr_timers(vcpu); i++)
			kvm_timer_update_irq(vcpu, false,
					     vcpu_get_timer(vcpu, i));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
		}
	}

	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);
}
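
/*
 * Wire up a single timer context: its backing vcpu, the VM-wide offset it
 * observes, its emulation hrtimer, and the host interrupt it maps to.
 */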
static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
{
	struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
	struct kvm *kvm = vcpu->kvm;

	ctxt->vcpu = vcpu;

	if (timerid == TIMER_VTIMER)
		ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
	else
		ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;

	hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ctxt->hrtimer.function = kvm_hrtimer_expire;

	switch (timerid) {
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		ctxt->host_timer_irq = host_ptimer_irq;
		break;
	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		ctxt->host_timer_irq = host_vtimer_irq;
		break;
	}
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	for (int i = 0; i < NR_KVM_TIMERS; i++)
		timer_context_init(vcpu, i);

	/* Synchronize offsets across timers of a VM if not already provided */
	if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
		timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
		timer_set_offset(vcpu_ptimer(vcpu), 0);
	}

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;
}

void kvm_timer_init_vm(struct kvm *kvm)
{
	for (int i = 0; i < NR_KVM_TIMERS; i++)
		kvm->arch.timer_data.ppi[i] = default_ppi[i];
}

void kvm_timer_cpu_up(void)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	if (host_ptimer_irq)
		enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

void kvm_timer_cpu_down(void)
{
	disable_percpu_irq(host_vtimer_irq);
	if (host_ptimer_irq)
		disable_percpu_irq(host_ptimer_irq);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_vtimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_ptimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	case TIMER_REG_VOFF:
		val = *timer->offset.vcpu_offset;
		break;

	default:
		BUG();
	}

	return val;
}

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	struct arch_timer_context *timer;
	struct timer_map map;
	u64 val;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);

	if (timer == map.emul_vtimer || timer == map.emul_ptimer)
		return kvm_arm_timer_read(vcpu, timer, treg);

	preempt_disable();
	timer_save_state(timer);

	val = kvm_arm_timer_read(vcpu, timer, treg);

	timer_restore_state(timer);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	case TIMER_REG_VOFF:
		*timer->offset.vcpu_offset = val;
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	struct arch_timer_context *timer;
	struct timer_map map;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);
	if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
		soft_timer_cancel(&timer->hrtimer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_emulate(timer);
	} else {
		preempt_disable();
		timer_save_state(timer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_restore_state(timer);
		preempt_enable();
	}
}

static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}
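
/*
 * Probe the host timer interrupts. When the GIC cannot deactivate the
 * timer interrupt in hardware (no_hw_deactivation), interpose a small
 * "kvm-timer" irq domain so that timer_chip can mask/unmask the parent
 * interrupt instead, and ask the vgic to resample the line in software
 * (VGIC_IRQ_SW_RESAMPLE).
 */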
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}

int __init kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_vtimer_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			goto out_free_vtimer_irq;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_ptimer_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_vtimer_irq;
	}

	return 0;

out_free_ptimer_irq:
	if (info->physical_irq > 0)
		free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
out_free_vtimer_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());

	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}
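
/*
 * Check that every timer owns a distinct PPI: each timer must be able to
 * claim its interrupt from the vgic, and the number of distinct PPIs must
 * match the number of timers. Once valid, the assignment is frozen.
 */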
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	u32 ppis = 0;
	bool valid;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	for (int i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx;
		int irq;

		ctx = vcpu_get_timer(vcpu, i);
		irq = timer_irq(ctx);
		if (kvm_vgic_set_owner(vcpu, irq, ctx))
			break;

		/*
		 * We know by construction that we only have PPIs, so
		 * all values are less than 32.
		 */
		ppis |= BIT(irq);
	}

	valid = hweight32(ppis) == nr_timers(vcpu);

	if (valid)
		set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags);

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return valid;
}

static bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	for (int i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx;

		ctx = vcpu_get_timer(vcpu, i);
		if (timer_irq(ctx) == vintid)
			return kvm_timer_should_fire(ctx);
	}

	/* A timer IRQ has fired, but no matching timer was found? */
	WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid);

	return false;
}
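
/*
 * Runs once per vcpu before the first run: validate the timer PPIs and map
 * the direct timers' host interrupts onto their guest PPIs in the vgic.
 */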
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    timer_irq(map.direct_vtimer),
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    timer_irq(map.direct_ptimer),
					    &arch_timer_irq_ops);
		if (ret)
			return ret;
	}

no_vgic:
	timer->enabled = 1;
	return 0;
}

/* If we have CNTPOFF, permanently set ECV to enable it */
void kvm_timer_init_vhe(void)
{
	if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
		sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	int irq, idx, ret = 0;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
		     &vcpu->kvm->arch.flags)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		idx = TIMER_VTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		idx = TIMER_PTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		idx = TIMER_HVTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		idx = TIMER_HPTIMER;
		break;
	default:
		ret = -ENXIO;
		goto out;
	}

	/*
	 * We cannot validate the IRQ uniqueness before we run, so take it at
	 * face value. The verdict will be given on first vcpu run, for each
	 * vcpu. Yes this is late. Blame it on the stupid API.
	 */
	vcpu->kvm->arch.timer_data.ppi[idx] = irq;

out:
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return ret;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		timer = vcpu_hvtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		timer = vcpu_hptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer_irq(timer);
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		return 0;
	}

	return -ENXIO;
}

int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset)
{
	int ret = 0;

	if (offset->reserved)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (lock_all_vcpus(kvm)) {
		set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);

		/*
		 * If userspace decides to set the offset using this
		 * API rather than merely restoring the counter
		 * values, the offset applies to both the virtual and
		 * physical views.
		 */
		kvm->arch.timer_data.voffset = offset->counter_offset;
		kvm->arch.timer_data.poffset = offset->counter_offset;

		unlock_all_vcpus(kvm);
	} else {
		ret = -EBUSY;
	}

	mutex_unlock(&kvm->lock);

	return ret;
}
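
/*
 * A minimal userspace sketch of driving the function above through the
 * KVM_ARM_SET_COUNTER_OFFSET vm ioctl (error handling and capability
 * checks omitted; the offset computation is purely illustrative):
 *
 *	struct kvm_arm_counter_offset off = {
 *		.counter_offset = host_counter - guest_counter,
 *	};
 *	ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
 */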