// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
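
/*
 * Note: the defaults below match the PPI INTIDs conventionally used for
 * the arch timers (as in the arm,arch_timer DT binding); userspace may
 * override them per VM via the KVM_ARM_VCPU_TIMER_IRQ_* attributes until
 * the first vcpu run makes them immutable.
 */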
static const u8 default_ppi[] = {
	[TIMER_PTIMER]  = 30,
	[TIMER_VTIMER]  = 27,
	[TIMER_HPTIMER] = 26,
	[TIMER_HVTIMER] = 28,
};
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);
static bool kvm_arch_timer_get_input_level(int vintid);

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};
static int nr_timers(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_nv(vcpu))
		return NR_KVM_EL0_TIMERS;

	return NR_KVM_TIMERS;
}
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	case TIMER_HVTIMER:
		return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2);
	case TIMER_HPTIMER:
		return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2);
	default:
		WARN_ON(1);
		return 0;
	}
}
u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	case TIMER_HVTIMER:
		return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2);
	case TIMER_HPTIMER:
		return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
	default:
		WARN_ON(1);
		return 0;
	}
}
static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	u64 offset = 0;

	if (!ctxt)
		return 0;

	if (ctxt->offset.vm_offset)
		offset += *ctxt->offset.vm_offset;
	if (ctxt->offset.vcpu_offset)
		offset += *ctxt->offset.vcpu_offset;

	return offset;
}
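
/*
 * Worked example: with a VM-wide offset V and (for NV) a per-vcpu
 * CNTVOFF_EL2 offset C, the guest-visible count is
 *	CNT_guest = CNTPCT_EL0 - (V + C)
 * which is why the code below computes "now" as
 * kvm_phys_timer_read() - timer_get_offset(ctx).
 */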
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}
static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
		break;
	default:
		WARN_ON(1);
	}
}
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	if (!ctxt->offset.vm_offset) {
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
		return;
	}

	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}
u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (vcpu_has_nv(vcpu)) {
		if (is_hyp_ctxt(vcpu)) {
			map->direct_vtimer = vcpu_hvtimer(vcpu);
			map->direct_ptimer = vcpu_hptimer(vcpu);
			map->emul_vtimer = vcpu_vtimer(vcpu);
			map->emul_ptimer = vcpu_ptimer(vcpu);
		} else {
			map->direct_vtimer = vcpu_vtimer(vcpu);
			map->direct_ptimer = vcpu_ptimer(vcpu);
			map->emul_vtimer = vcpu_hvtimer(vcpu);
			map->emul_ptimer = vcpu_hptimer(vcpu);
		}
	} else if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_vtimer = NULL;
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_vtimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}
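
/*
 * In short: "direct" timers are backed by the hardware timers and have
 * their state loaded on the CPU while the vcpu runs, while "emul" timers
 * are fully emulated with an hrtimer. Without VHE only the vtimer is
 * direct; with NV the direct/emulated roles of the EL1 and EL2 timers
 * swap on each vEL2<->vEL1 transition, as reflected above.
 */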
static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timer_ctx->ns_frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}
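
/*
 * Roughly: the deltas above convert a distance in counter cycles
 * (CVAL - now) into nanoseconds using the host timecounter; e.g. at a
 * 25MHz counter frequency, a delta of 25000 cycles is ~1ms of sleep for
 * the soft timers below. A timer that is already pending yields 0.
 */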
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		((timer_get_ctl(timer_ctx) &
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}
static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
	struct arch_timer_context *ctx;

	ctx = (vcpu_has_nv(vcpu) && is_hyp_ctxt(vcpu)) ? vcpu_hvtimer(vcpu)
						       : vcpu_vtimer(vcpu);

	return kvm_counter_compute_delta(ctx, val);
}
/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}
static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
		case TIMER_HVTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
		case TIMER_HPTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
			(cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}
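
/*
 * Summary of the predicate above: a loaded timer fires iff the hardware
 * reports ENABLE=1, ISTATUS=1 and IMASK=0; an unloaded one fires iff it
 * can fire (enabled, unmasked) and CVAL <= CNT_guest.
 */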
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}
/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					  timer_irq(timer_ctx),
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}
/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}
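
/*
 * Note: timer_emulate() only ever arms ctx->hrtimer when the timer is
 * enabled, unmasked, and due strictly in the future; the hrtimer callback
 * (kvm_hrtimer_expire) then raises the IRQ level once the deadline has
 * really passed from the guest's point of view.
 */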
static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static void set_cntpoff(u64 cntpoff)
{
	if (has_cntpoff())
		write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
}
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
		u64 cval;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		/*
		 * The kernel may decide to run userspace after
		 * calling vcpu_put, so we reset cntvoff to 0 to
		 * ensure a consistent read between user accesses to
		 * the virtual counter and kernel access to the
		 * physical counter in the non-VHE case.
		 *
		 * For VHE, the virtual counter uses a fixed virtual
		 * offset of zero, so no need to zero the CNTVOFF_EL2
		 * register, but this is actually useful when switching
		 * between EL1/vEL2 with NV.
		 *
		 * Do it unconditionally, as this is either unavoidable
		 * or dirt cheap.
		 */
		set_cntvoff(0);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		cval = read_sysreg_el0(SYS_CNTP_CVAL);

		cval -= timer_get_offset(ctx);

		timer_set_cval(ctx, cval);

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		set_cntpoff(0);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}
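
/*
 * On the save path above, the physical CVAL is rewritten in guest terms
 * (hardware CVAL minus the physical offset) so that the saved value
 * matches what the guest programmed; timer_restore_state() below performs
 * the inverse translation when putting the timer back on the CPU.
 */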
/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_vtimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}
static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
		u64 cval, offset;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		set_cntvoff(timer_get_offset(ctx));
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		cval = timer_get_cval(ctx);
		offset = timer_get_offset(ctx);
		set_cntpoff(offset);
		cval += offset;
		write_sysreg_el0(cval, SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}
static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;

	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}
static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
/* If _pred is true, set bit in _set, otherwise set it in _clr */
#define assign_clear_set_bit(_pred, _bit, _clr, _set)			\
	({								\
		if (_pred)						\
			(_set) |= (_bit);				\
		else							\
			(_clr) |= (_bit);				\
	})
static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
					      struct timer_map *map)
{
	int hw, ret;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	/*
	 * We only ever unmap the vtimer irq on a VHE system that runs nested
	 * virtualization, in which case we have both a valid emul_vtimer,
	 * emul_ptimer, direct_vtimer, and direct_ptimer.
	 *
	 * Since this is called from kvm_timer_vcpu_load(), a change between
	 * vEL2 and vEL1/0 will have just happened, and the timer_map will
	 * represent this, and therefore we switch the emul/direct mappings
	 * below.
	 */
	hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer));
	if (hw < 0) {
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));

		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_vtimer->host_timer_irq,
					    timer_irq(map->direct_vtimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_ptimer->host_timer_irq,
					    timer_irq(map->direct_ptimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);

		/*
		 * The virtual offset behaviour is "interesting", as it
		 * always applies when HCR_EL2.E2H==0, but only when
		 * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
		 * track E2H when putting the HV timer in "direct" mode.
		 */
		if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
			struct arch_timer_offset *offs = &map->direct_vtimer->offset;

			if (vcpu_el2_e2h_is_set(vcpu))
				offs->vcpu_offset = NULL;
			else
				offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		}
	}
}
static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	bool tpt, tpc;
	u64 clr, set;

	/*
	 * No trapping gets configured here with nVHE. See
	 * __timer_enable_traps(), which is where the stuff happens.
	 */
	if (!has_vhe())
		return;

	/*
	 * Our default policy is not to trap anything. As we progress
	 * within this function, reality kicks in and we start adding
	 * traps based on emulation requirements.
	 */
	tpt = tpc = false;

	/*
	 * We have two possibilities to deal with a physical offset:
	 *
	 * - Either we have CNTPOFF (yay!) or the offset is 0:
	 *   we let the guest freely access the HW
	 *
	 * - or neither of these conditions apply:
	 *   we trap accesses to the HW, but still use it
	 *   after correcting the physical offset
	 */
	if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
		tpt = tpc = true;

	/*
	 * Apply the enable bits that the guest hypervisor has requested for
	 * its own guest. We can only add traps that wouldn't have been set
	 * above.
	 */
	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);

		/* Use the VHE format for mental sanity */
		if (!vcpu_el2_e2h_is_set(vcpu))
			val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;

		tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
		tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
	}

	/*
	 * Now that we have collected our requirements, compute the
	 * trap and enable bits.
	 */
	set = 0;
	clr = 0;

	assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
	assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);

	/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
	sysreg_clear_set(cnthctl_el2, clr, set);
}
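
/*
 * For reference: in the VHE layout used above, CNTHCTL_EL1PCTEN << 10 and
 * CNTHCTL_EL1PCEN << 10 correspond to the EL1PCTEN (counter) and EL1PTEN
 * (timer) enable bits of CNTHCTL_EL2. Note the argument order of
 * assign_clear_set_bit(): a true trap predicate lands the bit in "clr",
 * so sysreg_clear_set() clears the enable bit and the guest access traps.
 */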
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		if (vcpu_has_nv(vcpu))
			kvm_timer_vcpu_load_nested_switch(vcpu, &map);

		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_vtimer)
		timer_emulate(map.emul_vtimer);
	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);

	timer_set_traps(vcpu, &map);
}
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);
}
/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}
void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	for (int i = 0; i < nr_timers(vcpu); i++)
		timer_set_ctl(vcpu_get_timer(vcpu, i), 0);

	/*
	 * A vcpu running at EL2 is in charge of the offset applied to
	 * the virtual timer, so use the physical VM offset, and point
	 * the vcpu offset to CNTVOFF_EL2.
	 */
	if (vcpu_has_nv(vcpu)) {
		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;

		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
	}

	if (timer->enabled) {
		for (int i = 0; i < nr_timers(vcpu); i++)
			kvm_timer_update_irq(vcpu, false,
					     vcpu_get_timer(vcpu, i));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
		}
	}

	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);
}
static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
{
	struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
	struct kvm *kvm = vcpu->kvm;

	ctxt->vcpu = vcpu;

	if (timerid == TIMER_VTIMER)
		ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
	else
		ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;

	hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ctxt->hrtimer.function = kvm_hrtimer_expire;

	switch (timerid) {
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		ctxt->host_timer_irq = host_ptimer_irq;
		break;
	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		ctxt->host_timer_irq = host_vtimer_irq;
		break;
	}
}
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	for (int i = 0; i < NR_KVM_TIMERS; i++)
		timer_context_init(vcpu, i);

	/* Synchronize offsets across timers of a VM if not already provided */
	if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
		timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
		timer_set_offset(vcpu_ptimer(vcpu), 0);
	}

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;
}
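
/*
 * The default vtimer offset set above ("now") makes a freshly created VM
 * observe a virtual count starting at roughly zero, while the ptimer view
 * matches the host physical counter, until userspace installs an explicit
 * offset via KVM_ARM_SET_COUNTER_OFFSET.
 */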
void kvm_timer_init_vm(struct kvm *kvm)
{
	for (int i = 0; i < NR_KVM_TIMERS; i++)
		kvm->arch.timer_data.ppi[i] = default_ppi[i];
}
void kvm_timer_cpu_up(void)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	if (host_ptimer_irq)
		enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

void kvm_timer_cpu_down(void)
{
	disable_percpu_irq(host_vtimer_irq);
	if (host_ptimer_irq)
		disable_percpu_irq(host_ptimer_irq);
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_vtimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_ptimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}
static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	case TIMER_REG_VOFF:
		val = *timer->offset.vcpu_offset;
		break;

	default:
		BUG();
	}

	return val;
}
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	struct arch_timer_context *timer;
	struct timer_map map;
	u64 val;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);

	if (timer == map.emul_vtimer || timer == map.emul_ptimer)
		return kvm_arm_timer_read(vcpu, timer, treg);

	preempt_disable();
	timer_save_state(timer);

	val = kvm_arm_timer_read(vcpu, timer, treg);

	timer_restore_state(timer);
	preempt_enable();

	return val;
}
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	case TIMER_REG_VOFF:
		*timer->offset.vcpu_offset = val;
		break;

	default:
		BUG();
	}
}
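
/*
 * TVAL is derived state: writing TVAL above programs
 * CVAL = CNT_guest + sign_extended(TVAL), and the TVAL read in
 * kvm_arm_timer_read() recomputes CVAL - CNT_guest, truncated to 32 bits.
 */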
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	struct arch_timer_context *timer;
	struct timer_map map;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);
	if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
		soft_timer_cancel(&timer->hrtimer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_emulate(timer);
	} else {
		preempt_disable();
		timer_save_state(timer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_restore_state(timer);
		preempt_enable();
	}
}
static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}
static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}
static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}
static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};
static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};
static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}
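
/*
 * The "kvm-timer" hierarchy above only exists when the GIC cannot
 * deactivate the timer interrupts on the guest's behalf (no HW
 * deactivation): inserting timer_chip between the timer PPIs and the GIC
 * lets KVM mask the lines and software-resample the level instead.
 */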
int __init kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_vtimer_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			goto out_free_vtimer_irq;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_ptimer_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_vtimer_irq;
	}

	return 0;

out_free_ptimer_irq:
	if (info->physical_irq > 0)
		free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
out_free_vtimer_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());

	return err;
}
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	u32 ppis = 0;
	bool valid;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	for (int i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx;
		int irq;

		ctx = vcpu_get_timer(vcpu, i);
		irq = timer_irq(ctx);
		if (kvm_vgic_set_owner(vcpu, irq, ctx))
			break;

		/*
		 * We know by construction that we only have PPIs, so
		 * all values are less than 32.
		 */
		ppis |= BIT(irq);
	}

	valid = hweight32(ppis) == nr_timers(vcpu);

	if (valid)
		set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags);

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return valid;
}
static bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	for (int i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx;

		ctx = vcpu_get_timer(vcpu, i);
		if (timer_irq(ctx) == vintid)
			return kvm_timer_should_fire(ctx);
	}

	/* A timer IRQ has fired, but no matching timer was found? */
	WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid);

	return false;
}
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    timer_irq(map.direct_vtimer),
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    timer_irq(map.direct_ptimer),
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}
/* If we have CNTPOFF, permanently set ECV to enable it */
void kvm_timer_init_vhe(void)
{
	if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
		sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
}
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	int irq, idx, ret = 0;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
		     &vcpu->kvm->arch.flags)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		idx = TIMER_VTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		idx = TIMER_PTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		idx = TIMER_HVTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		idx = TIMER_HPTIMER;
		break;
	default:
		ret = -ENXIO;
		goto out;
	}

	/*
	 * We cannot validate the IRQ uniqueness before we run, so take it at
	 * face value. The verdict will be given on first vcpu run, for each
	 * vcpu. Yes this is late. Blame it on the stupid API.
	 */
	vcpu->kvm->arch.timer_data.ppi[idx] = irq;

out:
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return ret;
}
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		timer = vcpu_hvtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		timer = vcpu_hptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer_irq(timer);
	return put_user(irq, uaddr);
}
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		return 0;
	}

	return -ENXIO;
}
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset)
{
	int ret = 0;

	if (offset->reserved)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (lock_all_vcpus(kvm)) {
		set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);

		/*
		 * If userspace decides to set the offset using this
		 * API rather than merely restoring the counter
		 * values, the offset applies to both the virtual and
		 * physical views.
		 */
		kvm->arch.timer_data.voffset = offset->counter_offset;
		kvm->arch.timer_data.poffset = offset->counter_offset;

		unlock_all_vcpus(kvm);
	} else {
		ret = -EBUSY;
	}

	mutex_unlock(&kvm->lock);

	return ret;
}
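
/*
 * Userspace usage sketch (hypothetical VMM snippet, not part of this file):
 *
 *	struct kvm_arm_counter_offset off = {
 *		.counter_offset = host_count_at_save - guest_count_at_save,
 *	};
 *	ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
 *
 * For the migration case this must run before any vcpu is in use, since
 * lock_all_vcpus() fails (and we return -EBUSY) once a vcpu is locked.
 */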