git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: arm64: Introduce timer_context_to_vcpu() helper
author: Marc Zyngier <maz@kernel.org>
Mon, 29 Sep 2025 16:04:46 +0000 (17:04 +0100)
committer: Marc Zyngier <maz@kernel.org>
Mon, 13 Oct 2025 13:42:40 +0000 (14:42 +0100)
We currently have a vcpu pointer nested into each timer context.

As we are about to remove this pointer, introduce a helper (aptly
named timer_context_to_vcpu()) that returns this pointer, at least
until we repaint the data structure.

Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/arch_timer.c
include/kvm/arm_arch_timer.h

index dbd74e4885e244bc22db0962b4fc0c1ce671ff11..e5a25e743f5be1c7facb39d74ee0c3a3b58e907c 100644 (file)
@@ -66,7 +66,7 @@ static int nr_timers(struct kvm_vcpu *vcpu)
 
 u32 timer_get_ctl(struct arch_timer_context *ctxt)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
+       struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
 
        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
@@ -85,7 +85,7 @@ u32 timer_get_ctl(struct arch_timer_context *ctxt)
 
 u64 timer_get_cval(struct arch_timer_context *ctxt)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
+       struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
 
        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
@@ -104,7 +104,7 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
 
 static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
+       struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
 
        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
@@ -126,7 +126,7 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
 
 static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
+       struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
 
        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
@@ -343,7 +343,7 @@ static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
        u64 ns;
 
        ctx = container_of(hrt, struct arch_timer_context, hrtimer);
-       vcpu = ctx->vcpu;
+       vcpu = timer_context_to_vcpu(ctx);
 
        trace_kvm_timer_hrtimer_expire(ctx);
 
@@ -436,8 +436,9 @@ static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
         *
         * But hey, it's fast, right?
         */
-       if (is_hyp_ctxt(ctx->vcpu) &&
-           (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
+       struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
+       if (is_hyp_ctxt(vcpu) &&
+           (ctx == vcpu_vtimer(vcpu) || ctx == vcpu_ptimer(vcpu))) {
                unsigned long val = timer_get_ctl(ctx);
                __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
                timer_set_ctl(ctx, val);
@@ -470,7 +471,7 @@ static void timer_emulate(struct arch_timer_context *ctx)
        trace_kvm_timer_emulate(ctx, should_fire);
 
        if (should_fire != ctx->irq.level)
-               kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
+               kvm_timer_update_irq(timer_context_to_vcpu(ctx), should_fire, ctx);
 
        kvm_timer_update_status(ctx, should_fire);
 
@@ -498,7 +499,7 @@ static void set_cntpoff(u64 cntpoff)
 
 static void timer_save_state(struct arch_timer_context *ctx)
 {
-       struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
+       struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
        enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
        unsigned long flags;
 
@@ -609,7 +610,7 @@ static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
 
 static void timer_restore_state(struct arch_timer_context *ctx)
 {
-       struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
+       struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
        enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
        unsigned long flags;
 
@@ -668,7 +669,7 @@ static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, boo
 
 static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
 {
-       struct kvm_vcpu *vcpu = ctx->vcpu;
+       struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
        bool phys_active = false;
 
        /*
@@ -677,7 +678,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
         * this point and the register restoration, we'll take the
         * interrupt anyway.
         */
-       kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
+       kvm_timer_update_irq(vcpu, kvm_timer_should_fire(ctx), ctx);
 
        if (irqchip_in_kernel(vcpu->kvm))
                phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
index 681cf0c8b9df4e495c31ec62b00629ecbebac76c..d188c716d03cb493b163b79b42fd94ee5cc0f197 100644 (file)
@@ -128,7 +128,7 @@ void kvm_timer_init_vhe(void);
 #define vcpu_hptimer(v)        (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
 
 #define arch_timer_ctx_index(ctx)      ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
-
+#define timer_context_to_vcpu(ctx)     ((ctx)->vcpu)
 #define timer_vm_data(ctx)             (&(ctx)->vcpu->kvm->arch.timer_data)
 #define timer_irq(ctx)                 (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])