KVM: arm64: Revamp vgic maintenance interrupt configuration
author Marc Zyngier <maz@kernel.org>
Thu, 20 Nov 2025 17:25:11 +0000 (17:25 +0000)
committer Oliver Upton <oupton@kernel.org>
Mon, 24 Nov 2025 22:29:13 +0000 (14:29 -0800)
We currently don't use the maintenance interrupt very much, apart
from EOIs on level interrupts and, in limited cases, LR underflow.

However, as we are moving toward a setup where active interrupts
can live outside of the LRs, we need to use the MI in a more
diverse set of cases.

Add a new helper that produces a digest of the ap_list, and use
that summary to set the various control bits as required.
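
To make the resulting policy concrete, below is a stand-alone
user-space sketch of the summary and the predicates this patch adds
to vgic.h. The LR count of 4 and the helper-function form of the
macros are illustrative assumptions, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirrors struct ap_list_summary from vgic.h */
	struct ap_list_summary {
		unsigned int nr_pend;	/* purely pending, not active */
		unsigned int nr_act;	/* active, or active+pending */
		unsigned int nr_sgi;	/* any SGI */
	};

	static const unsigned int nr_lr = 4;	/* hypothetical LR count */

	/* Mirrors the irqs_outside_lrs() predicate */
	static bool irqs_outside_lrs(const struct ap_list_summary *s)
	{
		return (s->nr_pend + s->nr_act) > nr_lr;
	}

	int main(void)
	{
		/* 5 pending + 2 active interrupts, none of them SGIs */
		struct ap_list_summary als = { .nr_pend = 5, .nr_act = 2 };

		printf("UIE=%d NPIE=%d LRENPIE=%d vSGIEOICount=%d\n",
		       irqs_outside_lrs(&als),		/* 7 > 4 -> 1 */
		       als.nr_pend > nr_lr,		/* 5 > 4 -> 1 */
		       als.nr_act && irqs_outside_lrs(&als), /* -> 1 */
		       !als.nr_sgi);			/* no SGI -> 1 */
		return 0;
	}

UIE, NPIE, LRENPIE and vSGIEOICount name the GICH_HCR/ICH_HCR_EL2
bits that the configure_hcr helpers below set from such a summary.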

This slightly changes the way v2 SGIs are handled: they used to
count as more than one interrupt, but don't anymore.
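
As an illustration of that change (a sketch; the old per-interrupt
weight came from the vgic_irq_get_lr_count() call that the diff below
removes, and reflected that a GICv2 LR encodes a single SGI source
CPU), an SGI pending from three vcpus used to need three LR slots,
while summarize_ap_list() now counts it once:

	#include <stdio.h>

	int main(void)
	{
		unsigned char source = 0x0b;	/* pending from vcpus 0, 1, 3 */

		/* Old: one LR slot per pending source CPU */
		printf("old weight: %d\n", __builtin_popcount(source));
		/* New: a single ap_list entry (nr_pend++, nr_sgi++) */
		printf("new weight: 1\n");
		return 0;
	}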

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-22-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
arch/arm64/kvm/vgic/vgic-v2.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/kvm/vgic/vgic.c
arch/arm64/kvm/vgic/vgic.h

diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 07e93acafd04d793ff9daab75fc474e356c2cc4e..f53bc55288978f10a7ff1783c425b67865603378 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -26,11 +26,19 @@ void vgic_v2_init_lrs(void)
                vgic_v2_write_lr(i, 0);
 }
 
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu,
+                          struct ap_list_summary *als)
 {
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
 
-       cpuif->vgic_hcr |= GICH_HCR_UIE;
+       cpuif->vgic_hcr = GICH_HCR_EN;
+
+       if (irqs_pending_outside_lrs(als))
+               cpuif->vgic_hcr |= GICH_HCR_NPIE;
+       if (irqs_active_outside_lrs(als))
+               cpuif->vgic_hcr |= GICH_HCR_LRENPIE;
+       if (irqs_outside_lrs(als))
+               cpuif->vgic_hcr |= GICH_HCR_UIE;
 }
 
 static bool lr_signals_eoi_mi(u32 lr_val)
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 5b276e303aab23477f5dc7435c86c359cc5ff6e2..81f1de9e3897b3a1ebea27ea019a5bcff40f3757 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -20,11 +20,25 @@ static bool common_trap;
 static bool dir_trap;
 static bool gicv4_enable;
 
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
+                          struct ap_list_summary *als)
 {
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
 
-       cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+       if (!irqchip_in_kernel(vcpu->kvm))
+               return;
+
+       cpuif->vgic_hcr = ICH_HCR_EL2_En;
+
+       if (irqs_pending_outside_lrs(als))
+               cpuif->vgic_hcr |= ICH_HCR_EL2_NPIE;
+       if (irqs_active_outside_lrs(als))
+               cpuif->vgic_hcr |= ICH_HCR_EL2_LRENPIE;
+       if (irqs_outside_lrs(als))
+               cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+
+       if (!als->nr_sgi)
+               cpuif->vgic_hcr |= ICH_HCR_EL2_vSGIEOICount;
 }
 
 static bool lr_signals_eoi_mi(u64 lr_val)
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 39346baa2677ccf7895476a4dde660549da543e1..7e6f02d48fff1f47556f339e327f737fbf3aafd6 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -791,38 +791,30 @@ static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
                vgic_v3_clear_lr(vcpu, lr);
 }
 
-static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
-{
-       if (kvm_vgic_global_state.type == VGIC_V2)
-               vgic_v2_set_underflow(vcpu);
-       else
-               vgic_v3_set_underflow(vcpu);
-}
-
-/* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
-                                bool *multi_sgi)
+static void summarize_ap_list(struct kvm_vcpu *vcpu,
+                             struct ap_list_summary *als)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
-       int count = 0;
-
-       *multi_sgi = false;
 
        lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
+       *als = (typeof(*als)){};
+
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-               int w;
+               guard(raw_spinlock)(&irq->irq_lock);
 
-               raw_spin_lock(&irq->irq_lock);
-               /* GICv2 SGIs can count for more than one... */
-               w = vgic_irq_get_lr_count(irq);
-               raw_spin_unlock(&irq->irq_lock);
+               if (unlikely(vgic_target_oracle(irq) != vcpu))
+                       continue;
+
+               if (!irq->active)
+                       als->nr_pend++;
+               else
+                       als->nr_act++;
 
-               count += w;
-               *multi_sgi |= (w > 1);
+               if (irq->intid < VGIC_NR_SGIS)
+                       als->nr_sgi++;
        }
-       return count;
 }
 
 /*
@@ -908,60 +900,39 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct ap_list_summary als;
        struct vgic_irq *irq;
-       int count;
-       bool multi_sgi;
-       u8 prio = 0xff;
-       int i = 0;
+       int count = 0;
 
        lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
-       count = compute_ap_list_depth(vcpu, &multi_sgi);
-       if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
-               vgic_sort_ap_list(vcpu);
+       summarize_ap_list(vcpu, &als);
 
-       count = 0;
+       if (irqs_outside_lrs(&als))
+               vgic_sort_ap_list(vcpu);
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-               raw_spin_lock(&irq->irq_lock);
-
-               /*
-                * If we have multi-SGIs in the pipeline, we need to
-                * guarantee that they are all seen before any IRQ of
-                * lower priority. In that case, we need to filter out
-                * these interrupts by exiting early. This is easy as
-                * the AP list has been sorted already.
-                */
-               if (multi_sgi && irq->priority > prio) {
-                       raw_spin_unlock(&irq->irq_lock);
-                       break;
+               scoped_guard(raw_spinlock, &irq->irq_lock) {
+                       if (likely(vgic_target_oracle(irq) == vcpu)) {
+                               vgic_populate_lr(vcpu, irq, count++);
+                       }
                }
 
-               if (likely(vgic_target_oracle(irq) == vcpu)) {
-                       vgic_populate_lr(vcpu, irq, count++);
-
-                       if (irq->source)
-                               prio = irq->priority;
-               }
-
-               raw_spin_unlock(&irq->irq_lock);
-
-               if (count == kvm_vgic_global_state.nr_lr) {
-                       if (!list_is_last(&irq->ap_list,
-                                         &vgic_cpu->ap_list_head))
-                               vgic_set_underflow(vcpu);
+               if (count == kvm_vgic_global_state.nr_lr)
                        break;
-               }
        }
 
        /* Nuke remaining LRs */
-       for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+       for (int i = count ; i < kvm_vgic_global_state.nr_lr; i++)
                vgic_clear_lr(vcpu, i);
 
-       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
-       else
+               vgic_v2_configure_hcr(vcpu, &als);
+       } else {
                vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
+               vgic_v3_configure_hcr(vcpu, &als);
+       }
 }
 
 static inline bool can_access_vgic_from_kernel(void)
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0ecadfa00397d3935c0177114221d358535a9b5c..4a0733869cb5f1948673877ab968557d7742561d 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -236,6 +236,21 @@ struct its_ite {
        u32 event_id;
 };
 
+struct ap_list_summary {
+       unsigned int    nr_pend;        /* purely pending, not active */
+       unsigned int    nr_act;         /* active, or active+pending */
+       unsigned int    nr_sgi;         /* any SGI */
+};
+
+#define irqs_outside_lrs(s)                                            \
+        (((s)->nr_pend + (s)->nr_act) > kvm_vgic_global_state.nr_lr)
+
+#define irqs_pending_outside_lrs(s)                    \
+       ((s)->nr_pend > kvm_vgic_global_state.nr_lr)
+
+#define irqs_active_outside_lrs(s)             \
+       ((s)->nr_act && irqs_outside_lrs(s))
+
 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr);
 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
@@ -262,7 +277,7 @@ int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
 void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu, struct ap_list_summary *als);
 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val);
@@ -302,7 +317,7 @@ static inline void vgic_get_irq_ref(struct vgic_irq *irq)
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu, struct ap_list_summary *als);
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_enable(struct kvm_vcpu *vcpu);