vgic_v2_write_lr(i, 0);
}
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
- cpuif->vgic_hcr |= GICH_HCR_UIE;
+ cpuif->vgic_hcr = GICH_HCR_EN;
+
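+ /*
+ * Ask for maintenance interrupts whenever the AP list overflows
+ * the LRs: NPIE if the pending interrupts alone don't fit,
+ * LRENPIE if the overflowing list contains active interrupts,
+ * and UIE on any overflow.
+ */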
+ if (irqs_pending_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_NPIE;
+ if (irqs_active_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_LRENPIE;
+ if (irqs_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_UIE;
}
static bool lr_signals_eoi_mi(u32 lr_val)
static bool dir_trap;
static bool gicv4_enable;
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
- cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return;
+
+ cpuif->vgic_hcr = ICH_HCR_EL2_En;
+
+ if (irqs_pending_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_NPIE;
+ if (irqs_active_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_LRENPIE;
+ if (irqs_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+
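+ /*
+ * With no SGI left on the AP list, let deactivations of directly
+ * injected vSGIs be accounted through EOIcount.
+ */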
+ if (!als->nr_sgi)
+ cpuif->vgic_hcr |= ICH_HCR_EL2_vSGIEOICount;
}
static bool lr_signals_eoi_mi(u64 lr_val)
vgic_v3_clear_lr(vcpu, lr);
}
-static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
-{
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_set_underflow(vcpu);
- else
- vgic_v3_set_underflow(vcpu);
-}
-
-/* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
- bool *multi_sgi)
+static void summarize_ap_list(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
- int count = 0;
-
- *multi_sgi = false;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
+ *als = (typeof(*als)){};
+
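+ /*
+ * Classify each interrupt that actually targets this vcpu as
+ * either purely pending or active (possibly also pending), and
+ * keep track of any SGI among them.
+ */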
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- int w;
+ guard(raw_spinlock)(&irq->irq_lock);
- raw_spin_lock(&irq->irq_lock);
- /* GICv2 SGIs can count for more than one... */
- w = vgic_irq_get_lr_count(irq);
- raw_spin_unlock(&irq->irq_lock);
+ if (unlikely(vgic_target_oracle(irq) != vcpu))
+ continue;
+
+ if (!irq->active)
+ als->nr_pend++;
+ else
+ als->nr_act++;
- count += w;
- *multi_sgi |= (w > 1);
+ if (irq->intid < VGIC_NR_SGIS)
+ als->nr_sgi++;
}
- return count;
}
/*
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct ap_list_summary als;
struct vgic_irq *irq;
- int count;
- bool multi_sgi;
- u8 prio = 0xff;
- int i = 0;
+ int count = 0;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
- count = compute_ap_list_depth(vcpu, &multi_sgi);
- if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
- vgic_sort_ap_list(vcpu);
+ summarize_ap_list(vcpu, &als);
- count = 0;
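+ /* Only bother sorting the AP list if it cannot fully fit in the LRs */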
+ if (irqs_outside_lrs(&als))
+ vgic_sort_ap_list(vcpu);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- raw_spin_lock(&irq->irq_lock);
-
- /*
- * If we have multi-SGIs in the pipeline, we need to
- * guarantee that they are all seen before any IRQ of
- * lower priority. In that case, we need to filter out
- * these interrupts by exiting early. This is easy as
- * the AP list has been sorted already.
- */
- if (multi_sgi && irq->priority > prio) {
- raw_spin_unlock(&irq->irq_lock);
- break;
+ scoped_guard(raw_spinlock, &irq->irq_lock) {
+ if (likely(vgic_target_oracle(irq) == vcpu))
+ vgic_populate_lr(vcpu, irq, count++);
}
- if (likely(vgic_target_oracle(irq) == vcpu)) {
- vgic_populate_lr(vcpu, irq, count++);
-
- if (irq->source)
- prio = irq->priority;
- }
-
- raw_spin_unlock(&irq->irq_lock);
-
- if (count == kvm_vgic_global_state.nr_lr) {
- if (!list_is_last(&irq->ap_list,
- &vgic_cpu->ap_list_head))
- vgic_set_underflow(vcpu);
+ if (count == kvm_vgic_global_state.nr_lr)
break;
- }
}
/* Nuke remaining LRs */
- for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+ for (int i = count; i < kvm_vgic_global_state.nr_lr; i++)
vgic_clear_lr(vcpu, i);
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
- else
+ vgic_v2_configure_hcr(vcpu, &als);
+ } else {
vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
+ vgic_v3_configure_hcr(vcpu, &als);
+ }
}
static inline bool can_access_vgic_from_kernel(void)
u32 event_id;
};
+struct ap_list_summary {
+ unsigned int nr_pend; /* purely pending, not active */
+ unsigned int nr_act; /* active, or active+pending */
+ unsigned int nr_sgi; /* any SGI */
+};
+
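+/*
+ * Evaluate an ap_list_summary against the number of LRs:
+ * irqs_outside_lrs(): the whole list does not fit in the LRs,
+ * irqs_pending_outside_lrs(): the pending interrupts alone do not fit,
+ * irqs_active_outside_lrs(): the list overflows and contains active
+ * interrupts.
+ */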
+#define irqs_outside_lrs(s) \
+ (((s)->nr_pend + (s)->nr_act) > kvm_vgic_global_state.nr_lr)
+
+#define irqs_pending_outside_lrs(s) \
+ ((s)->nr_pend > kvm_vgic_global_state.nr_lr)
+
+#define irqs_active_outside_lrs(s) \
+ ((s)->nr_act && irqs_outside_lrs(s))
+
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu, struct ap_list_summary *als);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int offset, u32 *val);
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu, struct ap_list_summary *als);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);