KVM: arm64: gic-v3: Switch vGIC-v3 to use generated ICH_VMCR_EL2
Author:     Sascha Bischoff <Sascha.Bischoff@arm.com>
AuthorDate: Wed, 28 Jan 2026 17:59:50 +0000 (17:59 +0000)
Commit:     Marc Zyngier <maz@kernel.org>
CommitDate: Fri, 30 Jan 2026 11:10:46 +0000 (11:10 +0000)
The vGIC-v3 code relied on hand-written definitions for the
ICH_VMCR_EL2 register. This register and its associated fields are
now generated as part of the sysreg framework. Move to using the
generated definitions instead of the hand-written ones.

No functional change intended.
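
The conversion is mechanical: open-coded shift/mask arithmetic is
replaced with the bitfield helpers from <linux/bitfield.h>, which
derive the shift from the mask at compile time. As a minimal sketch of
the idiom (illustration only, using the VPMR field; not part of the
patch itself):

    /* extract: replaces (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT */
    pmr = FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr);

    /* encode: replaces (pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK */
    vmcr = FIELD_PREP(ICH_VMCR_EL2_VPMR, pmr);

    /* update in place: replaces the clear-mask-then-OR sequence */
    FIELD_MODIFY(ICH_VMCR_EL2_VPMR, &vmcr, pmr);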

Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Link: https://patch.msgid.link/20260128175919.3828384-3-sascha.bischoff@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/sysreg.h
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/kvm/vgic/vgic-v3-nested.c
arch/arm64/kvm/vgic/vgic-v3.c
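
For context, the generated definitions come from the sysreg framework
(arch/arm64/tools/sysreg, processed into a generated header at build
time); the ICH_VMCR_EL2 description itself is added separately,
presumably earlier in this series. For each field the framework
provides a bare GENMASK()-style mask plus _MASK and _SHIFT companions,
which is why the hunks below use both ICH_VMCR_EL2_VENG0 and
ICH_VMCR_EL2_VENG0_MASK. A sketch of the resulting definitions, with
bit positions reconstructed from the hand-written macros removed below
(not the verbatim generated header):

    #define ICH_VMCR_EL2                sys_reg(3, 4, 12, 11, 7)

    #define ICH_VMCR_EL2_VPMR           GENMASK(31, 24)
    #define ICH_VMCR_EL2_VBPR0          GENMASK(23, 21)
    #define ICH_VMCR_EL2_VBPR1          GENMASK(20, 18)
    #define ICH_VMCR_EL2_VEOIM          BIT(9)
    #define ICH_VMCR_EL2_VCBPR          BIT(4)
    #define ICH_VMCR_EL2_VFIQEn         BIT(3)
    #define ICH_VMCR_EL2_VAckCtl        BIT(2)
    #define ICH_VMCR_EL2_VENG1          BIT(1)
    #define ICH_VMCR_EL2_VENG0          BIT(0)
    /* ...plus matching _MASK and _SHIFT definitions for each field */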

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 9df51accbb025b41dfc451d6c44cdeed77667d33..b3b8b8cd7bf1ef171743694dd7ee54359ab675fb 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
 #define SYS_ICC_SRE_EL2                        sys_reg(3, 4, 12, 9, 5)
 #define SYS_ICH_EISR_EL2               sys_reg(3, 4, 12, 11, 3)
 #define SYS_ICH_ELRSR_EL2              sys_reg(3, 4, 12, 11, 5)
-#define SYS_ICH_VMCR_EL2               sys_reg(3, 4, 12, 11, 7)
 
 #define __SYS__LR0_EL2(x)              sys_reg(3, 4, 12, 12, x)
 #define SYS_ICH_LR0_EL2                        __SYS__LR0_EL2(0)
 #define ICH_LR_PRIORITY_SHIFT  48
 #define ICH_LR_PRIORITY_MASK   (0xffULL << ICH_LR_PRIORITY_SHIFT)
 
-/* ICH_VMCR_EL2 bit definitions */
-#define ICH_VMCR_ACK_CTL_SHIFT 2
-#define ICH_VMCR_ACK_CTL_MASK  (1 << ICH_VMCR_ACK_CTL_SHIFT)
-#define ICH_VMCR_FIQ_EN_SHIFT  3
-#define ICH_VMCR_FIQ_EN_MASK   (1 << ICH_VMCR_FIQ_EN_SHIFT)
-#define ICH_VMCR_CBPR_SHIFT    4
-#define ICH_VMCR_CBPR_MASK     (1 << ICH_VMCR_CBPR_SHIFT)
-#define ICH_VMCR_EOIM_SHIFT    9
-#define ICH_VMCR_EOIM_MASK     (1 << ICH_VMCR_EOIM_SHIFT)
-#define ICH_VMCR_BPR1_SHIFT    18
-#define ICH_VMCR_BPR1_MASK     (7 << ICH_VMCR_BPR1_SHIFT)
-#define ICH_VMCR_BPR0_SHIFT    21
-#define ICH_VMCR_BPR0_MASK     (7 << ICH_VMCR_BPR0_SHIFT)
-#define ICH_VMCR_PMR_SHIFT     24
-#define ICH_VMCR_PMR_MASK      (0xffUL << ICH_VMCR_PMR_SHIFT)
-#define ICH_VMCR_ENG0_SHIFT    0
-#define ICH_VMCR_ENG0_MASK     (1 << ICH_VMCR_ENG0_SHIFT)
-#define ICH_VMCR_ENG1_SHIFT    1
-#define ICH_VMCR_ENG1_MASK     (1 << ICH_VMCR_ENG1_SHIFT)
-
 /*
  * Permission Indirection Extension (PIE) permission encodings.
  * Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 0b670a033fd87858b3d38efc38b81a8db525fc57..c4d2f1feea8b625d9e1941f280ab7db9e8afc895 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -569,11 +569,11 @@ static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
                        continue;
 
                /* Group-0 interrupt, but Group-0 disabled? */
-               if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
+               if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_EL2_VENG0_MASK))
                        continue;
 
                /* Group-1 interrupt, but Group-1 disabled? */
-               if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
+               if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_EL2_VENG1_MASK))
                        continue;
 
                /* Not the highest priority? */
@@ -646,19 +646,19 @@ static int __vgic_v3_get_highest_active_priority(void)
 
 static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
 {
-       return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
+       return FIELD_GET(ICH_VMCR_EL2_VBPR0, vmcr);
 }
 
 static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
 {
        unsigned int bpr;
 
-       if (vmcr & ICH_VMCR_CBPR_MASK) {
+       if (vmcr & ICH_VMCR_EL2_VCBPR_MASK) {
                bpr = __vgic_v3_get_bpr0(vmcr);
                if (bpr < 7)
                        bpr++;
        } else {
-               bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
+               bpr = FIELD_GET(ICH_VMCR_EL2_VBPR1, vmcr);
        }
 
        return bpr;
@@ -758,7 +758,7 @@ static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
        if (grp != !!(lr_val & ICH_LR_GROUP))
                goto spurious;
 
-       pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+       pmr = FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr);
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
        if (pmr <= lr_prio)
                goto spurious;
@@ -806,7 +806,7 @@ static int ___vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
        int lr;
 
        /* EOImode == 0, nothing to be done here */
-       if (!(vmcr & ICH_VMCR_EOIM_MASK))
+       if (!(vmcr & ICH_VMCR_EL2_VEOIM_MASK))
                return 1;
 
        /* No deactivate to be performed on an LPI */
@@ -849,7 +849,7 @@ static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
        }
 
        /* EOImode == 1 and not an LPI, nothing to be done here */
-       if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI))
+       if ((vmcr & ICH_VMCR_EL2_VEOIM_MASK) && !(vid >= VGIC_MIN_LPI))
                return;
 
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
@@ -865,22 +865,19 @@ static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 
 static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 {
-       vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
+       vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VENG0, vmcr));
 }
 
 static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 {
-       vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
+       vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VENG1, vmcr));
 }
 
 static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 {
        u64 val = vcpu_get_reg(vcpu, rt);
 
-       if (val & 1)
-               vmcr |= ICH_VMCR_ENG0_MASK;
-       else
-               vmcr &= ~ICH_VMCR_ENG0_MASK;
+       FIELD_MODIFY(ICH_VMCR_EL2_VENG0, &vmcr, val & 1);
 
        __vgic_v3_write_vmcr(vmcr);
 }
@@ -889,10 +886,7 @@ static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 {
        u64 val = vcpu_get_reg(vcpu, rt);
 
-       if (val & 1)
-               vmcr |= ICH_VMCR_ENG1_MASK;
-       else
-               vmcr &= ~ICH_VMCR_ENG1_MASK;
+       FIELD_MODIFY(ICH_VMCR_EL2_VENG1, &vmcr, val & 1);
 
        __vgic_v3_write_vmcr(vmcr);
 }
@@ -916,10 +910,7 @@ static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
        if (val < bpr_min)
                val = bpr_min;
 
-       val <<= ICH_VMCR_BPR0_SHIFT;
-       val &= ICH_VMCR_BPR0_MASK;
-       vmcr &= ~ICH_VMCR_BPR0_MASK;
-       vmcr |= val;
+       FIELD_MODIFY(ICH_VMCR_EL2_VBPR0, &vmcr, val);
 
        __vgic_v3_write_vmcr(vmcr);
 }
@@ -929,17 +920,14 @@ static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min();
 
-       if (vmcr & ICH_VMCR_CBPR_MASK)
+       if (FIELD_GET(ICH_VMCR_EL2_VCBPR, vmcr))
                return;
 
        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;
 
-       val <<= ICH_VMCR_BPR1_SHIFT;
-       val &= ICH_VMCR_BPR1_MASK;
-       vmcr &= ~ICH_VMCR_BPR1_MASK;
-       vmcr |= val;
+       FIELD_MODIFY(ICH_VMCR_EL2_VBPR1, &vmcr, val);
 
        __vgic_v3_write_vmcr(vmcr);
 }
@@ -1029,19 +1017,14 @@ spurious:
 
 static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 {
-       vmcr &= ICH_VMCR_PMR_MASK;
-       vmcr >>= ICH_VMCR_PMR_SHIFT;
-       vcpu_set_reg(vcpu, rt, vmcr);
+       vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr));
 }
 
 static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 {
        u32 val = vcpu_get_reg(vcpu, rt);
 
-       val <<= ICH_VMCR_PMR_SHIFT;
-       val &= ICH_VMCR_PMR_MASK;
-       vmcr &= ~ICH_VMCR_PMR_MASK;
-       vmcr |= val;
+       FIELD_MODIFY(ICH_VMCR_EL2_VPMR, &vmcr, val);
 
        write_gicreg(vmcr, ICH_VMCR_EL2);
 }
@@ -1064,9 +1047,11 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
        /* A3V */
        val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
        /* EOImode */
-       val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
+       val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK,
+                         FIELD_GET(ICH_VMCR_EL2_VEOIM, vmcr));
        /* CBPR */
-       val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+       val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK,
+                         FIELD_GET(ICH_VMCR_EL2_VCBPR, vmcr));
 
        vcpu_set_reg(vcpu, rt, val);
 }
@@ -1075,15 +1060,11 @@ static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 {
        u32 val = vcpu_get_reg(vcpu, rt);
 
-       if (val & ICC_CTLR_EL1_CBPR_MASK)
-               vmcr |= ICH_VMCR_CBPR_MASK;
-       else
-               vmcr &= ~ICH_VMCR_CBPR_MASK;
+       FIELD_MODIFY(ICH_VMCR_EL2_VCBPR, &vmcr,
+                    FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val));
 
-       if (val & ICC_CTLR_EL1_EOImode_MASK)
-               vmcr |= ICH_VMCR_EOIM_MASK;
-       else
-               vmcr &= ~ICH_VMCR_EOIM_MASK;
+       FIELD_MODIFY(ICH_VMCR_EL2_VEOIM, &vmcr,
+                    FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val));
 
        write_gicreg(vmcr, ICH_VMCR_EL2);
 }
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 61b44f3f2bf140ffdd64fd2adf7c2c8eba31a278..c9e35ec67117372056d24e099717536889922f59 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -202,16 +202,16 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
        if ((hcr & ICH_HCR_EL2_NPIE) && !mi_state.pend)
                reg |= ICH_MISR_EL2_NP;
 
-       if ((hcr & ICH_HCR_EL2_VGrp0EIE) && (vmcr & ICH_VMCR_ENG0_MASK))
+       if ((hcr & ICH_HCR_EL2_VGrp0EIE) && (vmcr & ICH_VMCR_EL2_VENG0_MASK))
                reg |= ICH_MISR_EL2_VGrp0E;
 
-       if ((hcr & ICH_HCR_EL2_VGrp0DIE) && !(vmcr & ICH_VMCR_ENG0_MASK))
+       if ((hcr & ICH_HCR_EL2_VGrp0DIE) && !(vmcr & ICH_VMCR_EL2_VENG0_MASK))
                reg |= ICH_MISR_EL2_VGrp0D;
 
-       if ((hcr & ICH_HCR_EL2_VGrp1EIE) && (vmcr & ICH_VMCR_ENG1_MASK))
+       if ((hcr & ICH_HCR_EL2_VGrp1EIE) && (vmcr & ICH_VMCR_EL2_VENG1_MASK))
                reg |= ICH_MISR_EL2_VGrp1E;
 
-       if ((hcr & ICH_HCR_EL2_VGrp1DIE) && !(vmcr & ICH_VMCR_ENG1_MASK))
+       if ((hcr & ICH_HCR_EL2_VGrp1DIE) && !(vmcr & ICH_VMCR_EL2_VENG1_MASK))
                reg |= ICH_MISR_EL2_VGrp1D;
 
        return reg;
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 1d6dd1b545bdd6cebf5f738c9aa8e29bdcea58a5..2afc041672311909cf62c9c4c383a411f5ddf707 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -41,9 +41,9 @@ void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
        if (!als->nr_sgi)
                cpuif->vgic_hcr |= ICH_HCR_EL2_vSGIEOICount;
 
-       cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_ENG0_MASK) ?
+       cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_EL2_VENG0_MASK) ?
                ICH_HCR_EL2_VGrp0DIE : ICH_HCR_EL2_VGrp0EIE;
-       cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_ENG1_MASK) ?
+       cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_EL2_VENG1_MASK) ?
                ICH_HCR_EL2_VGrp1DIE : ICH_HCR_EL2_VGrp1EIE;
 
        /*
@@ -215,7 +215,7 @@ void vgic_v3_deactivate(struct kvm_vcpu *vcpu, u64 val)
         * We only deal with DIR when EOIMode==1, and only for SGI,
         * PPI or SPI.
         */
-       if (!(cpuif->vgic_vmcr & ICH_VMCR_EOIM_MASK) ||
+       if (!(cpuif->vgic_vmcr & ICH_VMCR_EL2_VEOIM_MASK) ||
            val >= vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)
                return;
 
@@ -408,25 +408,23 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
        u32 vmcr;
 
        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
-               vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
-                       ICH_VMCR_ACK_CTL_MASK;
-               vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
-                       ICH_VMCR_FIQ_EN_MASK;
+               vmcr = FIELD_PREP(ICH_VMCR_EL2_VAckCtl, vmcrp->ackctl);
+               vmcr |= FIELD_PREP(ICH_VMCR_EL2_VFIQEn, vmcrp->fiqen);
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE==1, the
                 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
                 */
-               vmcr = ICH_VMCR_FIQ_EN_MASK;
+               vmcr = ICH_VMCR_EL2_VFIQEn_MASK;
        }
 
-       vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
-       vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-       vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
-       vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
-       vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
-       vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
-       vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
+       vmcr |= FIELD_PREP(ICH_VMCR_EL2_VCBPR, vmcrp->cbpr);
+       vmcr |= FIELD_PREP(ICH_VMCR_EL2_VEOIM, vmcrp->eoim);
+       vmcr |= FIELD_PREP(ICH_VMCR_EL2_VBPR1, vmcrp->abpr);
+       vmcr |= FIELD_PREP(ICH_VMCR_EL2_VBPR0, vmcrp->bpr);
+       vmcr |= FIELD_PREP(ICH_VMCR_EL2_VPMR, vmcrp->pmr);
+       vmcr |= FIELD_PREP(ICH_VMCR_EL2_VENG0, vmcrp->grpen0);
+       vmcr |= FIELD_PREP(ICH_VMCR_EL2_VENG1, vmcrp->grpen1);
 
        cpu_if->vgic_vmcr = vmcr;
 }
@@ -440,10 +438,8 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
        vmcr = cpu_if->vgic_vmcr;
 
        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
-               vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
-                       ICH_VMCR_ACK_CTL_SHIFT;
-               vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
-                       ICH_VMCR_FIQ_EN_SHIFT;
+               vmcrp->ackctl = FIELD_GET(ICH_VMCR_EL2_VAckCtl, vmcr);
+               vmcrp->fiqen = FIELD_GET(ICH_VMCR_EL2_VFIQEn, vmcr);
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE=1 on the
@@ -453,13 +449,13 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
                vmcrp->ackctl = 0;
        }
 
-       vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
-       vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
-       vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
-       vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
-       vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
-       vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
-       vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
+       vmcrp->cbpr = FIELD_GET(ICH_VMCR_EL2_VCBPR, vmcr);
+       vmcrp->eoim = FIELD_GET(ICH_VMCR_EL2_VEOIM, vmcr);
+       vmcrp->abpr = FIELD_GET(ICH_VMCR_EL2_VBPR1, vmcr);
+       vmcrp->bpr  = FIELD_GET(ICH_VMCR_EL2_VBPR0, vmcr);
+       vmcrp->pmr  = FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr);
+       vmcrp->grpen0 = FIELD_GET(ICH_VMCR_EL2_VENG0, vmcr);
+       vmcrp->grpen1 = FIELD_GET(ICH_VMCR_EL2_VENG1, vmcr);
 }
 
 #define INITIAL_PENDBASER_VALUE                                                  \