git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: gic-v5: Support GICv5 FGTs & FGUs
authorSascha Bischoff <Sascha.Bischoff@arm.com>
Thu, 19 Mar 2026 15:53:05 +0000 (15:53 +0000)
committerMarc Zyngier <maz@kernel.org>
Thu, 19 Mar 2026 18:21:27 +0000 (18:21 +0000)
Extend the existing FGT/FGU infrastructure to include the GICv5 trap
registers (ICH_HFGRTR_EL2, ICH_HFGWTR_EL2, ICH_HFGITR_EL2). This
involves mapping the trap registers and their bits to the
corresponding feature that introduces them (FEAT_GCIE for all, in this
case), and mapping each trap bit to the system register/instruction
controlled by it.

As of this change, none of the GICv5 instructions or register accesses
are being trapped.

Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Link: https://patch.msgid.link/20260319154937.3619520-14-sascha.bischoff@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/vncr_mapping.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/config.c
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/sys_regs.c

index 70cb9cfd760a3689426a7aab829de9136f8c22c7..64a1ee6c442f0ef279de43315a786de61179ee3d 100644 (file)
@@ -287,6 +287,9 @@ enum fgt_group_id {
        HDFGRTR2_GROUP,
        HDFGWTR2_GROUP = HDFGRTR2_GROUP,
        HFGITR2_GROUP,
+       ICH_HFGRTR_GROUP,
+       ICH_HFGWTR_GROUP = ICH_HFGRTR_GROUP,
+       ICH_HFGITR_GROUP,
 
        /* Must be last */
        __NR_FGT_GROUP_IDS__
@@ -620,6 +623,10 @@ enum vcpu_sysreg {
        VNCR(ICH_HCR_EL2),
        VNCR(ICH_VMCR_EL2),
 
+       VNCR(ICH_HFGRTR_EL2),
+       VNCR(ICH_HFGWTR_EL2),
+       VNCR(ICH_HFGITR_EL2),
+
        NR_SYS_REGS     /* Nothing after this line! */
 };
 
@@ -675,6 +682,9 @@ extern struct fgt_masks hfgwtr2_masks;
 extern struct fgt_masks hfgitr2_masks;
 extern struct fgt_masks hdfgrtr2_masks;
 extern struct fgt_masks hdfgwtr2_masks;
+extern struct fgt_masks ich_hfgrtr_masks;
+extern struct fgt_masks ich_hfgwtr_masks;
+extern struct fgt_masks ich_hfgitr_masks;
 
 extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks);
 extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks);
@@ -687,6 +697,9 @@ extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks);
 extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks);
 extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks);
 extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(ich_hfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(ich_hfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(ich_hfgitr_masks);
 
 struct kvm_cpu_context {
        struct user_pt_regs regs;       /* sp = sp_el0 */
@@ -1659,6 +1672,11 @@ static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg
        case HDFGRTR2_EL2:
        case HDFGWTR2_EL2:
                return HDFGRTR2_GROUP;
+       case ICH_HFGRTR_EL2:
+       case ICH_HFGWTR_EL2:
+               return ICH_HFGRTR_GROUP;
+       case ICH_HFGITR_EL2:
+               return ICH_HFGITR_GROUP;
        default:
                BUILD_BUG_ON(1);
        }
@@ -1673,6 +1691,7 @@ static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg
                case HDFGWTR_EL2:                                       \
                case HFGWTR2_EL2:                                       \
                case HDFGWTR2_EL2:                                      \
+               case ICH_HFGWTR_EL2:                                    \
                        p = &(vcpu)->arch.fgt[id].w;                    \
                        break;                                          \
                default:                                                \
index c2485a862e6904f3823b3d18c16cb2b1e385066a..14366d35ce82f08ec4595e7be2850d2f913494d4 100644 (file)
 #define VNCR_MPAMVPM5_EL2       0x968
 #define VNCR_MPAMVPM6_EL2       0x970
 #define VNCR_MPAMVPM7_EL2       0x978
+#define VNCR_ICH_HFGITR_EL2    0xB10
+#define VNCR_ICH_HFGRTR_EL2    0xB18
+#define VNCR_ICH_HFGWTR_EL2    0xB20
 
 #endif /* __ARM64_VNCR_MAPPING_H__ */
index 410ffd41fd73aadfbdedd8892dfb2f0534416605..aa69fd5b372fdbef6fcffc9f180e2fa388b7e8a0 100644 (file)
@@ -2529,6 +2529,9 @@ static void kvm_hyp_init_symbols(void)
        kvm_nvhe_sym(hfgitr2_masks) = hfgitr2_masks;
        kvm_nvhe_sym(hdfgrtr2_masks)= hdfgrtr2_masks;
        kvm_nvhe_sym(hdfgwtr2_masks)= hdfgwtr2_masks;
+       kvm_nvhe_sym(ich_hfgrtr_masks) = ich_hfgrtr_masks;
+       kvm_nvhe_sym(ich_hfgwtr_masks) = ich_hfgwtr_masks;
+       kvm_nvhe_sym(ich_hfgitr_masks) = ich_hfgitr_masks;
 
        /*
         * Flush entire BSS since part of its data containing init symbols is read
index d9f553cbf9dfdf585f3cb4d748ce2ab2337a0322..e4ec1bda8dfcbb4ae45df71996c05d619159a0da 100644 (file)
@@ -225,6 +225,7 @@ struct reg_feat_map_desc {
 #define FEAT_MTPMU             ID_AA64DFR0_EL1, MTPMU, IMP
 #define FEAT_HCX               ID_AA64MMFR1_EL1, HCX, IMP
 #define FEAT_S2PIE             ID_AA64MMFR3_EL1, S2PIE, IMP
+#define FEAT_GCIE              ID_AA64PFR2_EL1, GCIE, IMP
 
 static bool not_feat_aa64el3(struct kvm *kvm)
 {
@@ -1277,6 +1278,58 @@ static const struct reg_bits_to_feat_map vtcr_el2_feat_map[] = {
 static const DECLARE_FEAT_MAP(vtcr_el2_desc, VTCR_EL2,
                              vtcr_el2_feat_map, FEAT_AA64EL2);
 
+static const struct reg_bits_to_feat_map ich_hfgrtr_feat_map[] = {
+       NEEDS_FEAT(ICH_HFGRTR_EL2_ICC_APR_EL1 |
+                  ICH_HFGRTR_EL2_ICC_IDRn_EL1 |
+                  ICH_HFGRTR_EL2_ICC_CR0_EL1 |
+                  ICH_HFGRTR_EL2_ICC_HPPIR_EL1 |
+                  ICH_HFGRTR_EL2_ICC_PCR_EL1 |
+                  ICH_HFGRTR_EL2_ICC_ICSR_EL1 |
+                  ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1 |
+                  ICH_HFGRTR_EL2_ICC_PPI_HMRn_EL1 |
+                  ICH_HFGRTR_EL2_ICC_PPI_ENABLERn_EL1 |
+                  ICH_HFGRTR_EL2_ICC_PPI_PENDRn_EL1 |
+                  ICH_HFGRTR_EL2_ICC_PPI_PRIORITYRn_EL1 |
+                  ICH_HFGRTR_EL2_ICC_PPI_ACTIVERn_EL1,
+                  FEAT_GCIE),
+};
+
+static const DECLARE_FEAT_MAP_FGT(ich_hfgrtr_desc, ich_hfgrtr_masks,
+                                 ich_hfgrtr_feat_map, FEAT_GCIE);
+
+static const struct reg_bits_to_feat_map ich_hfgwtr_feat_map[] = {
+       NEEDS_FEAT(ICH_HFGWTR_EL2_ICC_APR_EL1 |
+                  ICH_HFGWTR_EL2_ICC_CR0_EL1 |
+                  ICH_HFGWTR_EL2_ICC_PCR_EL1 |
+                  ICH_HFGWTR_EL2_ICC_ICSR_EL1 |
+                  ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1 |
+                  ICH_HFGWTR_EL2_ICC_PPI_PENDRn_EL1 |
+                  ICH_HFGWTR_EL2_ICC_PPI_PRIORITYRn_EL1 |
+                  ICH_HFGWTR_EL2_ICC_PPI_ACTIVERn_EL1,
+                  FEAT_GCIE),
+};
+
+static const DECLARE_FEAT_MAP_FGT(ich_hfgwtr_desc, ich_hfgwtr_masks,
+                                 ich_hfgwtr_feat_map, FEAT_GCIE);
+
+static const struct reg_bits_to_feat_map ich_hfgitr_feat_map[] = {
+       NEEDS_FEAT(ICH_HFGITR_EL2_GICCDEN |
+                  ICH_HFGITR_EL2_GICCDDIS |
+                  ICH_HFGITR_EL2_GICCDPRI |
+                  ICH_HFGITR_EL2_GICCDAFF |
+                  ICH_HFGITR_EL2_GICCDPEND |
+                  ICH_HFGITR_EL2_GICCDRCFG |
+                  ICH_HFGITR_EL2_GICCDHM |
+                  ICH_HFGITR_EL2_GICCDEOI |
+                  ICH_HFGITR_EL2_GICCDDI |
+                  ICH_HFGITR_EL2_GICRCDIA |
+                  ICH_HFGITR_EL2_GICRCDNMIA,
+                  FEAT_GCIE),
+};
+
+static const DECLARE_FEAT_MAP_FGT(ich_hfgitr_desc, ich_hfgitr_masks,
+                                 ich_hfgitr_feat_map, FEAT_GCIE);
+
 static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
                                  int map_size, u64 resx, const char *str)
 {
@@ -1328,6 +1381,9 @@ void __init check_feature_map(void)
        check_reg_desc(&sctlr_el2_desc);
        check_reg_desc(&mdcr_el2_desc);
        check_reg_desc(&vtcr_el2_desc);
+       check_reg_desc(&ich_hfgrtr_desc);
+       check_reg_desc(&ich_hfgwtr_desc);
+       check_reg_desc(&ich_hfgitr_desc);
 }
 
 static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)
@@ -1460,6 +1516,13 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
                val |= compute_fgu_bits(kvm, &hdfgrtr2_desc);
                val |= compute_fgu_bits(kvm, &hdfgwtr2_desc);
                break;
+       case ICH_HFGRTR_GROUP:
+               val |= compute_fgu_bits(kvm, &ich_hfgrtr_desc);
+               val |= compute_fgu_bits(kvm, &ich_hfgwtr_desc);
+               break;
+       case ICH_HFGITR_GROUP:
+               val |= compute_fgu_bits(kvm, &ich_hfgitr_desc);
+               break;
        default:
                BUG();
        }
@@ -1531,6 +1594,15 @@ struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg)
        case VTCR_EL2:
                resx = compute_reg_resx_bits(kvm, &vtcr_el2_desc, 0, 0);
                break;
+       case ICH_HFGRTR_EL2:
+               resx = compute_reg_resx_bits(kvm, &ich_hfgrtr_desc, 0, 0);
+               break;
+       case ICH_HFGWTR_EL2:
+               resx = compute_reg_resx_bits(kvm, &ich_hfgwtr_desc, 0, 0);
+               break;
+       case ICH_HFGITR_EL2:
+               resx = compute_reg_resx_bits(kvm, &ich_hfgitr_desc, 0, 0);
+               break;
        default:
                WARN_ON_ONCE(1);
                resx = (typeof(resx)){};
@@ -1565,6 +1637,12 @@ static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg
                return &hdfgrtr2_masks;
        case HDFGWTR2_EL2:
                return &hdfgwtr2_masks;
+       case ICH_HFGRTR_EL2:
+               return &ich_hfgrtr_masks;
+       case ICH_HFGWTR_EL2:
+               return &ich_hfgwtr_masks;
+       case ICH_HFGITR_EL2:
+               return &ich_hfgitr_masks;
        default:
                BUILD_BUG_ON(1);
        }
@@ -1618,12 +1696,17 @@ void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
        __compute_hdfgwtr(vcpu);
        __compute_fgt(vcpu, HAFGRTR_EL2);
 
-       if (!cpus_have_final_cap(ARM64_HAS_FGT2))
-               return;
+       if (cpus_have_final_cap(ARM64_HAS_FGT2)) {
+               __compute_fgt(vcpu, HFGRTR2_EL2);
+               __compute_fgt(vcpu, HFGWTR2_EL2);
+               __compute_fgt(vcpu, HFGITR2_EL2);
+               __compute_fgt(vcpu, HDFGRTR2_EL2);
+               __compute_fgt(vcpu, HDFGWTR2_EL2);
+       }
 
-       __compute_fgt(vcpu, HFGRTR2_EL2);
-       __compute_fgt(vcpu, HFGWTR2_EL2);
-       __compute_fgt(vcpu, HFGITR2_EL2);
-       __compute_fgt(vcpu, HDFGRTR2_EL2);
-       __compute_fgt(vcpu, HDFGWTR2_EL2);
+       if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
+               __compute_fgt(vcpu, ICH_HFGRTR_EL2);
+               __compute_fgt(vcpu, ICH_HFGWTR_EL2);
+               __compute_fgt(vcpu, ICH_HFGITR_EL2);
+       }
 }
index 22d497554c949a15dd6fb50843d41cc854b70ea5..dba7ced74ca5e80607b964ed27f93b748943fac9 100644 (file)
@@ -2053,6 +2053,60 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
        SR_FGT(SYS_AMEVCNTR0_EL0(2),    HAFGRTR, AMEVCNTR02_EL0, 1),
        SR_FGT(SYS_AMEVCNTR0_EL0(1),    HAFGRTR, AMEVCNTR01_EL0, 1),
        SR_FGT(SYS_AMEVCNTR0_EL0(0),    HAFGRTR, AMEVCNTR00_EL0, 1),
+
+       /*
+        * ICH_HFGRTR_EL2 & ICH_HFGWTR_EL2
+        */
+       SR_FGT(SYS_ICC_APR_EL1,                 ICH_HFGRTR, ICC_APR_EL1, 0),
+       SR_FGT(SYS_ICC_IDR0_EL1,                ICH_HFGRTR, ICC_IDRn_EL1, 0),
+       SR_FGT(SYS_ICC_CR0_EL1,                 ICH_HFGRTR, ICC_CR0_EL1, 0),
+       SR_FGT(SYS_ICC_HPPIR_EL1,               ICH_HFGRTR, ICC_HPPIR_EL1, 0),
+       SR_FGT(SYS_ICC_PCR_EL1,                 ICH_HFGRTR, ICC_PCR_EL1, 0),
+       SR_FGT(SYS_ICC_ICSR_EL1,                ICH_HFGRTR, ICC_ICSR_EL1, 0),
+       SR_FGT(SYS_ICC_IAFFIDR_EL1,             ICH_HFGRTR, ICC_IAFFIDR_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_HMR0_EL1,            ICH_HFGRTR, ICC_PPI_HMRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_HMR1_EL1,            ICH_HFGRTR, ICC_PPI_HMRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_ENABLER0_EL1,        ICH_HFGRTR, ICC_PPI_ENABLERn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_ENABLER1_EL1,        ICH_HFGRTR, ICC_PPI_ENABLERn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_CPENDR0_EL1,         ICH_HFGRTR, ICC_PPI_PENDRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_CPENDR1_EL1,         ICH_HFGRTR, ICC_PPI_PENDRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_SPENDR0_EL1,         ICH_HFGRTR, ICC_PPI_PENDRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_SPENDR1_EL1,         ICH_HFGRTR, ICC_PPI_PENDRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR0_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR1_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR2_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR3_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR4_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR5_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR6_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR7_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR8_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR9_EL1,      ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR10_EL1,     ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR11_EL1,     ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR12_EL1,     ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR13_EL1,     ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR14_EL1,     ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_PRIORITYR15_EL1,     ICH_HFGRTR, ICC_PPI_PRIORITYRn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_CACTIVER0_EL1,       ICH_HFGRTR, ICC_PPI_ACTIVERn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_CACTIVER1_EL1,       ICH_HFGRTR, ICC_PPI_ACTIVERn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_SACTIVER0_EL1,       ICH_HFGRTR, ICC_PPI_ACTIVERn_EL1, 0),
+       SR_FGT(SYS_ICC_PPI_SACTIVER1_EL1,       ICH_HFGRTR, ICC_PPI_ACTIVERn_EL1, 0),
+
+       /*
+        * ICH_HFGITR_EL2
+        */
+       SR_FGT(GICV5_OP_GIC_CDEN,       ICH_HFGITR, GICCDEN, 0),
+       SR_FGT(GICV5_OP_GIC_CDDIS,      ICH_HFGITR, GICCDDIS, 0),
+       SR_FGT(GICV5_OP_GIC_CDPRI,      ICH_HFGITR, GICCDPRI, 0),
+       SR_FGT(GICV5_OP_GIC_CDAFF,      ICH_HFGITR, GICCDAFF, 0),
+       SR_FGT(GICV5_OP_GIC_CDPEND,     ICH_HFGITR, GICCDPEND, 0),
+       SR_FGT(GICV5_OP_GIC_CDRCFG,     ICH_HFGITR, GICCDRCFG, 0),
+       SR_FGT(GICV5_OP_GIC_CDHM,       ICH_HFGITR, GICCDHM, 0),
+       SR_FGT(GICV5_OP_GIC_CDEOI,      ICH_HFGITR, GICCDEOI, 0),
+       SR_FGT(GICV5_OP_GIC_CDDI,       ICH_HFGITR, GICCDDI, 0),
+       SR_FGT(GICV5_OP_GICR_CDIA,      ICH_HFGITR, GICRCDIA, 0),
+       SR_FGT(GICV5_OP_GICR_CDNMIA,    ICH_HFGITR, GICRCDNMIA, 0),
 };
 
 /*
@@ -2127,6 +2181,9 @@ FGT_MASKS(hfgwtr2_masks, HFGWTR2_EL2);
 FGT_MASKS(hfgitr2_masks, HFGITR2_EL2);
 FGT_MASKS(hdfgrtr2_masks, HDFGRTR2_EL2);
 FGT_MASKS(hdfgwtr2_masks, HDFGWTR2_EL2);
+FGT_MASKS(ich_hfgrtr_masks, ICH_HFGRTR_EL2);
+FGT_MASKS(ich_hfgwtr_masks, ICH_HFGWTR_EL2);
+FGT_MASKS(ich_hfgitr_masks, ICH_HFGITR_EL2);
 
 static __init bool aggregate_fgt(union trap_config tc)
 {
@@ -2162,6 +2219,14 @@ static __init bool aggregate_fgt(union trap_config tc)
                rmasks = &hfgitr2_masks;
                wmasks = NULL;
                break;
+       case ICH_HFGRTR_GROUP:
+               rmasks = &ich_hfgrtr_masks;
+               wmasks = &ich_hfgwtr_masks;
+               break;
+       case ICH_HFGITR_GROUP:
+               rmasks = &ich_hfgitr_masks;
+               wmasks = NULL;
+               break;
        }
 
        rresx = rmasks->res0 | rmasks->res1;
@@ -2232,6 +2297,9 @@ static __init int check_all_fgt_masks(int ret)
                &hfgitr2_masks,
                &hdfgrtr2_masks,
                &hdfgwtr2_masks,
+               &ich_hfgrtr_masks,
+               &ich_hfgwtr_masks,
+               &ich_hfgitr_masks,
        };
        int err = 0;
 
index 2597e8bda86728d5e02e975c7453a940492355df..ae04fd680d1e223a6d374692e1687f68e608e1a7 100644 (file)
@@ -233,6 +233,18 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
        __activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
 }
 
+static inline void __activate_traps_ich_hfgxtr(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
+       if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
+               return;
+
+       __activate_fgt(hctxt, vcpu, ICH_HFGRTR_EL2);
+       __activate_fgt(hctxt, vcpu, ICH_HFGWTR_EL2);
+       __activate_fgt(hctxt, vcpu, ICH_HFGITR_EL2);
+}
+
 #define __deactivate_fgt(htcxt, vcpu, reg)                             \
        do {                                                            \
                write_sysreg_s(ctxt_sys_reg(hctxt, reg),                \
@@ -265,6 +277,19 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
        __deactivate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
 }
 
+static inline void __deactivate_traps_ich_hfgxtr(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
+       if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
+               return;
+
+       __deactivate_fgt(hctxt, vcpu, ICH_HFGRTR_EL2);
+       __deactivate_fgt(hctxt, vcpu, ICH_HFGWTR_EL2);
+       __deactivate_fgt(hctxt, vcpu, ICH_HFGITR_EL2);
+
+}
+
 static inline void  __activate_traps_mpam(struct kvm_vcpu *vcpu)
 {
        u64 r = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;
@@ -328,6 +353,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
        }
 
        __activate_traps_hfgxtr(vcpu);
+       __activate_traps_ich_hfgxtr(vcpu);
        __activate_traps_mpam(vcpu);
 }
 
@@ -345,6 +371,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
                write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);
 
        __deactivate_traps_hfgxtr(vcpu);
+       __deactivate_traps_ich_hfgxtr(vcpu);
        __deactivate_traps_mpam();
 }
 
index 779089e42681e8c4add0e404e55a2f9b912ba38a..b41485ce295abd6e6397d9ead9af4a2e42f313c4 100644 (file)
@@ -44,6 +44,9 @@ struct fgt_masks hfgwtr2_masks;
 struct fgt_masks hfgitr2_masks;
 struct fgt_masks hdfgrtr2_masks;
 struct fgt_masks hdfgwtr2_masks;
+struct fgt_masks ich_hfgrtr_masks;
+struct fgt_masks ich_hfgwtr_masks;
+struct fgt_masks ich_hfgitr_masks;
 
 extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
 
index 140cf35f4eeb4571177b518d01f2bd57387a8408..cd6deaf47315969792dfce0994b389ab1a081fc2 100644 (file)
@@ -5661,6 +5661,8 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
        compute_fgu(kvm, HFGRTR2_GROUP);
        compute_fgu(kvm, HFGITR2_GROUP);
        compute_fgu(kvm, HDFGRTR2_GROUP);
+       compute_fgu(kvm, ICH_HFGRTR_GROUP);
+       compute_fgu(kvm, ICH_HFGITR_GROUP);
 
        set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
 out: