KVM: arm64: Add helper to identify a nested context
author Marc Zyngier <maz@kernel.org>
Tue, 8 Jul 2025 17:25:08 +0000 (10:25 -0700)
committer Oliver Upton <oliver.upton@linux.dev>
Tue, 8 Jul 2025 17:40:30 +0000 (10:40 -0700)
A common idiom in the KVM code is to check if we are currently
dealing with a "nested" context, defined as having NV enabled,
but being in the EL1&0 translation regime.

This is usually expressed as:

if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu) ... )

which is a mouthful and a bit hard to read, especially when followed
by additional conditions.

Introduce a new helper that encapsulates these two terms, allowing
the above to be written as

if (is_nested_ctxt(vcpu) ... )

which is both shorter and easier to read, and makes the potential
for simplification on some code paths more obvious.
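
For illustration, a minimal self-contained C sketch of the pattern (the
struct layout and the vcpu_has_nv()/is_hyp_ctxt() stubs below are simplified
placeholders, not the real KVM implementations; only is_nested_ctxt() mirrors
the helper added here):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified placeholder vCPU state; the real struct kvm_vcpu is far richer. */
    struct kvm_vcpu {
            bool nv_enabled;        /* NV exposed to the guest */
            bool in_hyp_ctxt;       /* vCPU currently in the (virtual) EL2 context */
    };

    /* Stubs standing in for the real predicates. */
    static bool vcpu_has_nv(const struct kvm_vcpu *vcpu) { return vcpu->nv_enabled; }
    static bool is_hyp_ctxt(const struct kvm_vcpu *vcpu) { return vcpu->in_hyp_ctxt; }

    /* The helper introduced by this patch. */
    static bool is_nested_ctxt(struct kvm_vcpu *vcpu)
    {
            return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
    }

    int main(void)
    {
            struct kvm_vcpu vcpu = { .nv_enabled = true, .in_hyp_ctxt = false };

            /* Before: if (vcpu_has_nv(&vcpu) && !is_hyp_ctxt(&vcpu)) ... */
            if (is_nested_ctxt(&vcpu))
                    printf("vCPU is in a nested (EL1&0) context\n");

            return 0;
    }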

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250708172532.1699409-4-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/kvm/vgic/vgic-v3-nested.c

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 0720898f563e90563452c560e7f6892e9e300b75..8ba991b4bcfdead493aaff9da10d3f1963015f60 100644
@@ -224,6 +224,11 @@ static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
        return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
 }
 
+static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
+{
+       return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+}
+
 /*
  * The layout of SPSR for an AArch32 state is different when observed from an
  * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 701ea10a63f1eff98fa4b4295e14163e8a65da89..dbd74e4885e244bc22db0962b4fc0c1ce671ff11 100644
@@ -830,7 +830,7 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
         * by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but
         * not both). This simplifies the handling of the EL1NV* bits.
         */
-       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+       if (is_nested_ctxt(vcpu)) {
                u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
 
                /* Use the VHE format for mental sanity */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 38a91bb5d4c75145daf890ec988288bdcec0ab3e..659446378fca19f0eb2b4ed5f1663a65e8f616d6 100644
@@ -521,7 +521,7 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
                 * Either we're running an L2 guest, and the API/APK bits come
                 * from L1's HCR_EL2, or API/APK are both set.
                 */
-               if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
+               if (unlikely(is_nested_ctxt(vcpu))) {
                        u64 val;
 
                        val = __vcpu_sys_reg(vcpu, HCR_EL2);
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 3a384e9660b83fd17c3e775568a41be3e4f56a35..1de4a9001d9d529c00c8ec15171a57a0cb378834 100644
@@ -2592,13 +2592,8 @@ inject:
 
 static bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg, u64 control_bit)
 {
-       bool control_bit_set;
-
-       if (!vcpu_has_nv(vcpu))
-               return false;
-
-       control_bit_set = __vcpu_sys_reg(vcpu, reg) & control_bit;
-       if (!is_hyp_ctxt(vcpu) && control_bit_set) {
+       if (is_nested_ctxt(vcpu) &&
+           (__vcpu_sys_reg(vcpu, reg) & control_bit)) {
                kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
                return true;
        }
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 453266c964819d3f452f220e45dfef0b10ab1853..c37c58d9d25d6c9d1a25781cab3eb56eda55ebd1 100644
@@ -252,7 +252,7 @@ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+       if (is_nested_ctxt(vcpu)) {
                kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
                return 1;
        }
@@ -311,12 +311,11 @@ static int kvm_handle_gcs(struct kvm_vcpu *vcpu)
 
 static int handle_other(struct kvm_vcpu *vcpu)
 {
-       bool is_l2 = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+       bool allowed, fwd = is_nested_ctxt(vcpu);
        u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
        u64 esr = kvm_vcpu_get_esr(vcpu);
        u64 iss = ESR_ELx_ISS(esr);
        struct kvm *kvm = vcpu->kvm;
-       bool allowed, fwd = false;
 
        /*
         * We only trap for two reasons:
@@ -335,28 +334,23 @@ static int handle_other(struct kvm_vcpu *vcpu)
        switch (iss) {
        case ESR_ELx_ISS_OTHER_ST64BV:
                allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
-               if (is_l2)
-                       fwd = !(hcrx & HCRX_EL2_EnASR);
+               fwd &= !(hcrx & HCRX_EL2_EnASR);
                break;
        case ESR_ELx_ISS_OTHER_ST64BV0:
                allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
-               if (is_l2)
-                       fwd = !(hcrx & HCRX_EL2_EnAS0);
+               fwd &= !(hcrx & HCRX_EL2_EnAS0);
                break;
        case ESR_ELx_ISS_OTHER_LDST64B:
                allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
-               if (is_l2)
-                       fwd = !(hcrx & HCRX_EL2_EnALS);
+               fwd &= !(hcrx & HCRX_EL2_EnALS);
                break;
        case ESR_ELx_ISS_OTHER_TSBCSYNC:
                allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1);
-               if (is_l2)
-                       fwd = (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC);
+               fwd &= (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC);
                break;
        case ESR_ELx_ISS_OTHER_PSBCSYNC:
                allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P5);
-               if (is_l2)
-                       fwd = (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC);
+               fwd &= (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC);
                break;
        default:
                /* Clearly, we're missing something. */
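
A note on the handle_other() rework above: because fwd is now initialised to
is_nested_ctxt(vcpu), each per-case "fwd &= cond" statement is equivalent to
the old "if (is_l2) fwd = cond" form, since fwd stays false whenever the vCPU
is not in a nested context. A tiny standalone sketch of that identity (plain C
with hypothetical placeholder values, not real KVM state):

    #include <assert.h>
    #include <stdbool.h>

    int main(void)
    {
            /* Enumerate every combination of "nested context" and per-case condition. */
            for (int nested = 0; nested <= 1; nested++) {
                    for (int cond = 0; cond <= 1; cond++) {
                            /* Old form: fwd defaults to false, assigned only when is_l2. */
                            bool fwd_old = false;
                            if (nested)
                                    fwd_old = cond;

                            /* New form: fwd starts as is_nested_ctxt() and is masked per case. */
                            bool fwd_new = nested;
                            fwd_new &= cond;

                            assert(fwd_old == fwd_new);
                    }
            }
            return 0;
    }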
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 2ad57b117385a293341371c95d96af82dde873bd..8a854ab5f7059c6d191cc0477aa669445687c7a3 100644
@@ -298,7 +298,7 @@ static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
                u64 val;                                                \
                                                                        \
                ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);  \
-               if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))            \
+               if (is_nested_ctxt(vcpu))                               \
                        compute_clr_set(vcpu, reg, c, s);               \
                                                                        \
                compute_undef_clr_set(vcpu, kvm, reg, c, s);            \
@@ -436,7 +436,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 
        if (cpus_have_final_cap(ARM64_HAS_HCX)) {
                u64 hcrx = vcpu->arch.hcrx_el2;
-               if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+               if (is_nested_ctxt(vcpu)) {
                        u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2);
                        hcrx |= val & __HCRX_EL2_MASK;
                        hcrx &= ~(~val & __HCRX_EL2_nMASK);
@@ -531,7 +531,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
         * nested guest, as the guest hypervisor could select a smaller VL. Slap
         * that into hardware before wrapping up.
         */
-       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+       if (is_nested_ctxt(vcpu))
                sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
 
        write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
@@ -557,7 +557,7 @@ static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
 
        if (vcpu_has_sve(vcpu)) {
                /* A guest hypervisor may restrict the effective max VL. */
-               if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+               if (is_nested_ctxt(vcpu))
                        zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
                else
                        zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index f162b0df5cae5a711408cab51a1255dfcf1420fa..ac84710fa14e8a0c34d2d8b951a8029d2d75cebe 100644
@@ -1050,7 +1050,7 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
 {
        u64 ich_hcr;
 
-       if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+       if (!is_nested_ctxt(vcpu))
                return false;
 
        ich_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index a50fb7e6841f79b9852178494ada41924f92b255..9c3997776f507baff68a01da879879d4b79271d6 100644
@@ -116,7 +116,7 @@ bool vgic_state_is_nested(struct kvm_vcpu *vcpu)
 {
        u64 xmo;
 
-       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+       if (is_nested_ctxt(vcpu)) {
                xmo = __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO);
                WARN_ONCE(xmo && xmo != (HCR_IMO | HCR_FMO),
                          "Separate virtual IRQ/FIQ settings not supported\n");