KVM: arm64: unify code to prepare traps
author    Sebastian Ott <sebott@redhat.com>
          Wed, 19 Jun 2024 17:40:32 +0000 (17:40 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
          Thu, 20 Jun 2024 17:16:44 +0000 (17:16 +0000)
There are two functions that calculate traps via HCR_EL2 (sketched
below):
* kvm_init_sysreg(), called via KVM_RUN (before the first run, or when
  the pid changes)
* vcpu_reset_hcr(), called via KVM_ARM_VCPU_INIT
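
For orientation, here is a simplified sketch of the two entry points as
they stand before this patch (bodies abbreviated from the hunks below;
not the literal kernel code):

  /* KVM_ARM_VCPU_INIT path, arch/arm64/include/asm/kvm_emulate.h */
  static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
  {
          vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;   /* unconditional reset */
          /* ... static trap bits: HCR_E2H, HCR_TEA/TERR, HCR_TID2/4 ... */
  }

  /* KVM_RUN path, arch/arm64/kvm/sys_regs.c */
  void kvm_init_sysreg(struct kvm_vcpu *vcpu)
  {
          /* ... ID-register-dependent traps, e.g. HCR_TTLBOS ... */
  }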

To unify these two and to support traps that depend on the ID register
configuration, move the code from vcpu_reset_hcr() to sys_regs.c and
call it via kvm_init_sysreg().

We still have to keep the non-FWB handling in vcpu_reset_hcr(). The
initialization with HCR_GUEST_FLAGS is also kept there, but guarded by
!vcpu_has_run_once() so that previously calculated values don't get
overwritten.
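
For reference, vcpu_reset_hcr() as it reads after this patch, assembled
from the kvm_emulate.h hunk below:

  static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
  {
          /* don't clobber traps already calculated by kvm_calculate_traps() */
          if (!vcpu_has_run_once(vcpu))
                  vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

          /*
           * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
           * get set in SCTLR_EL1 such that we can detect when the guest
           * MMU gets turned on and do the necessary cache maintenance
           * then.
           */
          if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                  vcpu->arch.hcr_el2 |= HCR_TVM;
  }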

While at it, rename kvm_init_sysreg() to kvm_calculate_traps() to
better reflect what it does.
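
The renamed function takes the VM's config_lock and computes all traps
in one place; a condensed view of the result (the full version is in
the sys_regs.c hunk below, which this sketch extrapolates past the
truncation point):

  void kvm_calculate_traps(struct kvm_vcpu *vcpu)
  {
          struct kvm *kvm = vcpu->kvm;

          mutex_lock(&kvm->arch.config_lock);
          vcpu_set_hcr(vcpu);     /* HCR_EL2 bits formerly set in vcpu_reset_hcr() */
          /* ... remainder of the old kvm_init_sysreg() body (HCRX_EL2, ...) ... */
          mutex_unlock(&kvm->arch.config_lock);
  }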

Signed-off-by: Sebastian Ott <sebott@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20240619174036.483943-7-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/sys_regs.c

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 501e3e019c930c77488331af26ba7007e9254b70..84dc3fac9711516301a10c9cfa6562aa6d7232c9 100644
@@ -69,39 +69,17 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
-       if (has_vhe() || has_hvhe())
-               vcpu->arch.hcr_el2 |= HCR_E2H;
-       if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
-               /* route synchronous external abort exceptions to EL2 */
-               vcpu->arch.hcr_el2 |= HCR_TEA;
-               /* trap error record accesses */
-               vcpu->arch.hcr_el2 |= HCR_TERR;
-       }
+       if (!vcpu_has_run_once(vcpu))
+               vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
 
-       if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
-               vcpu->arch.hcr_el2 |= HCR_FWB;
-       } else {
-               /*
-                * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
-                * get set in SCTLR_EL1 such that we can detect when the guest
-                * MMU gets turned on and do the necessary cache maintenance
-                * then.
-                */
+       /*
+        * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
+        * get set in SCTLR_EL1 such that we can detect when the guest
+        * MMU gets turned on and do the necessary cache maintenance
+        * then.
+        */
+       if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                vcpu->arch.hcr_el2 |= HCR_TVM;
-       }
-
-       if (cpus_have_final_cap(ARM64_HAS_EVT) &&
-           !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
-               vcpu->arch.hcr_el2 |= HCR_TID4;
-       else
-               vcpu->arch.hcr_el2 |= HCR_TID2;
-
-       if (vcpu_el1_is_32bit(vcpu))
-               vcpu->arch.hcr_el2 &= ~HCR_RW;
-
-       if (kvm_has_mte(vcpu->kvm))
-               vcpu->arch.hcr_el2 |= HCR_ATA;
 }
 
 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 294c78319f580c62b040f854bc4b1a9fd249a720..26042875d6fc4c4b357ad4730433935def9083f0 100644
@@ -1120,7 +1120,7 @@ int __init populate_nv_trap_config(void);
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
 
-void kvm_init_sysreg(struct kvm_vcpu *);
+void kvm_calculate_traps(struct kvm_vcpu *vcpu);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9996a989b52e8742566ff6a8fa978981e2a91d49..6b217afb4e8edbdcaec213a85d8963af9e089412 100644
@@ -797,7 +797,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
         * This needs to happen after NV has imposed its own restrictions on
         * the feature set
         */
-       kvm_init_sysreg(vcpu);
+       kvm_calculate_traps(vcpu);
 
        ret = kvm_timer_enable(vcpu);
        if (ret)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 8e33589053717b3d900e9d9bfabf0e54c81789b5..a467ff4290a7201c41fea7428f28464f3d4492a1 100644
@@ -4069,11 +4069,33 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
        return 0;
 }
 
-void kvm_init_sysreg(struct kvm_vcpu *vcpu)
+static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
 
-       mutex_lock(&kvm->arch.config_lock);
+       if (has_vhe() || has_hvhe())
+               vcpu->arch.hcr_el2 |= HCR_E2H;
+       if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+               /* route synchronous external abort exceptions to EL2 */
+               vcpu->arch.hcr_el2 |= HCR_TEA;
+               /* trap error record accesses */
+               vcpu->arch.hcr_el2 |= HCR_TERR;
+       }
+
+       if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+               vcpu->arch.hcr_el2 |= HCR_FWB;
+
+       if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+           !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+               vcpu->arch.hcr_el2 |= HCR_TID4;
+       else
+               vcpu->arch.hcr_el2 |= HCR_TID2;
+
+       if (vcpu_el1_is_32bit(vcpu))
+               vcpu->arch.hcr_el2 &= ~HCR_RW;
+
+       if (kvm_has_mte(vcpu->kvm))
+               vcpu->arch.hcr_el2 |= HCR_ATA;
 
        /*
         * In the absence of FGT, we cannot independently trap TLBI
@@ -4082,6 +4104,14 @@ void kvm_init_sysreg(struct kvm_vcpu *vcpu)
         */
        if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
                vcpu->arch.hcr_el2 |= HCR_TTLBOS;
+}
+
+void kvm_calculate_traps(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       mutex_lock(&kvm->arch.config_lock);
+       vcpu_set_hcr(vcpu);
 
        if (cpus_have_final_cap(ARM64_HAS_HCX)) {
                vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;