git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: arm64: Track SVE state in the hypervisor vcpu structure
author: Fuad Tabba <tabba@google.com>
Wed, 16 Apr 2025 15:26:41 +0000 (15:26 +0000)
committer: Marc Zyngier <maz@kernel.org>
Mon, 28 Apr 2025 08:23:46 +0000 (09:23 +0100)
When dealing with a guest with SVE enabled, make sure the host SVE
state is pinned at EL2 S1, and that the hypervisor vCPU state is
correctly initialised (and then unpinned on teardown).

Co-authored-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20250416152648.2982950-2-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/pkvm.c

index 08ba91e6fb035a1dab1304327e4f596bede3b6f8..330f3c34a6bd03db2f5ac263daadf3667df580c4 100644 (file)
@@ -971,20 +971,22 @@ struct kvm_vcpu_arch {
 #define vcpu_sve_zcr_elx(vcpu)                                         \
        (unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
 
-#define vcpu_sve_state_size(vcpu) ({                                   \
+#define sve_state_size_from_vl(sve_max_vl) ({                          \
        size_t __size_ret;                                              \
-       unsigned int __vcpu_vq;                                         \
+       unsigned int __vq;                                              \
                                                                        \
-       if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {          \
+       if (WARN_ON(!sve_vl_valid(sve_max_vl))) {                       \
                __size_ret = 0;                                         \
        } else {                                                        \
-               __vcpu_vq = vcpu_sve_max_vq(vcpu);                      \
-               __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);              \
+               __vq = sve_vq_from_vl(sve_max_vl);                      \
+               __size_ret = SVE_SIG_REGS_SIZE(__vq);                   \
        }                                                               \
                                                                        \
        __size_ret;                                                     \
 })
 
+#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
+
 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
                                 KVM_GUESTDBG_USE_SW_BP | \
                                 KVM_GUESTDBG_USE_HW | \
index 2c37680d954cf2c2aed5abe7c2225b682861869a..59db9606e6e15f30a35c694495477afda908a49b 100644 (file)
@@ -123,10 +123,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 
        hyp_vcpu->vcpu.arch.ctxt        = host_vcpu->arch.ctxt;
 
-       hyp_vcpu->vcpu.arch.sve_state   = kern_hyp_va(host_vcpu->arch.sve_state);
-       /* Limit guest vector length to the maximum supported by the host.  */
-       hyp_vcpu->vcpu.arch.sve_max_vl  = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
-
        hyp_vcpu->vcpu.arch.mdcr_el2    = host_vcpu->arch.mdcr_el2;
        hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
        hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
index 5a335a51deca149aeb5df1c9acfc36afd1a13fb0..338505cb0171ba13937483436cff68786f1afb7f 100644 (file)
@@ -372,6 +372,18 @@ static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
                hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
 }
 
+static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+       void *sve_state;
+
+       if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
+               return;
+
+       sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
+       hyp_unpin_shared_mem(sve_state,
+                            sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
+}
+
 static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
                             unsigned int nr_vcpus)
 {
@@ -384,6 +396,7 @@ static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
                        continue;
 
                unpin_host_vcpu(hyp_vcpu->host_vcpu);
+               unpin_host_sve_state(hyp_vcpu);
        }
 }
 
@@ -398,12 +411,40 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
        pkvm_init_features_from_host(hyp_vm, host_kvm);
 }
 
-static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
+static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
 {
        struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+       unsigned int sve_max_vl;
+       size_t sve_state_size;
+       void *sve_state;
+       int ret = 0;
 
-       if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
+       if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
                vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+               return 0;
+       }
+
+       /* Limit guest vector length to the maximum supported by the host. */
+       sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
+       sve_state_size = sve_state_size_from_vl(sve_max_vl);
+       sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));
+
+       if (!sve_state || !sve_state_size) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
+       if (ret)
+               goto err;
+
+       vcpu->arch.sve_state = sve_state;
+       vcpu->arch.sve_max_vl = sve_max_vl;
+
+       return 0;
+err:
+       clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features);
+       return ret;
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -432,7 +473,7 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
        if (ret)
                goto done;
 
-       pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
+       ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
 done:
        if (ret)
                unpin_host_vcpu(host_vcpu);