KVM: arm64: Initialize the hypervisor's VM state at EL2
author    Fuad Tabba <tabba@google.com>
          Fri, 18 Oct 2024 07:48:31 +0000 (08:48 +0100)
committer Oliver Upton <oliver.upton@linux.dev>
          Thu, 31 Oct 2024 18:45:24 +0000 (18:45 +0000)
Do not trust the state of the VM as provided by the host when
initializing the hypervisor's view of the VM state. Instead,
initialize it at EL2 to a known good and safe state, as pKVM
already does with hypervisor VCPU states.

Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241018074833.2563674-4-tabba@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
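
For illustration only, the snippet below is a small standalone sketch (plain
userspace C, not kernel code) of the pattern the patch applies in
pkvm_init_features_from_host(): at EL2 the hypervisor builds its own
allowed-feature set and intersects it with whatever the host requested,
rather than copying the host's bitmap unchecked. The feature names, helper
function, and policy flags below are invented for the example and do not
correspond to the real KVM_ARM_VCPU_* values or PVM_ID_* masks.

/*
 * Standalone sketch: sanitize a host-requested feature bitmap against a
 * policy computed by the hypervisor itself, mirroring the shape of
 * pkvm_init_features_from_host() in simplified form.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative feature bits; the real code uses KVM_ARM_VCPU_* indices. */
enum {
	FEAT_POWER_OFF,
	FEAT_PSCI_0_2,
	FEAT_PMU_V3,
	FEAT_SVE,
	FEAT_PTRAUTH_ADDRESS,
	FEAT_PTRAUTH_GENERIC,
	NR_FEATURES,
};

static uint64_t sanitize_features(uint64_t host_requested, bool is_protected,
				  bool pmu_ok, bool sve_ok, bool ptrauth_ok)
{
	uint64_t allowed = 0;

	/* Non-protected VMs: no restrictions, take the host's set as-is. */
	if (!is_protected)
		return host_requested;

	/* Protected VMs: start from an empty set and opt features in. */
	allowed |= 1ULL << FEAT_POWER_OFF;
	allowed |= 1ULL << FEAT_PSCI_0_2;
	if (pmu_ok)
		allowed |= 1ULL << FEAT_PMU_V3;
	if (sve_ok)
		allowed |= 1ULL << FEAT_SVE;
	if (ptrauth_ok) {
		allowed |= 1ULL << FEAT_PTRAUTH_ADDRESS;
		allowed |= 1ULL << FEAT_PTRAUTH_GENERIC;
	}

	/* Keep only the host-requested features that the policy allows. */
	return host_requested & allowed;
}

int main(void)
{
	/* Host asks for power-off, PMU and SVE on a protected VM... */
	uint64_t requested = (1ULL << FEAT_POWER_OFF) |
			     (1ULL << FEAT_PMU_V3) |
			     (1ULL << FEAT_SVE);

	/* ...but this example policy allows SVE and not the PMU. */
	uint64_t granted = sanitize_features(requested, true,
					     false /* pmu */,
					     true  /* sve */,
					     false /* ptrauth */);

	printf("requested 0x%llx -> granted 0x%llx\n",
	       (unsigned long long)requested, (unsigned long long)granted);
	return 0;
}

In the actual patch the same intersection is done with bitmap_and() over
kvm->arch.vcpu_features, and the "allowed" decisions come from the PVM_ID_*
register masks rather than boolean parameters.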
arch/arm64/kvm/hyp/nvhe/pkvm.c

index 869955e551a050d0b2e25b4d3275bef3713ef017..954df57a935f5e143f8340e9e77bb511b5dd0626 100644 (file)
@@ -6,6 +6,9 @@
 
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
+
+#include <asm/kvm_emulate.h>
+
 #include <nvhe/fixed_config.h>
 #include <nvhe/mem_protect.h>
 #include <nvhe/memory.h>
@@ -289,6 +292,65 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
        hyp_spin_unlock(&vm_table_lock);
 }
 
+static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
+{
+       struct kvm *kvm = &hyp_vm->kvm;
+       DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
+
+       /* No restrictions for non-protected VMs. */
+       if (!kvm_vm_is_protected(kvm)) {
+               bitmap_copy(kvm->arch.vcpu_features,
+                           host_kvm->arch.vcpu_features,
+                           KVM_VCPU_MAX_FEATURES);
+               return;
+       }
+
+       bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);
+
+       /*
+        * For protected VMs, always allow:
+        * - CPU starting in poweroff state
+        * - PSCI v0.2
+        */
+       set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features);
+       set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);
+
+       /*
+        * Check if remaining features are allowed:
+        * - Performance Monitoring
+        * - Scalable Vectors
+        * - Pointer Authentication
+        */
+       if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
+               set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);
+
+       if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
+               set_bit(KVM_ARM_VCPU_SVE, allowed_features);
+
+       if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
+           FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
+               set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
+
+       if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
+           FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW))
+               set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
+
+       bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
+                  allowed_features, KVM_VCPU_MAX_FEATURES);
+}
+
+static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+       struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+
+       if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
+           vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) {
+               kvm_vcpu_enable_ptrauth(vcpu);
+       } else {
+               vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH);
+       }
+}
+
 static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
 {
        if (host_vcpu)
@@ -310,6 +372,18 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
        hyp_vm->host_kvm = host_kvm;
        hyp_vm->kvm.created_vcpus = nr_vcpus;
        hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
+       hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
+       pkvm_init_features_from_host(hyp_vm, host_kvm);
+}
+
+static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
+{
+       struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+
+       if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
+               vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
+               vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+       }
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -335,7 +409,10 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 
        hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
        hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
+       hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
 
+       pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
+       pkvm_vcpu_init_ptrauth(hyp_vcpu);
        pkvm_vcpu_init_traps(&hyp_vcpu->vcpu);
 done:
        if (ret)