git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: Fix ID register initialization for non-protected pKVM guests
authorFuad Tabba <tabba@google.com>
Fri, 13 Feb 2026 14:38:14 +0000 (14:38 +0000)
committerMarc Zyngier <maz@kernel.org>
Fri, 13 Feb 2026 14:54:48 +0000 (14:54 +0000)
In protected mode, the hypervisor maintains a separate instance of
the `kvm` structure for each VM. For non-protected VMs, this structure is
initialized from the host's `kvm` state.

Currently, `pkvm_init_features_from_host()` copies the
`KVM_ARCH_FLAG_ID_REGS_INITIALIZED` flag from the host without the
underlying `id_regs` data being initialized. This results in the
hypervisor seeing the flag as set while the ID registers remain zeroed.

Consequently, `kvm_has_feat()` checks at EL2 fail (return 0) for
non-protected VMs. This breaks logic that relies on feature detection,
such as `ctxt_has_tcrx()` for TCR2_EL1 support. As a result, certain
system registers (e.g., TCR2_EL1, PIR_EL1, POR_EL1) are not
saved/restored during the world switch, which could lead to state
corruption.

Fix this by explicitly copying the ID registers from the host `kvm` to
the hypervisor `kvm` for non-protected VMs during initialization, since
we trust the host with its non-protected guests' features. Also ensure
`KVM_ARCH_FLAG_ID_REGS_INITIALIZED` is cleared initially in
`pkvm_init_features_from_host` so that `vm_copy_id_regs` can properly
initialize them and set the flag once done.

Fixes: 41d6028e28bd ("KVM: arm64: Convert the SVE guest vcpu flag to a vm flag")
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://patch.msgid.link/20260213143815.1732675-4-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/nvhe/pkvm.c

index 8e29d7734a1555b283fb8744a9c8939d51f9a707..f3c1c695516342667850c75aba8b06d67b6dd7c7 100644 (file)
@@ -342,6 +342,7 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
        /* No restrictions for non-protected VMs. */
        if (!kvm_vm_is_protected(kvm)) {
                hyp_vm->kvm.arch.flags = host_arch_flags;
+               hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);
 
                bitmap_copy(kvm->arch.vcpu_features,
                            host_kvm->arch.vcpu_features,
@@ -471,6 +472,35 @@ err:
        return ret;
 }
 
+static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+       struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+       const struct kvm *host_kvm = hyp_vm->host_kvm;
+       struct kvm *kvm = &hyp_vm->kvm;
+
+       if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
+               return -EINVAL;
+
+       if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
+               return 0;
+
+       memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
+
+       return 0;
+}
+
+static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+       int ret = 0;
+
+       if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+               kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+       else
+               ret = vm_copy_id_regs(hyp_vcpu);
+
+       return ret;
+}
+
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
                              struct pkvm_hyp_vm *hyp_vm,
                              struct kvm_vcpu *host_vcpu)
@@ -490,8 +520,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
        hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
        hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
 
-       if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
-               kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+       ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
+       if (ret)
+               goto done;
 
        ret = pkvm_vcpu_init_traps(hyp_vcpu);
        if (ret)