From: Paolo Bonzini
Date: Thu, 11 Nov 2021 15:52:26 +0000 (-0500)
Subject: Merge branch 'kvm-sev-move-context' into kvm-master
X-Git-Tag: v5.16-rc1~29^2~4
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1f05833193d816279b03ec9d0170cf9bda9283c2;p=thirdparty%2Fkernel%2Flinux.git

Merge branch 'kvm-sev-move-context' into kvm-master

Add support for AMD SEV and SEV-ES intra-host migration support. Intra host migration provides a low-cost mechanism for userspace VMM upgrades.

In the common case for intra host migration, we can rely on the normal ioctls for passing data from one VMM to the next. SEV, SEV-ES, and other confidential compute environments make most of this information opaque, and render KVM ioctls such as "KVM_GET_REGS" irrelevant. As a result, we need the ability to pass this opaque metadata from one VMM to the next. The easiest way to do this is to leave this data in the kernel, and transfer ownership of the metadata from one KVM VM (or vCPU) to the next. In-kernel hand off makes it possible to move any data that would be unsafe/impossible for the kernel to hand directly to userspace, and cannot be reproduced using data that can be handed to userspace.
Signed-off-by: Paolo Bonzini --- 1f05833193d816279b03ec9d0170cf9bda9283c2 diff --cc arch/x86/kvm/svm/sev.c index 1964b9a174beb,f63f9156964f6..531613f758baf --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@@ -616,14 -624,9 +624,14 @@@ static int __sev_launch_update_vmsa(str vmsa.reserved = 0; vmsa.handle = to_kvm_svm(kvm)->sev_info.handle; - vmsa.address = __sme_pa(svm->vmsa); + vmsa.address = __sme_pa(svm->sev_es.vmsa); vmsa.len = PAGE_SIZE; - return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error); + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error); + if (ret) + return ret; + + vcpu->arch.guest_state_protected = true; + return 0; } static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) @@@ -2591,20 -2786,11 +2798,21 @@@ int sev_handle_vmgexit(struct kvm_vcpu int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) { - if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2)) + int count; + int bytes; + + if (svm->vmcb->control.exit_info_2 > INT_MAX) + return -EINVAL; + + count = svm->vmcb->control.exit_info_2; + if (unlikely(check_mul_overflow(count, size, &bytes))) + return -EINVAL; + + if (!setup_vmgexit_scratch(svm, in, bytes)) return -EINVAL; - return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in); + return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, - svm->sev_es.ghcb_sa_len / size, in); ++ count, in); } void sev_es_init_vmcb(struct vcpu_svm *svm) diff --cc arch/x86/kvm/svm/svm.h index 5e9510d4574e3,d4eae06b06953..437e68504e669 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@@ -123,6 -124,20 +124,20 @@@ struct svm_nested_state bool initialized; }; + struct vcpu_sev_es_state { + /* SEV-ES support */ + struct vmcb_save_area *vmsa; + struct ghcb *ghcb; + struct kvm_host_map ghcb_map; + bool received_first_sipi; + + /* SEV-ES scratch area support */ + void *ghcb_sa; - u64 ghcb_sa_len; ++ u32 ghcb_sa_len; + bool ghcb_sa_sync; + bool ghcb_sa_free; + }; + struct vcpu_svm { struct kvm_vcpu vcpu; /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */