git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: x86: Add helpers to prepare kvm_run for userspace MMIO exit
authorSean Christopherson <seanjc@google.com>
Wed, 25 Feb 2026 01:20:49 +0000 (17:20 -0800)
committerSean Christopherson <seanjc@google.com>
Tue, 3 Mar 2026 00:06:49 +0000 (16:06 -0800)
Add helpers to fill kvm_run for userspace MMIO exits to deduplicate a
variety of code, and to allow for a cleaner return path in
emulator_read_write().

Opportunistically add a KVM_BUG_ON() to ensure the caller is limiting the
length of a single MMIO access to 8 bytes (the largest size userspace is
prepared to handle, as the ABI was baked before things like MOVDQ came
along).

No functional change intended.

Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Binbin Wu <binbin.wu@linux.intel.com>
Cc: Xiaoyao Li <xiaoyao.li@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Michael Roth <michael.roth@amd.com>
Tested-by: Tom Lendacky <thomas.lendacky@gmail.com>
Tested-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Link: https://patch.msgid.link/20260225012049.920665-15-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/tdx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

index c5065f84b78be3ca09cd854611e6c9dc3ec61ae4..5e9b0c4d9af6cc6dc31a1491ca0c866c56c3a185 100644 (file)
@@ -1467,17 +1467,11 @@ static int tdx_emulate_mmio(struct kvm_vcpu *vcpu)
 
        /* Request the device emulation to userspace device model. */
        vcpu->mmio_is_write = write;
-       if (!write)
-               vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
 
-       vcpu->run->mmio.phys_addr = gpa;
-       vcpu->run->mmio.len = size;
-       vcpu->run->mmio.is_write = write;
-       vcpu->run->exit_reason = KVM_EXIT_MMIO;
+       __kvm_prepare_emulated_mmio_exit(vcpu, gpa, size, &val, write);
 
-       if (write) {
-               memcpy(vcpu->run->mmio.data, &val, size);
-       } else {
+       if (!write) {
+               vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
                vcpu->mmio_fragments[0].gpa = gpa;
                vcpu->mmio_fragments[0].len = size;
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, size, gpa, NULL);
index 1467652ceabc178f08938fd501610bb5017883e8..8cb6b1f1916ecd20925ede556388ec50ffa7349d 100644 (file)
@@ -8209,7 +8209,6 @@ static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
                        const struct read_write_emulator_ops *ops)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       struct kvm_mmio_fragment *frag;
        int rc;
 
        if (WARN_ON_ONCE((bytes > 8u || !ops->write) && object_is_on_stack(val)))
@@ -8267,12 +8266,9 @@ static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
 
        vcpu->mmio_needed = 1;
        vcpu->mmio_cur_fragment = 0;
+       vcpu->mmio_is_write = ops->write;
 
-       frag = &vcpu->mmio_fragments[0];
-       vcpu->run->mmio.len = min(8u, frag->len);
-       vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
-       vcpu->run->exit_reason = KVM_EXIT_MMIO;
-       vcpu->run->mmio.phys_addr = frag->gpa;
+       kvm_prepare_emulated_mmio_exit(vcpu, &vcpu->mmio_fragments[0]);
 
        /*
         * For MMIO reads, stop emulating and immediately exit to userspace, as
@@ -8282,11 +8278,7 @@ static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
         * after completing emulation (see the check on vcpu->mmio_needed in
         * x86_emulate_instruction()).
         */
-       if (!ops->write)
-               return X86EMUL_IO_NEEDED;
-
-       memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
-       return X86EMUL_CONTINUE;
+       return ops->write ? X86EMUL_CONTINUE : X86EMUL_IO_NEEDED;
 }
 
 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
@@ -11883,12 +11875,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
                return complete_emulated_io(vcpu);
        }
 
-       run->exit_reason = KVM_EXIT_MMIO;
-       run->mmio.phys_addr = frag->gpa;
-       if (vcpu->mmio_is_write)
-               memcpy(run->mmio.data, frag->data, min(8u, frag->len));
-       run->mmio.len = min(8u, frag->len);
-       run->mmio.is_write = vcpu->mmio_is_write;
+       kvm_prepare_emulated_mmio_exit(vcpu, frag);
        vcpu->arch.complete_userspace_io = complete_emulated_mmio;
        return 0;
 }
@@ -14295,15 +14282,8 @@ static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
        }
 
        // More MMIO is needed
-       run->mmio.phys_addr = frag->gpa;
-       run->mmio.len = min(8u, frag->len);
-       run->mmio.is_write = vcpu->mmio_is_write;
-       if (run->mmio.is_write)
-               memcpy(run->mmio.data, frag->data, min(8u, frag->len));
-       run->exit_reason = KVM_EXIT_MMIO;
-
+       kvm_prepare_emulated_mmio_exit(vcpu, frag);
        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
-
        return 0;
 }
 
@@ -14332,23 +14312,17 @@ int kvm_sev_es_mmio(struct kvm_vcpu *vcpu, bool is_write, gpa_t gpa,
         *       requests that split a page boundary.
         */
        frag = vcpu->mmio_fragments;
-       vcpu->mmio_nr_fragments = 1;
        frag->len = bytes;
        frag->gpa = gpa;
        frag->data = data;
 
        vcpu->mmio_needed = 1;
        vcpu->mmio_cur_fragment = 0;
+       vcpu->mmio_nr_fragments = 1;
+       vcpu->mmio_is_write = is_write;
 
-       vcpu->run->mmio.phys_addr = gpa;
-       vcpu->run->mmio.len = min(8u, frag->len);
-       vcpu->run->mmio.is_write = is_write;
-       if (is_write)
-               memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
-       vcpu->run->exit_reason = KVM_EXIT_MMIO;
-
+       kvm_prepare_emulated_mmio_exit(vcpu, frag);
        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
-
        return 0;
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio);
index 1d0f0edd31b3cfc88177f46cb07df5cfcb720430..44a28d343d407a54a3466f831ca29d7398edc450 100644 (file)
@@ -718,6 +718,32 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                         unsigned int port, void *data,  unsigned int count,
                         int in);
 
+static inline void __kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
+                                                   gpa_t gpa, unsigned int len,
+                                                   const void *data,
+                                                   bool is_write)
+{
+       struct kvm_run *run = vcpu->run;
+
+       KVM_BUG_ON(len > 8, vcpu->kvm);
+
+       run->mmio.len = len;
+       run->mmio.is_write = is_write;
+       run->exit_reason = KVM_EXIT_MMIO;
+       run->mmio.phys_addr = gpa;
+       if (is_write)
+               memcpy(run->mmio.data, data, len);
+}
+
+static inline void kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
+                                                 struct kvm_mmio_fragment *frag)
+{
+       WARN_ON_ONCE(!vcpu->mmio_needed || !vcpu->mmio_nr_fragments);
+
+       __kvm_prepare_emulated_mmio_exit(vcpu, frag->gpa, min(8u, frag->len),
+                                        frag->data, vcpu->mmio_is_write);
+}
+
 static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
 {
        return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);