From 33e09e2f9735fef7255aa96d1fe00782777bc44b Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Tue, 24 Feb 2026 17:20:43 -0800
Subject: [PATCH] KVM: x86: Dedup kvm_sev_es_mmio_{read,write}()

Dedup the SEV-ES emulated MMIO code by using the read vs. write emulator
ops to handle the few differences between reads and writes.

Opportunistically tweak the comment about fragments to call out that KVM
should verify that userspace can actually handle MMIO requests that
cross page boundaries.  Unlike emulated MMIO, the request is made in the
GPA space, not the GVA space, i.e. emulation across page boundaries can
work generically, at least in theory.

No functional change intended.

Tested-by: Tom Lendacky
Tested-by: Rick Edgecombe
Link: https://patch.msgid.link/20260225012049.920665-9-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/x86.c | 58 +++++++++++++++-------------------------
 1 file changed, 19 insertions(+), 39 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5752ec3fc8f2f..81e6839140725 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -14292,16 +14292,17 @@ static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
-			  void *data)
+static int kvm_sev_es_do_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
+			      unsigned int bytes, void *data,
+			      const struct read_write_emulator_ops *ops)
 {
-	int handled;
 	struct kvm_mmio_fragment *frag;
+	int handled;
 
 	if (!data || WARN_ON_ONCE(object_is_on_stack(data)))
 		return -EINVAL;
 
-	handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
+	handled = ops->read_write_mmio(vcpu, gpa, bytes, data);
 	if (handled == bytes)
 		return 1;
 
@@ -14309,7 +14310,10 @@ int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 	gpa += handled;
 	data += handled;
 
-	/*TODO: Check if need to increment number of frags */
+	/*
+	 * TODO: Determine whether or not userspace plays nice with MMIO
+	 * requests that split a page boundary.
+	 */
 	frag = vcpu->mmio_fragments;
 	vcpu->mmio_nr_fragments = 1;
 	frag->len = bytes;
@@ -14321,51 +14325,27 @@
 
 	vcpu->run->mmio.phys_addr = gpa;
 	vcpu->run->mmio.len = min(8u, frag->len);
-	vcpu->run->mmio.is_write = 1;
-	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
+	vcpu->run->mmio.is_write = ops->write;
+	if (ops->write)
+		memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
 
 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
 
 	return 0;
 }
+
+int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
+			  void *data)
+{
+	return kvm_sev_es_do_mmio(vcpu, gpa, bytes, data, &write_emultor);
+}
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_write);
 
 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 			 void *data)
 {
-	int handled;
-	struct kvm_mmio_fragment *frag;
-
-	if (!data || WARN_ON_ONCE(object_is_on_stack(data)))
-		return -EINVAL;
-
-	handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
-	if (handled == bytes)
-		return 1;
-
-	bytes -= handled;
-	gpa += handled;
-	data += handled;
-
-	/*TODO: Check if need to increment number of frags */
-	frag = vcpu->mmio_fragments;
-	vcpu->mmio_nr_fragments = 1;
-	frag->len = bytes;
-	frag->gpa = gpa;
-	frag->data = data;
-
-	vcpu->mmio_needed = 1;
-	vcpu->mmio_cur_fragment = 0;
-
-	vcpu->run->mmio.phys_addr = gpa;
-	vcpu->run->mmio.len = min(8u, frag->len);
-	vcpu->run->mmio.is_write = 0;
-	vcpu->run->exit_reason = KVM_EXIT_MMIO;
-
-	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
-
-	return 0;
+	return kvm_sev_es_do_mmio(vcpu, gpa, bytes, data, &read_emultor);
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_read);
 
-- 
2.47.3