/* Request device emulation from the userspace device model. */
vcpu->mmio_is_write = write;
- if (!write)
- vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
- vcpu->run->mmio.phys_addr = gpa;
- vcpu->run->mmio.len = size;
- vcpu->run->mmio.is_write = write;
- vcpu->run->exit_reason = KVM_EXIT_MMIO;
+ __kvm_prepare_emulated_mmio_exit(vcpu, gpa, size, &val, write);
- if (write) {
- memcpy(vcpu->run->mmio.data, &val, size);
- } else {
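+ /*
+ * For reads, stash the fragment and register a completion callback so
+ * the value provided by userspace can be returned to the guest.
+ */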
+ if (!write) {
+ vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
vcpu->mmio_fragments[0].gpa = gpa;
vcpu->mmio_fragments[0].len = size;
trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, size, gpa, NULL);
const struct read_write_emulator_ops *ops)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- struct kvm_mmio_fragment *frag;
int rc;
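+ /*
+ * Reads, and accesses spanning multiple exits, consume the buffer after
+ * this function returns, and so must not point at on-stack data.
+ */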
if (WARN_ON_ONCE((bytes > 8u || !ops->write) && object_is_on_stack(val)))
return X86EMUL_UNHANDLEABLE;

vcpu->mmio_needed = 1;
vcpu->mmio_cur_fragment = 0;
+ vcpu->mmio_is_write = ops->write;
- frag = &vcpu->mmio_fragments[0];
- vcpu->run->mmio.len = min(8u, frag->len);
- vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
- vcpu->run->exit_reason = KVM_EXIT_MMIO;
- vcpu->run->mmio.phys_addr = frag->gpa;
+ kvm_prepare_emulated_mmio_exit(vcpu, &vcpu->mmio_fragments[0]);
/*
 * For MMIO reads, stop emulating and immediately exit to userspace, as
 * the data is needed to continue emulation. For MMIO writes, keep
 * emulating; KVM will exit to userspace to perform the write only
 * after completing emulation (see the check on vcpu->mmio_needed in
 * x86_emulate_instruction()).
 */
- if (!ops->write)
- return X86EMUL_IO_NEEDED;
-
- memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
- return X86EMUL_CONTINUE;
+ return ops->write ? X86EMUL_CONTINUE : X86EMUL_IO_NEEDED;
}
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
return complete_emulated_io(vcpu);
}
- run->exit_reason = KVM_EXIT_MMIO;
- run->mmio.phys_addr = frag->gpa;
- if (vcpu->mmio_is_write)
- memcpy(run->mmio.data, frag->data, min(8u, frag->len));
- run->mmio.len = min(8u, frag->len);
- run->mmio.is_write = vcpu->mmio_is_write;
+ kvm_prepare_emulated_mmio_exit(vcpu, frag);
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
return 0;
}
}
/* More MMIO is needed */
- run->mmio.phys_addr = frag->gpa;
- run->mmio.len = min(8u, frag->len);
- run->mmio.is_write = vcpu->mmio_is_write;
- if (run->mmio.is_write)
- memcpy(run->mmio.data, frag->data, min(8u, frag->len));
- run->exit_reason = KVM_EXIT_MMIO;
-
+ kvm_prepare_emulated_mmio_exit(vcpu, frag);
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
-
return 0;
}
* requests that split a page boundary.
*/
frag = vcpu->mmio_fragments;
- vcpu->mmio_nr_fragments = 1;
frag->len = bytes;
frag->gpa = gpa;
frag->data = data;
vcpu->mmio_needed = 1;
vcpu->mmio_cur_fragment = 0;
+ vcpu->mmio_nr_fragments = 1;
+ vcpu->mmio_is_write = is_write;
- vcpu->run->mmio.phys_addr = gpa;
- vcpu->run->mmio.len = min(8u, frag->len);
- vcpu->run->mmio.is_write = is_write;
- if (is_write)
- memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
- vcpu->run->exit_reason = KVM_EXIT_MMIO;
-
+ kvm_prepare_emulated_mmio_exit(vcpu, frag);
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
-
return 0;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio);
unsigned int port, void *data, unsigned int count,
int in);
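+/*
+ * Fill vcpu->run with a KVM_EXIT_MMIO exit so that userspace can emulate the
+ * access. For writes, the data is copied into the kvm_run area here; for
+ * reads, userspace provides the data in run->mmio.data, to be consumed by
+ * the vCPU's ->complete_userspace_io() callback.
+ */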
+static inline void __kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
+ gpa_t gpa, unsigned int len,
+ const void *data,
+ bool is_write)
+{
+ struct kvm_run *run = vcpu->run;
+
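+ /* The kvm_run MMIO buffer holds at most 8 bytes per exit. */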
+ KVM_BUG_ON(len > 8, vcpu->kvm);
+
+ run->exit_reason = KVM_EXIT_MMIO;
+ run->mmio.phys_addr = gpa;
+ run->mmio.len = len;
+ run->mmio.is_write = is_write;
+ if (is_write)
+ memcpy(run->mmio.data, data, len);
+}
+
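+/*
+ * Prepare a KVM_EXIT_MMIO exit for the current in-flight MMIO fragment. The
+ * caller must have already initialized the vCPU's MMIO fragment state.
+ */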
+static inline void kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
+ struct kvm_mmio_fragment *frag)
+{
+ WARN_ON_ONCE(!vcpu->mmio_needed || !vcpu->mmio_nr_fragments);
+
+ __kvm_prepare_emulated_mmio_exit(vcpu, frag->gpa, min(8u, frag->len),
+ frag->data, vcpu->mmio_is_write);
+}
+
static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
{
return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);