git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: VMX: Support the immediate form of WRMSRNS in the VM-Exit fastpath
authorXin Li <xin@zytor.com>
Tue, 5 Aug 2025 20:22:23 +0000 (13:22 -0700)
committerSean Christopherson <seanjc@google.com>
Tue, 19 Aug 2025 18:59:46 +0000 (11:59 -0700)
Add support for handling "WRMSRNS with an immediate" VM-Exits in KVM's
fastpath.  On Intel, all writes to the x2APIC ICR and to the TSC Deadline
MSR are non-serializing, i.e. it's highly likely guest kernels will switch
to using WRMSRNS when possible.  And in general, any MSR written via
WRMSRNS is probably worth handling in the fastpath, as the entire point of
WRMSRNS is to shave cycles in hot paths.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
[sean: rewrite changelog, split rename to separate patch]
Link: https://lore.kernel.org/r/20250805202224.1475590-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

index 44423d5f0e27d4aabe673d3ee0364d9d88acd5da..a3f0d458be9d9276fefb24313a48c3b90291dc57 100644 (file)
@@ -7192,6 +7192,9 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
        switch (vmx_get_exit_reason(vcpu).basic) {
        case EXIT_REASON_MSR_WRITE:
                return handle_fastpath_wrmsr(vcpu);
+       case EXIT_REASON_MSR_WRITE_IMM:
+               return handle_fastpath_wrmsr_imm(vcpu, vmx_get_exit_qual(vcpu),
+                                                vmx_get_msr_imm_reg(vcpu));
        case EXIT_REASON_PREEMPTION_TIMER:
                return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
        case EXIT_REASON_HLT:
index efd45b2e8f45ef0fb172be0a2f7ecf920eadde4f..b47a6a4ced157e8812e83797b1bb75ac7f7ba490 100644 (file)
@@ -2178,11 +2178,8 @@ static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
               kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending();
 }
 
-fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu)
+static fastpath_t __handle_fastpath_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
-       u64 data = kvm_read_edx_eax(vcpu);
-       u32 msr = kvm_rcx_read(vcpu);
-
        switch (msr) {
        case APIC_BASE_MSR + (APIC_ICR >> 4):
                if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic) ||
@@ -2203,8 +2200,20 @@ fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu)
 
        return EXIT_FASTPATH_REENTER_GUEST;
 }
+
+fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu)
+{
+       return __handle_fastpath_wrmsr(vcpu, kvm_rcx_read(vcpu),
+                                      kvm_read_edx_eax(vcpu));
+}
 EXPORT_SYMBOL_GPL(handle_fastpath_wrmsr);
 
+fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
+{
+       return __handle_fastpath_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
+}
+EXPORT_SYMBOL_GPL(handle_fastpath_wrmsr_imm);
+
 /*
  * Adapt set_msr() to msr_io()'s calling convention
  */
index 2dab9c9d6199e4d6a237186aa41f0865ce9c832d..eb3088684e8a91e1d96c6db2bbc88807ac540dcf 100644 (file)
@@ -438,6 +438,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len);
 fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu);
+fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
 fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu);
 fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu);