]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
Merge tag 'kvm-x86-misc-6.12' of https://github.com/kvm-x86/linux into HEAD
authorPaolo Bonzini <pbonzini@redhat.com>
Sat, 14 Sep 2024 13:38:08 +0000 (09:38 -0400)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 17 Sep 2024 15:38:23 +0000 (11:38 -0400)
KVM x86 misc changes for 6.12

 - Advertise AVX10.1 to userspace (effectively prep work for the "real" AVX10
   functionality that is on the horizon).

 - Rework common MSR handling code to suppress errors on userspace accesses to
   unsupported-but-advertised MSRs.  This will allow removing (almost?) all of
   KVM's exemptions for userspace access to MSRs that shouldn't exist based on
   the vCPU model (the actual cleanup is non-trivial future work).

 - Rework KVM's handling of x2APIC ICR, again, because AMD (x2AVIC) splits the
   64-bit value into the legacy ICR and ICR2 storage, whereas Intel (APICv)
   stores the entire 64-bit value at the ICR offset.

 - Fix a bug where KVM would fail to exit to userspace if such an exit was
   triggered by a fastpath exit handler.

 - Add fastpath handling of HLT VM-Exit to expedite re-entering the guest when
   there's already a pending wake event at the time of the exit.

 - Finally fix the RSM vs. nested VM-Enter WARN by forcing the vCPU out of
   guest mode prior to signalling SHUTDOWN (architecturally, the SHUTDOWN is
   supposed to hit L1, not L2).

1  2 
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/x86_ops.h
arch/x86/kvm/x86.c

Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 0f7342b574f91509525a3bbc58c40e7b4cfc18c6,92fade53c79f9648fbbfdb06f295a31de505607f..a877c0764fc56334202a7cca4f66b8b14aba91c8
@@@ -9898,78 -9883,33 +9887,33 @@@ void kvm_x86_vendor_exit(void
  
  #ifdef CONFIG_X86_64
        if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
-               clear_hv_tscchange_cb();
- #endif
-       kvm_lapic_exit();
-       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
-               cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
-                                           CPUFREQ_TRANSITION_NOTIFIER);
-               cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
-       }
- #ifdef CONFIG_X86_64
-       pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
-       irq_work_sync(&pvclock_irq_work);
-       cancel_work_sync(&pvclock_gtod_work);
- #endif
-       kvm_x86_call(hardware_unsetup)();
-       kvm_mmu_vendor_module_exit();
-       free_percpu(user_return_msrs);
-       kmem_cache_destroy(x86_emulator_cache);
- #ifdef CONFIG_KVM_XEN
-       static_key_deferred_flush(&kvm_xen_enabled);
-       WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
- #endif
-       mutex_lock(&vendor_module_lock);
-       kvm_x86_ops.enable_virtualization_cpu = NULL;
-       mutex_unlock(&vendor_module_lock);
- }
- EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
- static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
- {
-       /*
-        * The vCPU has halted, e.g. executed HLT.  Update the run state if the
-        * local APIC is in-kernel, the run loop will detect the non-runnable
-        * state and halt the vCPU.  Exit to userspace if the local APIC is
-        * managed by userspace, in which case userspace is responsible for
-        * handling wake events.
-        */
-       ++vcpu->stat.halt_exits;
-       if (lapic_in_kernel(vcpu)) {
-               vcpu->arch.mp_state = state;
-               return 1;
-       } else {
-               vcpu->run->exit_reason = reason;
-               return 0;
-       }
- }
- int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
- {
-       return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
- }
- EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
- int kvm_emulate_halt(struct kvm_vcpu *vcpu)
- {
-       int ret = kvm_skip_emulated_instruction(vcpu);
-       /*
-        * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
-        * KVM_EXIT_DEBUG here.
-        */
-       return kvm_emulate_halt_noskip(vcpu) && ret;
- }
- EXPORT_SYMBOL_GPL(kvm_emulate_halt);
- int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
- {
-       int ret = kvm_skip_emulated_instruction(vcpu);
+               clear_hv_tscchange_cb();
+ #endif
+       kvm_lapic_exit();
  
-       return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
-                                       KVM_EXIT_AP_RESET_HOLD) && ret;
+       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+               cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+                                           CPUFREQ_TRANSITION_NOTIFIER);
+               cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
+       }
+ #ifdef CONFIG_X86_64
+       pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
+       irq_work_sync(&pvclock_irq_work);
+       cancel_work_sync(&pvclock_gtod_work);
+ #endif
+       kvm_x86_call(hardware_unsetup)();
+       kvm_mmu_vendor_module_exit();
+       free_percpu(user_return_msrs);
+       kmem_cache_destroy(x86_emulator_cache);
+ #ifdef CONFIG_KVM_XEN
+       static_key_deferred_flush(&kvm_xen_enabled);
+       WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
+ #endif
+       mutex_lock(&vendor_module_lock);
 -      kvm_x86_ops.hardware_enable = NULL;
++      kvm_x86_ops.enable_virtualization_cpu = NULL;
+       mutex_unlock(&vendor_module_lock);
  }
- EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+ EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
  
  #ifdef CONFIG_X86_64
  static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,