kvm_nested_vmexit_handle_ibrs(vcpu);
- /* Update any VMCS fields that might have changed while L2 ran */
+ /*
+ * Update any VMCS fields that may have changed while vmcs02 was the active
+ * VMCS: the MSR load/store counts and the TSC offset written below are
+ * tracked per-vCPU, not per-VMCS.
+ */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.nr);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
return true;
}
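+
+/*
+ * Add/remove an MSR to/from the VM-exit MSR-store area so that the CPU
+ * saves the guest's value of that MSR into the area on every VM-exit.
+ */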
+static void vmx_add_autostore_msr(struct vcpu_vmx *vmx, u32 msr)
+{
+ vmx_add_auto_msr(&vmx->msr_autostore, msr, 0, VM_EXIT_MSR_STORE_COUNT,
+ vmx->vcpu.kvm);
+}
+
+static void vmx_remove_autostore_msr(struct vcpu_vmx *vmx, u32 msr)
+{
+ vmx_remove_auto_msr(&vmx->msr_autostore, msr, VM_EXIT_MSR_STORE_COUNT);
+}
+
#ifdef CONFIG_X86_32
/*
* On 32-bit kernels, VM exits still load the FS and GS bases from the
static void vmx_recalc_pmu_msr_intercepts(struct kvm_vcpu *vcpu)
{
+ u64 vm_exit_controls_bits = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL;
bool has_mediated_pmu = kvm_vcpu_has_mediated_pmu(vcpu);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!enable_mediated_pmu)
return;
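+	/*
+	 * If the CPU can't save IA32_PERF_GLOBAL_CTRL on VM-exit, don't touch
+	 * that exit control; instead use the VM-exit MSR-store area to capture
+	 * the guest's value while the mediated PMU is active.
+	 */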
+ if (!cpu_has_save_perf_global_ctrl()) {
+ vm_exit_controls_bits &= ~VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL;
+
+ if (has_mediated_pmu)
+ vmx_add_autostore_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL);
+ else
+ vmx_remove_autostore_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL);
+ }
+
vm_entry_controls_changebit(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
has_mediated_pmu);
- vm_exit_controls_changebit(vmx, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
- VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL,
- has_mediated_pmu);
+ vm_exit_controls_changebit(vmx, vm_exit_controls_bits, has_mediated_pmu);
for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
vmx_set_intercept_for_msr(vcpu, MSR_IA32_PERFCTR0 + i,
msrs[i].host);
}
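+/*
+ * Refresh KVM's view of the guest's IA32_PERF_GLOBAL_CTRL after a VM-exit
+ * when the guest has direct write access to the MSR.  Prefer the value the
+ * CPU saved into the VMCS guest field; if the CPU can't do that, fall back
+ * to the slot reserved in the VM-exit MSR-store area.
+ */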
+static void vmx_refresh_guest_perf_global_control(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
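+	/*
+	 * If writes are intercepted, every guest write was trapped and
+	 * pmu->global_ctrl is already up to date; nothing to refresh.
+	 */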
+ if (msr_write_intercepted(vmx, MSR_CORE_PERF_GLOBAL_CTRL))
+ return;
+
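+	/*
+	 * Without the "save IA32_PERF_GLOBAL_CTRL" VM-exit control the VMCS
+	 * guest field isn't updated on exit; read the value the CPU stored
+	 * into the VM-exit MSR-store area instead.
+	 */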
+ if (!cpu_has_save_perf_global_ctrl()) {
+ int slot = vmx_find_loadstore_msr_slot(&vmx->msr_autostore,
+ MSR_CORE_PERF_GLOBAL_CTRL);
+
+ if (WARN_ON_ONCE(slot < 0))
+ return;
+
+ pmu->global_ctrl = vmx->msr_autostore.val[slot].value;
+ vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, pmu->global_ctrl);
+ return;
+ }
+
+ pmu->global_ctrl = vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL);
+}
+
static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx->loaded_vmcs->launched = 1;
- if (!msr_write_intercepted(vmx, MSR_CORE_PERF_GLOBAL_CTRL))
- vcpu_to_pmu(vcpu)->global_ctrl = vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL);
+ vmx_refresh_guest_perf_global_control(vcpu);
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);