--- /dev/null
+From 4e59516a12a6ef6dcb660cb3a3f70c64bd60cfec Mon Sep 17 00:00:00 2001
+From: Peter Feiner <pfeiner@google.com>
+Date: Thu, 7 Jul 2016 14:49:58 -0700
+Subject: kvm: vmx: ensure VMCS is current while enabling PML
+
+From: Peter Feiner <pfeiner@google.com>
+
+commit 4e59516a12a6ef6dcb660cb3a3f70c64bd60cfec upstream.
+
+Between loading the new VMCS and enabling PML, the vCPU thread was not
+pinned to a CPU. If the thread was migrated to another CPU in the interim
+(e.g., due to preemption or a sleeping alloc_page()), then the VMWRITEs
+to enable PML would target the wrong VMCS -- or no VMCS at all:
+
+ [ 2087.266950] vmwrite error: reg 200e value 3fe1d52000 (err -506126336)
+ [ 2087.267062] vmwrite error: reg 812 value 1ff (err 511)
+ [ 2087.267125] vmwrite error: reg 401e value 12229c00 (err 304258048)
+
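+In the pre-patch vmx_create_vcpu(), the ordering was roughly the
+following (a simplified sketch, not the verbatim source; the
+get_cpu()/vmx_vcpu_load() calls come from the surrounding code, which is
+not shown in the diff below, and error handling is elided):
+
+  cpu = get_cpu();
+  vmx_vcpu_load(&vmx->vcpu, cpu);  /* VMPTRLD: vmcs01 becomes current */
+  err = vmx_vcpu_setup(vmx);
+  vmx_vcpu_put(&vmx->vcpu);
+  put_cpu();                       /* preemption back on; thread may migrate */
+  ...
+  if (enable_pml)
+          err = vmx_create_pml_buffer(vmx);  /* alloc_page() may sleep, then
+                                                the VMWRITEs hit whatever VMCS
+                                                is current on this CPU, if any */
+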
+This patch ensures that the VMCS remains current while PML is being
+enabled by doing the VMWRITEs while the vCPU thread is pinned to a CPU.
+Allocation of the PML buffer, which can sleep, is hoisted out of the
+critical section.
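+
+After the patch, the ordering becomes (again a simplified sketch under
+the same assumptions): the allocation happens up front, where sleeping is
+harmless, and the VMWRITEs move into vmx_vcpu_setup(), which runs with
+the vCPU loaded and preemption disabled:
+
+  if (enable_pml) {
+          vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);  /* may sleep, safe here */
+          if (!vmx->pml_pg)
+                  goto uninit_vcpu;
+  }
+  ...
+  cpu = get_cpu();
+  vmx_vcpu_load(&vmx->vcpu, cpu);  /* VMCS current from here on */
+  err = vmx_vcpu_setup(vmx);       /* PML_ADDRESS and GUEST_PML_INDEX
+                                      VMWRITEs now happen in here */
+  vmx_vcpu_put(&vmx->vcpu);
+  put_cpu();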
+
+Signed-off-by: Peter Feiner <pfeiner@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Herongguang (Stephen)" <herongguang.he@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 56 ++++++++++++++++++++++-------------------------------
+ 1 file changed, 24 insertions(+), 32 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4867,6 +4867,12 @@ static int vmx_vcpu_setup(struct vcpu_vm
+ if (vmx_xsaves_supported())
+ vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
+
++ if (enable_pml) {
++ ASSERT(vmx->pml_pg);
++ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
++ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
++ }
++
+ return 0;
+ }
+
+@@ -7839,22 +7845,6 @@ static void vmx_get_exit_info(struct kvm
+ *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
+ }
+
+-static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
+-{
+- struct page *pml_pg;
+-
+- pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+- if (!pml_pg)
+- return -ENOMEM;
+-
+- vmx->pml_pg = pml_pg;
+-
+- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+-
+- return 0;
+-}
+-
+ static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
+ {
+ if (vmx->pml_pg) {
+@@ -8789,14 +8779,26 @@ static struct kvm_vcpu *vmx_create_vcpu(
+ if (err)
+ goto free_vcpu;
+
++ err = -ENOMEM;
++
++	/*
++	 * If PML is turned on, failure on enabling PML just results in failure
++	 * of creating the vcpu, therefore we can simplify PML logic (by
++	 * avoiding dealing with cases, such as enabling PML partially on vcpus
++	 * for the guest, etc.).
++	 */
++ if (enable_pml) {
++ vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ if (!vmx->pml_pg)
++ goto uninit_vcpu;
++ }
++
+ vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
+ > PAGE_SIZE);
+
+- err = -ENOMEM;
+- if (!vmx->guest_msrs) {
+- goto uninit_vcpu;
+- }
++ if (!vmx->guest_msrs)
++ goto free_pml;
+
+ vmx->loaded_vmcs = &vmx->vmcs01;
+ vmx->loaded_vmcs->vmcs = alloc_vmcs();
+@@ -8840,18 +8842,6 @@ static struct kvm_vcpu *vmx_create_vcpu(
+ vmx->nested.current_vmptr = -1ull;
+ vmx->nested.current_vmcs12 = NULL;
+
+- /*
+- * If PML is turned on, failure on enabling PML just results in failure
+- * of creating the vcpu, therefore we can simplify PML logic (by
+- * avoiding dealing with cases, such as enabling PML partially on vcpus
+- * for the guest, etc.
+- */
+- if (enable_pml) {
+- err = vmx_create_pml_buffer(vmx);
+- if (err)
+- goto free_vmcs;
+- }
+-
+ return &vmx->vcpu;
+
+ free_vmcs:
+@@ -8859,6 +8849,8 @@ free_vmcs:
+ free_loaded_vmcs(vmx->loaded_vmcs);
+ free_msrs:
+ kfree(vmx->guest_msrs);
++free_pml:
++ vmx_destroy_pml_buffer(vmx);
+ uninit_vcpu:
+ kvm_vcpu_uninit(&vmx->vcpu);
+ free_vcpu: