--- /dev/null
+From 350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7 Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Tue, 19 Aug 2014 19:14:50 +0800
+Subject: kvm: iommu: fix the third parameter of kvm_iommu_put_pages (CVE-2014-3601)
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+commit 350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7 upstream.
+
+The third parameter of kvm_iommu_put_pages is wrong;
+it should be 'gfn - slot->base_gfn'.
+
+By making gfn very large, a malicious guest or userspace can cause kvm
+to take this error path and subsequently pass a huge value as the size.
+Alternatively, if gfn is small, pages would be pinned but never
+unpinned, causing a host memory leak and a local DoS.
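+
+As a rough sketch of why the count matters (illustrative only, not the
+in-tree implementation -- the real function also walks the iommu
+mappings): kvm_iommu_put_pages() expects a page count relative to the
+slot, while gfn is an absolute guest frame number, so the error path
+must pass the difference:
+
+	/* illustrative sketch of how the callee uses 'npages' */
+	static void kvm_iommu_put_pages(struct kvm *kvm,
+					gfn_t base_gfn, unsigned long npages)
+	{
+		gfn_t end_gfn = base_gfn + npages; /* one past the last gfn */
+
+		/* ... unmap and unpin everything in [base_gfn, end_gfn) ... */
+	}
+
+	/* error path: only [slot->base_gfn, gfn) was mapped so far, hence */
+	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);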
+
+Passing a reasonable but large value could be the most dangerous case,
+because it would unpin a page that should have stayed pinned, and thus
+allow the device to DMA into arbitrary memory. However, this cannot
+happen because of the conditions that can trigger the error:
+
+- out of memory (where you can't allocate even a single page)
+ should not be possible for the attacker to trigger
+
+- when exceeding the iommu's address space, guest pages after gfn
+ will also exceed the iommu's address space, and inside
+ kvm_iommu_put_pages() the iommu_iova_to_phys() will fail. The
+ page thus would not be unpinned at all.
+
+Reported-by: Jack Morgenstein <jackm@mellanox.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/iommu.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_me
+ return pfn;
+ }
+
++static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
++{
++ unsigned long i;
++
++ for (i = 0; i < npages; ++i)
++ kvm_release_pfn_clean(pfn + i);
++}
++
+ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ {
+ gfn_t gfn, end_gfn;
+@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
+ if (r) {
+ printk(KERN_ERR "kvm_iommu_map_address:"
+ "iommu failed to map pfn=%llx\n", pfn);
++ kvm_unpin_pages(kvm, pfn, page_size);
+ goto unmap_pages;
+ }
+
+@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
+ return 0;
+
+ unmap_pages:
+- kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
++ kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
+ return r;
+ }
+
+@@ -266,14 +275,6 @@ out_unlock:
+ return r;
+ }
+
+-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+-{
+- unsigned long i;
+-
+- for (i = 0; i < npages; ++i)
+- kvm_release_pfn_clean(pfn + i);
+-}
+-
+ static void kvm_iommu_put_pages(struct kvm *kvm,
+ gfn_t base_gfn, unsigned long npages)
+ {
--- /dev/null
+From 56cc2406d68c0f09505c389e276f27a99f495cbd Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@linux.intel.com>
+Date: Tue, 5 Aug 2014 12:42:24 +0800
+Subject: KVM: nVMX: fix "acknowledge interrupt on exit" when APICv is in use
+
+From: Wanpeng Li <wanpeng.li@linux.intel.com>
+
+commit 56cc2406d68c0f09505c389e276f27a99f495cbd upstream.
+
+After commit 77b0f5d (KVM: nVMX: Ack and write vector info to intr_info
+if L1 asks us to), "Acknowledge interrupt on exit" behavior can be
+emulated. To do so, KVM will ask the APIC for the interrupt vector
+during a nested vmexit if VM_EXIT_ACK_INTR_ON_EXIT is set. With APICv,
+kvm_get_apic_interrupt would return -1 and give the following WARNING:
+
+Call Trace:
+ [<ffffffff81493563>] dump_stack+0x49/0x5e
+ [<ffffffff8103f0eb>] warn_slowpath_common+0x7c/0x96
+ [<ffffffffa059709a>] ? nested_vmx_vmexit+0xa4/0x233 [kvm_intel]
+ [<ffffffff8103f11a>] warn_slowpath_null+0x15/0x17
+ [<ffffffffa059709a>] nested_vmx_vmexit+0xa4/0x233 [kvm_intel]
+ [<ffffffffa0594295>] ? nested_vmx_exit_handled+0x6a/0x39e [kvm_intel]
+ [<ffffffffa0537931>] ? kvm_apic_has_interrupt+0x80/0xd5 [kvm]
+ [<ffffffffa05972ec>] vmx_check_nested_events+0xc3/0xd3 [kvm_intel]
+ [<ffffffffa051ebe9>] inject_pending_event+0xd0/0x16e [kvm]
+ [<ffffffffa051efa0>] vcpu_enter_guest+0x319/0x704 [kvm]
+
+To fix this, we cannot rely on the processor's virtual interrupt delivery,
+because "acknowledge interrupt on exit" must only update the virtual
+ISR/PPR/IRR registers (and SVI, which is just a cache of the virtual ISR)
+but it should not deliver the interrupt through the IDT. Thus, KVM has
+to deliver the interrupt "by hand", similar to the treatment of EOI in
+commit fc57ac2c9ca8 (KVM: lapic: sync highest ISR to hardware apic on
+EOI, 2014-05-14).
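+
+For context, the nested vmexit path added by commit 77b0f5d acks the
+interrupt roughly as below (a sketch based on that commit, not part of
+this patch; names may differ slightly between trees). The WARN_ON() is
+what produces the trace above when kvm_cpu_get_interrupt() returns -1:
+
+	if (nested_exit_intr_ack_set(vcpu)) {
+		int irq = kvm_cpu_get_interrupt(vcpu);
+		WARN_ON(irq < 0);
+		vmcs12->vm_exit_intr_info = irq |
+			INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
+	}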
+
+The patch modifies kvm_cpu_get_interrupt to always acknowledge an
+interrupt; there are only two callers, and the other is not affected
+because it is never reached with kvm_apic_vid_enabled() == true. Then it
+modifies apic_set_isr and apic_clear_irr to update SVI and RVI in addition
+to the registers.
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Suggested-by: "Zhang, Yang Z" <yang.z.zhang@intel.com>
+Tested-by: Liu, RongrongX <rongrongx.liu@intel.com>
+Tested-by: Felipe Reyes <freyes@suse.com>
+Fixes: 77b0f5d67ff2781f36831cba79674c3e97bd7acf
+Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/irq.c | 2 -
+ arch/x86/kvm/lapic.c | 52 ++++++++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 40 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcp
+
+ vector = kvm_cpu_get_extint(v);
+
+- if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
++ if (vector != -1)
+ return vector; /* PIC */
+
+ return kvm_get_apic_interrupt(v); /* APIC */
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -352,25 +352,46 @@ static inline int apic_find_highest_irr(
+
+ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+ {
+- apic->irr_pending = false;
++ struct kvm_vcpu *vcpu;
++
++ vcpu = apic->vcpu;
++
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
+- if (apic_search_irr(apic) != -1)
+- apic->irr_pending = true;
++ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
++ /* try to update RVI */
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ else {
++ vec = apic_search_irr(apic);
++ apic->irr_pending = (vec != -1);
++ }
+ }
+
+ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ {
+- /* Note that we never get here with APIC virtualization enabled. */
++ struct kvm_vcpu *vcpu;
++
++ if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
++ return;
++
++ vcpu = apic->vcpu;
+
+- if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+- ++apic->isr_count;
+- BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+ /*
+- * ISR (in service register) bit is set when injecting an interrupt.
+- * The highest vector is injected. Thus the latest bit set matches
+- * the highest bit in ISR.
++ * With APIC virtualization enabled, all caching is disabled
++ * because the processor can modify ISR under the hood. Instead
++ * just set SVI.
+ */
+- apic->highest_isr_cache = vec;
++ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
++ kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
++ else {
++ ++apic->isr_count;
++ BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
++ /*
++ * ISR (in service register) bit is set when injecting an interrupt.
++ * The highest vector is injected. Thus the latest bit set matches
++ * the highest bit in ISR.
++ */
++ apic->highest_isr_cache = vec;
++ }
+ }
+
+ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
+@@ -1627,11 +1648,16 @@ int kvm_get_apic_interrupt(struct kvm_vc
+ int vector = kvm_apic_has_interrupt(vcpu);
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+- /* Note that we never get here with APIC virtualization enabled. */
+-
+ if (vector == -1)
+ return -1;
+
++ /*
++ * We get here even with APIC virtualization enabled, if doing
++ * nested virtualization and L1 runs with the "acknowledge interrupt
++ * on exit" mode. Then we cannot inject the interrupt via RVI,
++ * because the processor would deliver it through the IDT.
++ */
++
+ apic_set_isr(vector, apic);
+ apic_update_ppr(apic);
+ apic_clear_irr(vector, apic);
--- /dev/null
+From 0d234daf7e0a3290a3a20c8087eefbd6335a5bd4 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 18 Aug 2014 16:39:48 +0200
+Subject: Revert "KVM: x86: Increase the number of fixed MTRR regs to 10"
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 0d234daf7e0a3290a3a20c8087eefbd6335a5bd4 upstream.
+
+This reverts commit 682367c494869008eb89ef733f196e99415ae862,
+which causes 32-bit SMP Windows 7 guests to panic.
+
+SeaBIOS has a limit on the number of MTRRs that it can handle,
+and the reverted patch exceeded that limit. Better revert it.
+Thanks to Nadav Amit for debugging the cause.
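+
+For reference (background only, not part of the change): each
+variable-range MTRR is a PHYSBASE/PHYSMASK MSR pair starting at MSR
+0x200, and KVM_NR_VAR_MTRR is what the guest sees as VCNT in MTRRcap,
+so the two values correspond roughly to these guest-visible MSR ranges:
+
+	/* KVM_NR_VAR_MTRR == 8:  MSRs 0x200 (MTRRphysBase0) .. 0x20f (MTRRphysMask7) */
+	/* KVM_NR_VAR_MTRR == 10: MSRs 0x200 .. 0x213, more variable ranges
+	 * than the guest firmware was prepared to handle */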
+
+Reported-by: Wanpeng Li <wanpeng.li@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -99,7 +99,7 @@ static inline gfn_t gfn_to_index(gfn_t g
+ #define KVM_REFILL_PAGES 25
+ #define KVM_MAX_CPUID_ENTRIES 80
+ #define KVM_NR_FIXED_MTRR_REGION 88
+-#define KVM_NR_VAR_MTRR 10
++#define KVM_NR_VAR_MTRR 8
+
+ #define ASYNC_PF_PER_VCPU 64
+