--- /dev/null
+From 350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7 Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Tue, 19 Aug 2014 19:14:50 +0800
+Subject: kvm: iommu: fix the third parameter of kvm_iommu_put_pages (CVE-2014-3601)
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+commit 350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7 upstream.
+
+The third parameter of kvm_iommu_put_pages is wrong; it should be
+'gfn - slot->base_gfn'.
+
+By making gfn very large, a malicious guest or userspace can cause kvm
+to take this error path and subsequently pass a huge value as the size.
+Alternatively, if gfn is small, pages would be pinned but never
+unpinned, causing a host memory leak and a local denial of service
+(DoS).
+
+Passing a reasonable but large value could be the most dangerous case,
+because it would unpin a page that should have stayed pinned, and thus
+allow the device to DMA into arbitrary memory. However, this cannot
+happen because of the conditions that can trigger the error:
+
+- out of memory (where you can't allocate even a single page)
+ should not be possible for the attacker to trigger
+
+- when exceeding the iommu's address space, guest pages after gfn
+ will also exceed the iommu's address space, and inside
+ kvm_iommu_put_pages() the iommu_iova_to_phys() will fail. The
+ page thus would not be unpinned at all.
+
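+To illustrate the mix-up: kvm_iommu_put_pages() takes a starting gfn
+and a page *count*, so the old call passed an absolute frame number
+where a count was expected. A rough sketch of the semantics (not the
+exact code) looks like this:
+
+    /* unmaps and unpins 'npages' pages starting at 'base_gfn' */
+    static void kvm_iommu_put_pages(struct kvm *kvm,
+                                    gfn_t base_gfn, unsigned long npages);
+
+    /* before: 'gfn' is an absolute frame number, not a count */
+    kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
+
+    /* after: pass the number of pages mapped so far */
+    kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
+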
+Reported-by: Jack Morgenstein <jackm@mellanox.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/iommu.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_me
+ return pfn;
+ }
+
++static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
++{
++ unsigned long i;
++
++ for (i = 0; i < npages; ++i)
++ kvm_release_pfn_clean(pfn + i);
++}
++
+ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ {
+ gfn_t gfn, end_gfn;
+@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
+ if (r) {
+ printk(KERN_ERR "kvm_iommu_map_address:"
+ "iommu failed to map pfn=%llx\n", pfn);
++ kvm_unpin_pages(kvm, pfn, page_size);
+ goto unmap_pages;
+ }
+
+@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
+ return 0;
+
+ unmap_pages:
+- kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
++ kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
+ return r;
+ }
+
+@@ -266,14 +275,6 @@ out_unlock:
+ return r;
+ }
+
+-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+-{
+- unsigned long i;
+-
+- for (i = 0; i < npages; ++i)
+- kvm_release_pfn_clean(pfn + i);
+-}
+-
+ static void kvm_iommu_put_pages(struct kvm *kvm,
+ gfn_t base_gfn, unsigned long npages)
+ {
--- /dev/null
+From 56cc2406d68c0f09505c389e276f27a99f495cbd Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@linux.intel.com>
+Date: Tue, 5 Aug 2014 12:42:24 +0800
+Subject: KVM: nVMX: fix "acknowledge interrupt on exit" when APICv is in use
+
+From: Wanpeng Li <wanpeng.li@linux.intel.com>
+
+commit 56cc2406d68c0f09505c389e276f27a99f495cbd upstream.
+
+After commit 77b0f5d (KVM: nVMX: Ack and write vector info to intr_info
+if L1 asks us to), "Acknowledge interrupt on exit" behavior can be
+emulated. To do so, KVM asks the APIC for the interrupt vector during a
+nested vmexit if VM_EXIT_ACK_INTR_ON_EXIT is set. With APICv,
+kvm_get_apic_interrupt would return -1 and give the following WARNING:
+
+Call Trace:
+ [<ffffffff81493563>] dump_stack+0x49/0x5e
+ [<ffffffff8103f0eb>] warn_slowpath_common+0x7c/0x96
+ [<ffffffffa059709a>] ? nested_vmx_vmexit+0xa4/0x233 [kvm_intel]
+ [<ffffffff8103f11a>] warn_slowpath_null+0x15/0x17
+ [<ffffffffa059709a>] nested_vmx_vmexit+0xa4/0x233 [kvm_intel]
+ [<ffffffffa0594295>] ? nested_vmx_exit_handled+0x6a/0x39e [kvm_intel]
+ [<ffffffffa0537931>] ? kvm_apic_has_interrupt+0x80/0xd5 [kvm]
+ [<ffffffffa05972ec>] vmx_check_nested_events+0xc3/0xd3 [kvm_intel]
+ [<ffffffffa051ebe9>] inject_pending_event+0xd0/0x16e [kvm]
+ [<ffffffffa051efa0>] vcpu_enter_guest+0x319/0x704 [kvm]
+
+To fix this, we cannot rely on the processor's virtual interrupt delivery,
+because "acknowledge interrupt on exit" must only update the virtual
+ISR/PPR/IRR registers (and SVI, which is just a cache of the virtual ISR)
+but it should not deliver the interrupt through the IDT. Thus, KVM has
+to deliver the interrupt "by hand", similar to the treatment of EOI in
+commit fc57ac2c9ca8 (KVM: lapic: sync highest ISR to hardware apic on
+EOI, 2014-05-14).
+
+The patch modifies kvm_cpu_get_interrupt to always acknowledge an
+interrupt; there are only two callers, and the other is not affected
+because it is never reached with kvm_apic_vid_enabled() == true. Then it
+modifies apic_set_isr and apic_clear_irr to update SVI and RVI in addition
+to the registers.
+
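+For context, the caller that now relies on this is the "acknowledge
+interrupt on exit" path in nested_vmx_vmexit(), roughly (paraphrased
+from commit 77b0f5d, not part of this patch; details may differ):
+
+    if (nested_exit_intr_ack_set(vcpu)) {
+            /*
+             * With this fix, kvm_cpu_get_interrupt() acknowledges the
+             * interrupt in the virtual APIC (ISR/SVI updated, IRR
+             * cleared) even when APICv is in use, instead of returning
+             * -1 and triggering the WARNING above.
+             */
+            int irq = kvm_cpu_get_interrupt(vcpu);
+            WARN_ON(irq < 0);
+            vmcs12->vm_exit_intr_info = irq |
+                    INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
+    }
+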
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Suggested-by: "Zhang, Yang Z" <yang.z.zhang@intel.com>
+Tested-by: Liu, RongrongX <rongrongx.liu@intel.com>
+Tested-by: Felipe Reyes <freyes@suse.com>
+Fixes: 77b0f5d67ff2781f36831cba79674c3e97bd7acf
+Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/irq.c | 2 +-
+ arch/x86/kvm/lapic.c | 52 ++++++++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 40 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcp
+
+ vector = kvm_cpu_get_extint(v);
+
+- if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
++ if (vector != -1)
+ return vector; /* PIC */
+
+ return kvm_get_apic_interrupt(v); /* APIC */
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -352,25 +352,46 @@ static inline int apic_find_highest_irr(
+
+ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+ {
+- apic->irr_pending = false;
++ struct kvm_vcpu *vcpu;
++
++ vcpu = apic->vcpu;
++
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
+- if (apic_search_irr(apic) != -1)
+- apic->irr_pending = true;
++ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
++ /* try to update RVI */
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ else {
++ vec = apic_search_irr(apic);
++ apic->irr_pending = (vec != -1);
++ }
+ }
+
+ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ {
+- /* Note that we never get here with APIC virtualization enabled. */
++ struct kvm_vcpu *vcpu;
++
++ if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
++ return;
++
++ vcpu = apic->vcpu;
+
+- if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+- ++apic->isr_count;
+- BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+ /*
+- * ISR (in service register) bit is set when injecting an interrupt.
+- * The highest vector is injected. Thus the latest bit set matches
+- * the highest bit in ISR.
++ * With APIC virtualization enabled, all caching is disabled
++ * because the processor can modify ISR under the hood. Instead
++ * just set SVI.
+ */
+- apic->highest_isr_cache = vec;
++ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
++ kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
++ else {
++ ++apic->isr_count;
++ BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
++ /*
++ * ISR (in service register) bit is set when injecting an interrupt.
++ * The highest vector is injected. Thus the latest bit set matches
++ * the highest bit in ISR.
++ */
++ apic->highest_isr_cache = vec;
++ }
+ }
+
+ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
+@@ -1627,11 +1648,16 @@ int kvm_get_apic_interrupt(struct kvm_vc
+ int vector = kvm_apic_has_interrupt(vcpu);
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+- /* Note that we never get here with APIC virtualization enabled. */
+-
+ if (vector == -1)
+ return -1;
+
++ /*
++ * We get here even with APIC virtualization enabled, if doing
++ * nested virtualization and L1 runs with the "acknowledge interrupt
++ * on exit" mode. Then we cannot inject the interrupt via RVI,
++ * because the process would deliver it through the IDT.
++ */
++
+ apic_set_isr(vector, apic);
+ apic_update_ppr(apic);
+ apic_clear_irr(vector, apic);
--- /dev/null
+From a0840240c0c6bcbac8f0f5db11f95c19aaf9b52f Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Sat, 19 Jul 2014 17:59:34 +1000
+Subject: KVM: PPC: Book3S: Fix LPCR one_reg interface
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit a0840240c0c6bcbac8f0f5db11f95c19aaf9b52f upstream.
+
+The LPCR was defined as a 32-bit register in the one_reg interface.
+This is unfortunate because KVM allows userspace to control the DPFD
+(default prefetch depth) field, which is in the upper 32 bits. The
+result is that DPFD always gets set to 0, which reduces performance in
+the guest.
+
+We can't just change KVM_REG_PPC_LPCR to be a 64-bit register ID,
+since that would break existing userspace binaries. Instead we define
+a new KVM_REG_PPC_LPCR_64 id which is 64-bit. Userspace can still use
+the old KVM_REG_PPC_LPCR id, but it now only modifies those fields in
+the bottom 32 bits that userspace can modify (ILE, TC and AIL).
+If userspace uses the new KVM_REG_PPC_LPCR_64 id, it can modify DPFD
+as well.
+
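+As a purely illustrative userspace sketch (the helper name and the
+vcpu fd setup are hypothetical; the ioctl, struct and register id are
+the real ABI), DPFD can now be set through the 64-bit id:
+
+    #include <stdint.h>
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    static int set_lpcr(int vcpu_fd, uint64_t lpcr)
+    {
+            struct kvm_one_reg reg = {
+                    /* new id from this patch; the old 32-bit
+                     * KVM_REG_PPC_LPCR id can only reach ILE, TC and AIL */
+                    .id   = KVM_REG_PPC_LPCR_64,
+                    .addr = (uintptr_t)&lpcr,
+            };
+
+            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
+    }
+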
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Alexander Graf <agraf@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/virtual/kvm/api.txt | 3 ++-
+ arch/powerpc/include/uapi/asm/kvm.h | 1 +
+ arch/powerpc/kvm/book3s_hv.c | 13 +++++++++++--
+ arch/powerpc/kvm/book3s_pr.c | 2 ++
+ 4 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -1869,7 +1869,8 @@ registers, find a list below:
+ PPC | KVM_REG_PPC_PID | 64
+ PPC | KVM_REG_PPC_ACOP | 64
+ PPC | KVM_REG_PPC_VRSAVE | 32
+- PPC | KVM_REG_PPC_LPCR | 64
++ PPC | KVM_REG_PPC_LPCR | 32
++ PPC | KVM_REG_PPC_LPCR_64 | 64
+ PPC | KVM_REG_PPC_PPR | 64
+ PPC | KVM_REG_PPC_ARCH_COMPAT 32
+ PPC | KVM_REG_PPC_DABRX | 32
+--- a/arch/powerpc/include/uapi/asm/kvm.h
++++ b/arch/powerpc/include/uapi/asm/kvm.h
+@@ -548,6 +548,7 @@ struct kvm_get_htab_header {
+
+ #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+ #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
++#define KVM_REG_PPC_LPCR_64 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
+ #define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+ /* Architecture compatibility level */
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -785,7 +785,8 @@ static int kvm_arch_vcpu_ioctl_set_sregs
+ return 0;
+ }
+
+-static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
++static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
++ bool preserve_top32)
+ {
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 mask;
+@@ -820,6 +821,10 @@ static void kvmppc_set_lpcr(struct kvm_v
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ mask |= LPCR_AIL;
++
++ /* Broken 32-bit version of LPCR must not clear top bits */
++ if (preserve_top32)
++ mask &= 0xFFFFFFFF;
+ vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+ spin_unlock(&vc->lock);
+ }
+@@ -939,6 +944,7 @@ static int kvmppc_get_one_reg_hv(struct
+ *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+ break;
+ case KVM_REG_PPC_LPCR:
++ case KVM_REG_PPC_LPCR_64:
+ *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+ break;
+ case KVM_REG_PPC_PPR:
+@@ -1150,7 +1156,10 @@ static int kvmppc_set_one_reg_hv(struct
+ ALIGN(set_reg_val(id, *val), 1UL << 24);
+ break;
+ case KVM_REG_PPC_LPCR:
+- kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
++ kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
++ break;
++ case KVM_REG_PPC_LPCR_64:
++ kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
+ break;
+ case KVM_REG_PPC_PPR:
+ vcpu->arch.ppr = set_reg_val(id, *val);
+--- a/arch/powerpc/kvm/book3s_pr.c
++++ b/arch/powerpc/kvm/book3s_pr.c
+@@ -1233,6 +1233,7 @@ static int kvmppc_get_one_reg_pr(struct
+ *val = get_reg_val(id, to_book3s(vcpu)->hior);
+ break;
+ case KVM_REG_PPC_LPCR:
++ case KVM_REG_PPC_LPCR_64:
+ /*
+ * We are only interested in the LPCR_ILE bit
+ */
+@@ -1268,6 +1269,7 @@ static int kvmppc_set_one_reg_pr(struct
+ to_book3s(vcpu)->hior_explicit = true;
+ break;
+ case KVM_REG_PPC_LPCR:
++ case KVM_REG_PPC_LPCR_64:
+ kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
+ break;
+ default:
--- /dev/null
+From 0d234daf7e0a3290a3a20c8087eefbd6335a5bd4 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 18 Aug 2014 16:39:48 +0200
+Subject: Revert "KVM: x86: Increase the number of fixed MTRR regs to 10"
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 0d234daf7e0a3290a3a20c8087eefbd6335a5bd4 upstream.
+
+This reverts commit 682367c494869008eb89ef733f196e99415ae862,
+which causes 32-bit SMP Windows 7 guests to panic.
+
+SeaBIOS has a limit on the number of MTRRs that it can handle, and the
+reverted commit exceeded that limit. Better to revert it.
+Thanks to Nadav Amit for debugging the cause.
+
+Reported-by: Wanpeng Li <wanpeng.li@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t g
+ #define KVM_REFILL_PAGES 25
+ #define KVM_MAX_CPUID_ENTRIES 80
+ #define KVM_NR_FIXED_MTRR_REGION 88
+-#define KVM_NR_VAR_MTRR 10
++#define KVM_NR_VAR_MTRR 8
+
+ #define ASYNC_PF_PER_VCPU 64
+
kvm-x86-inter-privilege-level-ret-emulation-is-not-implemeneted.patch
kvm-x86-always-exit-on-eois-for-interrupts-listed-in-the-ioapic-redir-table.patch
kvm-s390-mm-fix-page-table-locking-vs.-split-pmd-lock.patch
+kvm-ppc-book3s-fix-lpcr-one_reg-interface.patch
+kvm-nvmx-fix-acknowledge-interrupt-on-exit-when-apicv-is-in-use.patch
+revert-kvm-x86-increase-the-number-of-fixed-mtrr-regs-to-10.patch
+kvm-iommu-fix-the-third-parameter-of-kvm_iommu_put_pages-cve-2014-3601.patch