--- /dev/null
+From 8985d50382359e5bf118fdbefc859d0dbf6cebc7 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 11 Aug 2016 11:58:13 +0100
+Subject: MIPS: KVM: Add missing gfn range check
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 8985d50382359e5bf118fdbefc859d0dbf6cebc7 upstream.
+
+kvm_mips_handle_mapped_seg_tlb_fault() calculates the guest frame number
+based on the guest TLB EntryLo values, but the result is not range
+checked to ensure it lies within the guest_pmap. If the physical memory
+the guest refers to is out of range, dump the guest TLB and emit an
+internal error.
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Cc: <stable@vger.kernel.org> # 3.10.x-
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v4.7]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/tlb.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -373,6 +373,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ kvm_pfn_t pfn0, pfn1;
++ gfn_t gfn0, gfn1;
+ long tlb_lo[2];
+ int ret;
+
+@@ -387,18 +388,24 @@ int kvm_mips_handle_mapped_seg_tlb_fault
+ VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[0])
+- >> PAGE_SHIFT) < 0)
++ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
++ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
++ if (gfn0 >= kvm->arch.guest_pmap_npages ||
++ gfn1 >= kvm->arch.guest_pmap_npages) {
++ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
++ __func__, gfn0, gfn1, tlb->tlb_hi);
++ kvm_mips_dump_guest_tlbs(vcpu);
+ return -1;
++ }
+
+- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[1])
+- >> PAGE_SHIFT) < 0)
++ if (kvm_mips_map_page(kvm, gfn0) < 0)
+ return -1;
+
+- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[0])
+- >> PAGE_SHIFT];
+- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[1])
+- >> PAGE_SHIFT];
++ if (kvm_mips_map_page(kvm, gfn1) < 0)
++ return -1;
++
++ pfn0 = kvm->arch.guest_pmap[gfn0];
++ pfn1 = kvm->arch.guest_pmap[gfn1];
+
+ if (hpa0)
+ *hpa0 = pfn0 << PAGE_SHIFT;
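Stripped of the KVM plumbing, the shape of this fix is an ordinary bounds
check before the guest_pmap[] lookup. Below is a minimal standalone
sketch of that idea, assuming a simplified pmap structure and a
hypothetical lookup_pair() helper rather than the real
kvm_mips_map_page()/guest_pmap code:

#include <stdio.h>

typedef unsigned long long gfn_t;

/* Simplified stand-in for the guest physical map (assumption, not KVM's). */
struct guest_pmap {
	unsigned long npages;
	unsigned long *pages;		/* gfn -> pfn */
};

/* Refuse out-of-range gfns instead of indexing past the end of pages[]. */
static int lookup_pair(const struct guest_pmap *map, gfn_t gfn0, gfn_t gfn1,
		       unsigned long *pfn0, unsigned long *pfn1)
{
	if (gfn0 >= map->npages || gfn1 >= map->npages) {
		fprintf(stderr, "invalid gfn pair [%#llx, %#llx]\n", gfn0, gfn1);
		return -1;
	}
	*pfn0 = map->pages[gfn0];
	*pfn1 = map->pages[gfn1];
	return 0;
}

int main(void)
{
	unsigned long pages[4] = { 7, 8, 9, 10 };
	struct guest_pmap map = { .npages = 4, .pages = pages };
	unsigned long pfn0, pfn1;

	if (!lookup_pair(&map, 2, 3, &pfn0, &pfn1))
		printf("pfns: %lu %lu\n", pfn0, pfn1);	/* pfns: 9 10 */
	lookup_pair(&map, 4, 5, &pfn0, &pfn1);		/* rejected: out of range */
	return 0;
}

Both gfns of the TLB pair are validated before either is used as an
array index, which is exactly what the hunk above adds.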
--- /dev/null
+From 0741f52d1b980dbeb290afe67d88fc2928edd8ab Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 11 Aug 2016 11:58:14 +0100
+Subject: MIPS: KVM: Fix gfn range check in kseg0 tlb faults
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 0741f52d1b980dbeb290afe67d88fc2928edd8ab upstream.
+
+Two consecutive gfns are loaded into the host TLB, so ensure the range
+check isn't off by one when guest_pmap_npages is odd.
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Cc: <stable@vger.kernel.org> # 3.10.x-
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v4.7]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/tlb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -284,7 +284,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsi
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+- if (gfn >= kvm->arch.guest_pmap_npages) {
++ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ gfn, badvaddr);
+ kvm_mips_dump_host_tlbs();
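Why (gfn | 1) is the right check: a kseg0 fault fills the even/odd page
pair {gfn & ~1, (gfn & ~1) | 1} into the host TLB, so the highest gfn
actually touched is gfn | 1. With a plain gfn >= guest_pmap_npages test
and an odd npages value, the last even gfn would pass even though its
odd partner lies one slot past the end of guest_pmap. A small standalone
check of the arithmetic (plain C, not kernel code):

#include <assert.h>

/*
 * A kseg0 TLB fault loads both halves of an even/odd page pair, so the
 * highest gfn touched is (gfn | 1); checking that value covers both
 * entries with a single comparison.
 */
static int gfn_pair_in_range(unsigned long long gfn, unsigned long npages)
{
	return (gfn | 1) < npages;
}

int main(void)
{
	/* npages = 5: valid gfns are 0..4, but gfn 4 pairs with gfn 5. */
	assert(gfn_pair_in_range(2, 5));	/* pair {2, 3} fits */
	assert(!gfn_pair_in_range(4, 5));	/* pair {4, 5} overruns the map */
	assert(!gfn_pair_in_range(6, 5));	/* entirely out of range */
	return 0;
}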
--- /dev/null
+From c604cffa93478f8888bec62b23d6073dad03d43a Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 11 Aug 2016 11:58:12 +0100
+Subject: MIPS: KVM: Fix mapped fault broken commpage handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit c604cffa93478f8888bec62b23d6073dad03d43a upstream.
+
+kvm_mips_handle_mapped_seg_tlb_fault() appears to map the guest page at
+virtual address 0 to PFN 0 if the guest has created its own mapping
+there. The intention is unclear, but it may have been an attempt to
+protect the zero page from being mapped to anything but the comm page in
+code paths you wouldn't expect from genuine commpage accesses (guest
+kernel mode cache instructions on that address, hitting trapping
+instructions when executing from that address with a coincidental TLB
+eviction during the KVM handling, and guest user mode accesses to that
+address).
+
+Fix this to check for mappings exactly at KVM_GUEST_COMMPAGE_ADDR (it
+may not be at address 0 since commit 42aa12e74e91 ("MIPS: KVM: Move
+commpage so 0x0 is unmapped")), and set the corresponding EntryLo to be
+interpreted as 0 (invalid).
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Cc: <stable@vger.kernel.org> # 3.10.x-
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v4.7]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/tlb.c | 45 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 26 insertions(+), 19 deletions(-)
+
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -373,25 +373,32 @@ int kvm_mips_handle_mapped_seg_tlb_fault
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ kvm_pfn_t pfn0, pfn1;
++ long tlb_lo[2];
+ int ret;
+
+- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+- pfn0 = 0;
+- pfn1 = 0;
+- } else {
+- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+- >> PAGE_SHIFT) < 0)
+- return -1;
+-
+- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+- >> PAGE_SHIFT) < 0)
+- return -1;
+-
+- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+- >> PAGE_SHIFT];
+- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+- >> PAGE_SHIFT];
+- }
++ tlb_lo[0] = tlb->tlb_lo0;
++ tlb_lo[1] = tlb->tlb_lo1;
++
++ /*
++ * The commpage address must not be mapped to anything else if the guest
++ * TLB contains entries nearby, or commpage accesses will break.
++ */
++ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
++ VPN2_MASK & (PAGE_MASK << 1)))
++ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
++
++ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[0])
++ >> PAGE_SHIFT) < 0)
++ return -1;
++
++ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[1])
++ >> PAGE_SHIFT) < 0)
++ return -1;
++
++ pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[0])
++ >> PAGE_SHIFT];
++ pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[1])
++ >> PAGE_SHIFT];
+
+ if (hpa0)
+ *hpa0 = pfn0 << PAGE_SHIFT;
+@@ -401,9 +408,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault
+
+ /* Get attributes from the Guest TLB */
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
++ (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
++ (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
+
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ tlb->tlb_lo0, tlb->tlb_lo1);
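For reference, the commpage guard added here works in two steps:
VPN2_MASK & (PAGE_MASK << 1) masks an address down to the even/odd page
pair covered by a single TLB entry, so the XOR with tlb_hi is zero
exactly when the guest mapping covers the commpage's pair; bit 0 of
(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) then selects which half of the
pair the commpage occupies, and that EntryLo is forced to 0 (invalid).
A toy illustration of the mask arithmetic, using made-up constants
(4 KiB pages, an arbitrary commpage address) rather than the kernel's
real definitions:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define VPN2_MASK	0xffffe000UL	/* assumed 32-bit layout */
#define COMMPAGE_ADDR	0x7000UL	/* hypothetical commpage location */

int main(void)
{
	unsigned long tlb_hi = 0x6000UL;	/* guest entry for the 0x6000/0x7000 pair */
	unsigned long tlb_lo[2] = { 0x11, 0x22 };	/* dummy EntryLo0/EntryLo1 */

	/* Zero iff tlb_hi and the commpage fall in the same even/odd page pair. */
	if (!((tlb_hi ^ COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1))) {
		/* Bit 0 of the page number picks the half the commpage occupies. */
		tlb_lo[(COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;	/* mark invalid */
	}

	printf("lo0=%#lx lo1=%#lx\n", tlb_lo[0], tlb_lo[1]);	/* lo0=0x11 lo1=0 */
	return 0;
}

With these values the guest's EntryLo1, which would otherwise shadow the
commpage, is cleared while EntryLo0 is left untouched.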
--- /dev/null
+From 9b731bcfdec4c159ad2e4312e25d69221709b96a Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 11 Aug 2016 11:58:15 +0100
+Subject: MIPS: KVM: Propagate kseg0/mapped tlb fault errors
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 9b731bcfdec4c159ad2e4312e25d69221709b96a upstream.
+
+Propagate errors from kvm_mips_handle_kseg0_tlb_fault() and
+kvm_mips_handle_mapped_seg_tlb_fault(), usually triggering an internal
+error since they normally indicate the guest accessed bad physical
+memory or the commpage in an unexpected way.
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Fixes: e685c689f3a8 ("KVM/MIPS32: Privileged instruction/target branch emulation.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Cc: <stable@vger.kernel.org> # 3.10.x-
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v4.7]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/emulate.c | 40 ++++++++++++++++++++++++++++------------
+ arch/mips/kvm/tlb.c | 14 ++++++++++----
+ 2 files changed, 38 insertions(+), 16 deletions(-)
+
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -1615,8 +1615,14 @@ enum emulation_result kvm_mips_emulate_c
+
+ preempt_disable();
+ if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
+- kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
++ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
++ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
++ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
++ __func__, va, vcpu, read_c0_entryhi());
++ er = EMULATE_FAIL;
++ preempt_enable();
++ goto done;
++ }
+ } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+ KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ int index;
+@@ -1654,14 +1660,19 @@ enum emulation_result kvm_mips_emulate_c
+ run, vcpu);
+ preempt_enable();
+ goto dont_update_pc;
+- } else {
+- /*
+- * We fault an entry from the guest tlb to the
+- * shadow host TLB
+- */
+- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+- NULL,
+- NULL);
++ }
++ /*
++ * We fault an entry from the guest tlb to the
++ * shadow host TLB
++ */
++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++ NULL, NULL)) {
++ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++ __func__, va, index, vcpu,
++ read_c0_entryhi());
++ er = EMULATE_FAIL;
++ preempt_enable();
++ goto done;
+ }
+ }
+ } else {
+@@ -2625,8 +2636,13 @@ enum emulation_result kvm_mips_handle_tl
+ * OK we have a Guest TLB entry, now inject it into the
+ * shadow host TLB
+ */
+- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+- NULL);
++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++ NULL, NULL)) {
++ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++ __func__, va, index, vcpu,
++ read_c0_entryhi());
++ er = EMULATE_FAIL;
++ }
+ }
+ }
+
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -790,10 +790,16 @@ uint32_t kvm_get_inst(uint32_t *opc, str
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+- &vcpu->arch.
+- guest_tlb[index],
+- NULL, NULL);
++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
++ &vcpu->arch.guest_tlb[index],
++ NULL, NULL)) {
++ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
++ __func__, opc, index, vcpu,
++ read_c0_entryhi());
++ kvm_mips_dump_guest_tlbs(vcpu);
++ local_irq_restore(flags);
++ return KVM_INVALID_INST;
++ }
+ inst = *(opc);
+ }
+ local_irq_restore(flags);
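The calling convention this last patch relies on is the usual kernel
one: the fault handlers return 0 on success and non-zero on failure,
and each caller now turns a failure into its own error path
(EMULATE_FAIL or KVM_INVALID_INST) rather than carrying on with a host
TLB entry that was never written. A generic sketch of that shape, with
hypothetical names rather than the kvm_mips_* functions themselves:

enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

/* Stand-in for a fault handler that can fail (e.g. on an out-of-range gfn). */
static int handle_tlb_fault(unsigned long va)
{
	return va ? 0 : -1;		/* pretend va == 0 is the failing case */
}

/* The caller propagates the failure instead of silently ignoring it. */
static enum emulation_result emulate_access(unsigned long va)
{
	if (handle_tlb_fault(va))
		return EMULATE_FAIL;	/* bail out; do not run on a stale TLB */
	return EMULATE_DONE;
}

int main(void)
{
	return emulate_access(0x1000) == EMULATE_DONE ? 0 : 1;
}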
random-initialize-the-non-blocking-pool-via-add_hwgenerator_randomness.patch
random-print-a-warning-for-the-first-ten-uninitialized-random-users.patch
cachefiles-fix-race-between-inactivating-and-culling-a-cache-object.patch
+mips-kvm-fix-mapped-fault-broken-commpage-handling.patch
+mips-kvm-add-missing-gfn-range-check.patch
+mips-kvm-fix-gfn-range-check-in-kseg0-tlb-faults.patch
+mips-kvm-propagate-kseg0-mapped-tlb-fault-errors.patch