git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 18 Aug 2016 09:46:59 +0000 (11:46 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 18 Aug 2016 09:46:59 +0000 (11:46 +0200)
added patches:
mips-kvm-add-missing-gfn-range-check.patch
mips-kvm-fix-gfn-range-check-in-kseg0-tlb-faults.patch
mips-kvm-fix-mapped-fault-broken-commpage-handling.patch
mips-kvm-propagate-kseg0-mapped-tlb-fault-errors.patch

queue-3.14/mips-kvm-add-missing-gfn-range-check.patch [new file with mode: 0644]
queue-3.14/mips-kvm-fix-gfn-range-check-in-kseg0-tlb-faults.patch [new file with mode: 0644]
queue-3.14/mips-kvm-fix-mapped-fault-broken-commpage-handling.patch [new file with mode: 0644]
queue-3.14/mips-kvm-propagate-kseg0-mapped-tlb-fault-errors.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/mips-kvm-add-missing-gfn-range-check.patch b/queue-3.14/mips-kvm-add-missing-gfn-range-check.patch
new file mode 100644
index 0000000..d1d29cf
--- /dev/null
+++ b/queue-3.14/mips-kvm-add-missing-gfn-range-check.patch
@@ -0,0 +1,72 @@
+From james.hogan@imgtec.com  Thu Aug 18 11:46:20 2016
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 18 Aug 2016 10:22:53 +0100
+Subject: [PATCH BACKPORT 3.10-3.15 2/4] MIPS: KVM: Add missing gfn range check
+To: <stable@vger.kernel.org>
+Cc: James Hogan <james.hogan@imgtec.com>, Paolo Bonzini <pbonzini@redhat.com>, Radim Krčmář <rkrcmar@redhat.com>, Ralf Baechle <ralf@linux-mips.org>, <linux-mips@linux-mips.org>, <kvm@vger.kernel.org>
+Message-ID: <54b01d37d3ac6ff18fdef80d1189d3c1285d4f46.1471021142.git-series.james.hogan@imgtec.com>
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 8985d50382359e5bf118fdbefc859d0dbf6cebc7 upstream.
+
+kvm_mips_handle_mapped_seg_tlb_fault() calculates the guest frame number
+based on the guest TLB EntryLo values, however it is not range checked
+to ensure it lies within the guest_pmap. If the physical memory the
+guest refers to is out of range then dump the guest TLB and emit an
+internal error.
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v3.10.y - v3.15.y]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/kvm_tlb.c |   19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/kvm/kvm_tlb.c
++++ b/arch/mips/kvm/kvm_tlb.c
+@@ -370,6 +370,7 @@ kvm_mips_handle_mapped_seg_tlb_fault(str
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
++      gfn_t gfn0, gfn1;
+       long tlb_lo[2];
+       tlb_lo[0] = tlb->tlb_lo0;
+@@ -383,14 +384,24 @@ kvm_mips_handle_mapped_seg_tlb_fault(str
+                       VPN2_MASK & (PAGE_MASK << 1)))
+               tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+-      if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT) < 0)
++      gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
++      gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
++      if (gfn0 >= kvm->arch.guest_pmap_npages ||
++          gfn1 >= kvm->arch.guest_pmap_npages) {
++              kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
++                      __func__, gfn0, gfn1, tlb->tlb_hi);
++              kvm_mips_dump_guest_tlbs(vcpu);
+               return -1;
++      }
+-      if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT) < 0)
++      if (kvm_mips_map_page(kvm, gfn0) < 0)
+               return -1;
+-      pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT];
+-      pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT];
++      if (kvm_mips_map_page(kvm, gfn1) < 0)
++              return -1;
++
++      pfn0 = kvm->arch.guest_pmap[gfn0];
++      pfn1 = kvm->arch.guest_pmap[gfn1];
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
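For reference, a minimal userspace sketch of the bounds check this patch adds
to kvm_mips_handle_mapped_seg_tlb_fault(). The guest_map struct and the
entrylo_to_gfn()/lookup_pair() helpers are simplified stand-ins invented for
the demo, not kernel API; only the gfn range check itself mirrors the patch.

  #include <stdio.h>

  /* Stand-in for kvm->arch: a gfn -> pfn table with a known size. */
  struct guest_map {
          unsigned long *guest_pmap;
          unsigned long guest_pmap_npages;
  };

  /* Simplified mips3_tlbpfn_to_paddr(lo) >> PAGE_SHIFT: with 4K pages the
   * PFN field of an EntryLo value starts at bit 6. */
  static unsigned long entrylo_to_gfn(unsigned long entrylo)
  {
          return entrylo >> 6;
  }

  /* Return 0 on success, -1 if either gfn of the pair is out of range. */
  static int lookup_pair(const struct guest_map *m,
                         unsigned long lo0, unsigned long lo1,
                         unsigned long *pfn0, unsigned long *pfn1)
  {
          unsigned long gfn0 = entrylo_to_gfn(lo0);
          unsigned long gfn1 = entrylo_to_gfn(lo1);

          /* The added check: validate both gfns before indexing guest_pmap. */
          if (gfn0 >= m->guest_pmap_npages || gfn1 >= m->guest_pmap_npages) {
                  fprintf(stderr, "invalid gfn: [%#lx, %#lx]\n", gfn0, gfn1);
                  return -1;
          }

          *pfn0 = m->guest_pmap[gfn0];
          *pfn1 = m->guest_pmap[gfn1];
          return 0;
  }

  int main(void)
  {
          unsigned long pmap[4] = { 100, 101, 102, 103 };
          struct guest_map m = { pmap, 4 };
          unsigned long pfn0, pfn1;

          /* gfns 0 and 1 are in range: succeeds. */
          printf("%d\n", lookup_pair(&m, 0 << 6, 1 << 6, &pfn0, &pfn1));
          /* gfn 7 is beyond guest_pmap_npages: rejected instead of reading
           * past the end of guest_pmap. */
          printf("%d\n", lookup_pair(&m, 0 << 6, 7 << 6, &pfn0, &pfn1));
          return 0;
  }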
diff --git a/queue-3.14/mips-kvm-fix-gfn-range-check-in-kseg0-tlb-faults.patch b/queue-3.14/mips-kvm-fix-gfn-range-check-in-kseg0-tlb-faults.patch
new file mode 100644
index 0000000..d46f0f5
--- /dev/null
+++ b/queue-3.14/mips-kvm-fix-gfn-range-check-in-kseg0-tlb-faults.patch
@@ -0,0 +1,41 @@
+From james.hogan@imgtec.com  Thu Aug 18 11:46:25 2016
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 18 Aug 2016 10:22:54 +0100
+Subject: [PATCH BACKPORT 3.10-3.15 3/4] MIPS: KVM: Fix gfn range check in kseg0 tlb faults
+To: <stable@vger.kernel.org>
+Cc: James Hogan <james.hogan@imgtec.com>, Paolo Bonzini <pbonzini@redhat.com>, Radim Krčmář <rkrcmar@redhat.com>, Ralf Baechle <ralf@linux-mips.org>, <linux-mips@linux-mips.org>, <kvm@vger.kernel.org>
+Message-ID: <86ad47b80d7285bab4b9bb144764d4ac1d4d1adf.1471021142.git-series.james.hogan@imgtec.com>
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 0741f52d1b980dbeb290afe67d88fc2928edd8ab upstream.
+
+Two consecutive gfns are loaded into host TLB, so ensure the range check
+isn't off by one if guest_pmap_npages is odd.
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v3.10.y - v3.15.y]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/kvm_tlb.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kvm/kvm_tlb.c
++++ b/arch/mips/kvm/kvm_tlb.c
+@@ -285,7 +285,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsi
+       }
+       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+-      if (gfn >= kvm->arch.guest_pmap_npages) {
++      if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+                       gfn, badvaddr);
+               kvm_mips_dump_host_tlbs();
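The one-character change above is subtle: a MIPS TLB entry maps an even/odd
pair of pages, so the kseg0 handler fills in both the faulting gfn and its
partner. A small standalone demo (assumed toy values, not kernel code) of why
(gfn | 1) has to be range-checked rather than gfn alone:

  #include <stdio.h>

  int main(void)
  {
          unsigned long npages = 5;   /* odd number of guest pages: gfns 0..4 */
          unsigned long gfn = 4;      /* even gfn of the pair (4, 5) */

          /* Old check: passes, yet the handler would also touch gfn 5. */
          printf("gfn >= npages       : %d\n", gfn >= npages);

          /* New check: (gfn | 1) is the odd gfn of the pair, here 5,
           * so the access is rejected. */
          printf("(gfn | 1) >= npages : %d\n", (gfn | 1) >= npages);
          return 0;
  }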
diff --git a/queue-3.14/mips-kvm-fix-mapped-fault-broken-commpage-handling.patch b/queue-3.14/mips-kvm-fix-mapped-fault-broken-commpage-handling.patch
new file mode 100644
index 0000000..6dad7a9
--- /dev/null
+++ b/queue-3.14/mips-kvm-fix-mapped-fault-broken-commpage-handling.patch
@@ -0,0 +1,97 @@
+From james.hogan@imgtec.com  Thu Aug 18 11:46:11 2016
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 18 Aug 2016 10:22:52 +0100
+Subject: [PATCH BACKPORT 3.10-3.15 1/4] MIPS: KVM: Fix mapped fault broken commpage handling
+To: <stable@vger.kernel.org>
+Cc: James Hogan <james.hogan@imgtec.com>, Paolo Bonzini <pbonzini@redhat.com>, Radim Krčmář <rkrcmar@redhat.com>, Ralf Baechle <ralf@linux-mips.org>, <linux-mips@linux-mips.org>, <kvm@vger.kernel.org>
+Message-ID: <4980f95f6ec938cc80bb79c06222c535564a521d.1471021142.git-series.james.hogan@imgtec.com>
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit c604cffa93478f8888bec62b23d6073dad03d43a upstream.
+
+kvm_mips_handle_mapped_seg_tlb_fault() appears to map the guest page at
+virtual address 0 to PFN 0 if the guest has created its own mapping
+there. The intention is unclear, but it may have been an attempt to
+protect the zero page from being mapped to anything but the comm page in
+code paths you wouldn't expect from genuine commpage accesses (guest
+kernel mode cache instructions on that address, hitting trapping
+instructions when executing from that address with a coincidental TLB
+eviction during the KVM handling, and guest user mode accesses to that
+address).
+
+Fix this to check for mappings exactly at KVM_GUEST_COMMPAGE_ADDR (it
+may not be at address 0 since commit 42aa12e74e91 ("MIPS: KVM: Move
+commpage so 0x0 is unmapped")), and set the corresponding EntryLo to be
+interpreted as 0 (invalid).
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v3.10.y - v3.15.y]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/kvm_tlb.c |   36 +++++++++++++++++++++---------------
+ 1 file changed, 21 insertions(+), 15 deletions(-)
+
+--- a/arch/mips/kvm/kvm_tlb.c
++++ b/arch/mips/kvm/kvm_tlb.c
+@@ -370,21 +370,27 @@ kvm_mips_handle_mapped_seg_tlb_fault(str
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
++      long tlb_lo[2];
++      tlb_lo[0] = tlb->tlb_lo0;
++      tlb_lo[1] = tlb->tlb_lo1;
+-      if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+-              pfn0 = 0;
+-              pfn1 = 0;
+-      } else {
+-              if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+-                      return -1;
+-
+-              if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+-                      return -1;
+-
+-              pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+-              pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+-      }
++      /*
++       * The commpage address must not be mapped to anything else if the guest
++       * TLB contains entries nearby, or commpage accesses will break.
++       */
++      if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
++                      VPN2_MASK & (PAGE_MASK << 1)))
++              tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
++
++      if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT) < 0)
++              return -1;
++
++      if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT) < 0)
++              return -1;
++
++      pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT];
++      pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT];
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
+@@ -396,9 +402,9 @@ kvm_mips_handle_mapped_seg_tlb_fault(str
+       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+                       kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+-                      (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
++                      (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+-                      (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
++                      (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
+ #ifdef DEBUG
+       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
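A standalone sketch of the commpage test introduced above. VPN2_MASK,
PAGE_MASK and the commpage address are given illustrative values here (the
commpage is assumed to sit at guest address 0), and mask_commpage() is a
hypothetical wrapper around the same expression the patch uses:

  #include <stdio.h>

  #define PAGE_SHIFT    12
  #define PAGE_MASK     (~((1UL << PAGE_SHIFT) - 1))
  #define VPN2_MASK     0xffffe000UL   /* assumed 32-bit MIPS layout */
  #define COMMPAGE_ADDR 0x0UL          /* assumed: commpage at GVA 0 */

  /* Zero the EntryLo copy that would map the commpage, if this guest TLB
   * entry's even/odd page pair covers it. */
  static void mask_commpage(unsigned long tlb_hi, unsigned long tlb_lo[2])
  {
          /*
           * XOR cancels identical VPN2 bits; the mask ignores the even/odd
           * page-select bit and the in-page offset, so the test is true
           * exactly when the pair described by tlb_hi contains the commpage.
           */
          if (!((tlb_hi ^ COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
                  /* Low bit of the commpage's page number picks lo0 or lo1. */
                  tlb_lo[(COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
  }

  int main(void)
  {
          unsigned long hit[2]  = { 0x11, 0x22 };  /* pair at VPN2 0: covers it */
          unsigned long miss[2] = { 0x11, 0x22 };  /* pair at 0x2000: does not */

          mask_commpage(0x0000, hit);
          mask_commpage(0x2000, miss);
          printf("hit:  lo0=%#lx lo1=%#lx\n", hit[0], hit[1]);
          printf("miss: lo0=%#lx lo1=%#lx\n", miss[0], miss[1]);
          return 0;
  }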
diff --git a/queue-3.14/mips-kvm-propagate-kseg0-mapped-tlb-fault-errors.patch b/queue-3.14/mips-kvm-propagate-kseg0-mapped-tlb-fault-errors.patch
new file mode 100644
index 0000000..9da9b3f
--- /dev/null
+++ b/queue-3.14/mips-kvm-propagate-kseg0-mapped-tlb-fault-errors.patch
@@ -0,0 +1,113 @@
+From james.hogan@imgtec.com  Thu Aug 18 11:46:32 2016
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 18 Aug 2016 10:22:55 +0100
+Subject: [PATCH BACKPORT 3.10-3.15 4/4] MIPS: KVM: Propagate kseg0/mapped tlb fault errors
+To: <stable@vger.kernel.org>
+Cc: James Hogan <james.hogan@imgtec.com>, Paolo Bonzini <pbonzini@redhat.com>, Radim Krčmář <rkrcmar@redhat.com>, Ralf Baechle <ralf@linux-mips.org>, <linux-mips@linux-mips.org>, <kvm@vger.kernel.org>
+Message-ID: <f985ed835cfcabe5c7f313abd1de1bb3ee8737f9.1471021142.git-series.james.hogan@imgtec.com>
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 9b731bcfdec4c159ad2e4312e25d69221709b96a upstream.
+
+Propagate errors from kvm_mips_handle_kseg0_tlb_fault() and
+kvm_mips_handle_mapped_seg_tlb_fault(), usually triggering an internal
+error since they normally indicate the guest accessed bad physical
+memory or the commpage in an unexpected way.
+
+Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.")
+Fixes: e685c689f3a8 ("KVM/MIPS32: Privileged instruction/target branch emulation.")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[james.hogan@imgtec.com: Backport to v3.10.y - v3.15.y]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kvm/kvm_mips_emul.c |   33 ++++++++++++++++++++++++---------
+ arch/mips/kvm/kvm_tlb.c       |   14 ++++++++++----
+ 2 files changed, 34 insertions(+), 13 deletions(-)
+
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -972,8 +972,13 @@ kvm_mips_emulate_cache(uint32_t inst, ui
+       preempt_disable();
+       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+-              if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+-                      kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
++              if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
++                  kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
++                      kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
++                              __func__, va, vcpu, read_c0_entryhi());
++                      er = EMULATE_FAIL;
++                      preempt_enable();
++                      goto done;
+               }
+       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+@@ -1006,11 +1011,16 @@ kvm_mips_emulate_cache(uint32_t inst, ui
+                                                               run, vcpu);
+                               preempt_enable();
+                               goto dont_update_pc;
+-                      } else {
+-                              /* We fault an entry from the guest tlb to the shadow host TLB */
+-                              kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+-                                                                   NULL,
+-                                                                   NULL);
++                      }
++                      /* We fault an entry from the guest tlb to the shadow host TLB */
++                      if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++                                                               NULL, NULL)) {
++                              kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++                                      __func__, va, index, vcpu,
++                                      read_c0_entryhi());
++                              er = EMULATE_FAIL;
++                              preempt_enable();
++                              goto done;
+                       }
+               }
+       } else {
+@@ -1821,8 +1831,13 @@ kvm_mips_handle_tlbmiss(unsigned long ca
+                            tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+ #endif
+                       /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+-                      kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+-                                                           NULL);
++                      if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++                                                               NULL, NULL)) {
++                              kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++                                      __func__, va, index, vcpu,
++                                      read_c0_entryhi());
++                              er = EMULATE_FAIL;
++                      }
+               }
+       }
+--- a/arch/mips/kvm/kvm_tlb.c
++++ b/arch/mips/kvm/kvm_tlb.c
+@@ -797,10 +797,16 @@ uint32_t kvm_get_inst(uint32_t *opc, str
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
+-                      kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+-                                                           &vcpu->arch.
+-                                                           guest_tlb[index],
+-                                                           NULL, NULL);
++                      if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
++                                              &vcpu->arch.guest_tlb[index],
++                                              NULL, NULL)) {
++                              kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
++                                      __func__, opc, index, vcpu,
++                                      read_c0_entryhi());
++                              kvm_mips_dump_guest_tlbs(vcpu);
++                              local_irq_restore(flags);
++                              return KVM_INVALID_INST;
++                      }
+                       inst = *(opc);
+               }
+               local_irq_restore(flags);
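The call-site pattern this patch applies, sketched as a standalone program.
emulate_access() and handle_mapped_seg_tlb_fault() are simplified stand-ins
for the emulation paths touched above; the point is that the handler's return
value is now checked and the emulation fails cleanly instead of carrying on
after a failed mapping:

  #include <stdio.h>

  enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

  /* Hypothetical stand-in for kvm_mips_handle_mapped_seg_tlb_fault():
   * pretend anything at or above 1 MiB is outside the guest physical map. */
  static int handle_mapped_seg_tlb_fault(unsigned long va)
  {
          return va >= (1UL << 20) ? -1 : 0;
  }

  static enum emulation_result emulate_access(unsigned long va)
  {
          /* Before the patch the return value was ignored; now a failure is
           * reported and the emulation bails out with an error. */
          if (handle_mapped_seg_tlb_fault(va)) {
                  fprintf(stderr, "mapped seg tlb fault failed for %#lx\n", va);
                  return EMULATE_FAIL;
          }
          return EMULATE_DONE;
  }

  int main(void)
  {
          printf("%d\n", emulate_access(0x1000));    /* EMULATE_DONE (0) */
          printf("%d\n", emulate_access(0x200000));  /* EMULATE_FAIL (1) */
          return 0;
  }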
diff --git a/queue-3.14/series b/queue-3.14/series
index a52e5fd55094481e1a25df13dc2db23ade4b8dbd..8574b703e1155f5fe5cccaa7ddf8d58e300beadf 100644
--- a/queue-3.14/series
+++ b/queue-3.14/series
@@ -13,3 +13,7 @@ cifs-fix-crash-due-to-race-in-hmac-md5-handling.patch
 cifs-fix-a-possible-invalid-memory-access-in-smb2_query_symlink.patch
 random-properly-align-get_random_int_hash.patch
 random-print-a-warning-for-the-first-ten-uninitialized-random-users.patch
+mips-kvm-fix-mapped-fault-broken-commpage-handling.patch
+mips-kvm-add-missing-gfn-range-check.patch
+mips-kvm-fix-gfn-range-check-in-kseg0-tlb-faults.patch
+mips-kvm-propagate-kseg0-mapped-tlb-fault-errors.patch