3.0-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 19 Apr 2013 21:19:40 +0000 (14:19 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 19 Apr 2013 21:19:40 +0000 (14:19 -0700)
added patches:
kvm-allow-cross-page-reads-and-writes-from-cached-translations.patch
kvm-fix-bounds-checking-in-ioapic-indirect-register-reads-cve-2013-1798.patch

queue-3.0/kvm-allow-cross-page-reads-and-writes-from-cached-translations.patch [new file with mode: 0644]
queue-3.0/kvm-fix-bounds-checking-in-ioapic-indirect-register-reads-cve-2013-1798.patch [new file with mode: 0644]
queue-3.0/series

diff --git a/queue-3.0/kvm-allow-cross-page-reads-and-writes-from-cached-translations.patch b/queue-3.0/kvm-allow-cross-page-reads-and-writes-from-cached-translations.patch
new file mode 100644
index 0000000..a0ea8b7
--- /dev/null
@@ -0,0 +1,140 @@
+From 8f964525a121f2ff2df948dac908dcc65be21b5b Mon Sep 17 00:00:00 2001
+From: Andrew Honig <ahonig@google.com>
+Date: Fri, 29 Mar 2013 09:35:21 -0700
+Subject: KVM: Allow cross page reads and writes from cached translations.
+
+From: Andrew Honig <ahonig@google.com>
+
+commit 8f964525a121f2ff2df948dac908dcc65be21b5b upstream.
+
+This patch adds support for kvm_gfn_to_hva_cache_init functions for
+reads and writes that will cross a page.  If the range falls within
+the same memslot, then this will be a fast operation.  If the range
+is split between two memslots, then the slower kvm_read_guest and
+kvm_write_guest are used.
+
+Tested: Tested against kvm_clock unit tests.
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Gleb Natapov <gleb@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c        |   10 ++++------
+ include/linux/kvm_host.h  |    2 +-
+ include/linux/kvm_types.h |    1 +
+ virt/kvm/kvm_main.c       |   39 +++++++++++++++++++++++++++++++--------
+ 4 files changed, 37 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1448,7 +1448,8 @@ static int kvm_pv_enable_async_pf(struct
+               return 0;
+       }
+-      if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
++      if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
++                                      sizeof(u32)))
+               return 1;
+       vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+@@ -1530,12 +1531,9 @@ int kvm_set_msr_common(struct kvm_vcpu *
+               gpa_offset = data & ~(PAGE_MASK | 1);
+-              /* Check that the address is 32-byte aligned. */
+-              if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+-                      break;
+-
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+-                   &vcpu->arch.pv_time, data & ~1ULL))
++                   &vcpu->arch.pv_time, data & ~1ULL,
++                   sizeof(struct pvclock_vcpu_time_info)))
+                       vcpu->arch.pv_time_enabled = false;
+               else
+                       vcpu->arch.pv_time_enabled = true;
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -388,7 +388,7 @@ int kvm_write_guest(struct kvm *kvm, gpa
+ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                          void *data, unsigned long len);
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+-                            gpa_t gpa);
++                            gpa_t gpa, unsigned long len);
+ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
+ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+--- a/include/linux/kvm_types.h
++++ b/include/linux/kvm_types.h
+@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
+       u64 generation;
+       gpa_t gpa;
+       unsigned long hva;
++      unsigned long len;
+       struct kvm_memory_slot *memslot;
+ };
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1375,20 +1375,38 @@ int kvm_write_guest(struct kvm *kvm, gpa
+ }
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+-                            gpa_t gpa)
++                            gpa_t gpa, unsigned long len)
+ {
+       struct kvm_memslots *slots = kvm_memslots(kvm);
+       int offset = offset_in_page(gpa);
+-      gfn_t gfn = gpa >> PAGE_SHIFT;
++      gfn_t start_gfn = gpa >> PAGE_SHIFT;
++      gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
++      gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
++      gfn_t nr_pages_avail;
+       ghc->gpa = gpa;
+       ghc->generation = slots->generation;
+-      ghc->memslot = __gfn_to_memslot(slots, gfn);
+-      ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+-      if (!kvm_is_error_hva(ghc->hva))
++      ghc->len = len;
++      ghc->memslot = gfn_to_memslot(kvm, start_gfn);
++      ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
++      if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+               ghc->hva += offset;
+-      else
+-              return -EFAULT;
++      } else {
++              /*
++               * If the requested region crosses two memslots, we still
++               * verify that the entire region is valid here.
++               */
++              while (start_gfn <= end_gfn) {
++                      ghc->memslot = gfn_to_memslot(kvm, start_gfn);
++                      ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
++                                                 &nr_pages_avail);
++                      if (kvm_is_error_hva(ghc->hva))
++                              return -EFAULT;
++                      start_gfn += nr_pages_avail;
++              }
++              /* Use the slow path for cross page reads and writes. */
++              ghc->memslot = NULL;
++      }
+       return 0;
+ }
+@@ -1400,8 +1418,13 @@ int kvm_write_guest_cached(struct kvm *k
+       struct kvm_memslots *slots = kvm_memslots(kvm);
+       int r;
++      BUG_ON(len > ghc->len);
++
+       if (slots->generation != ghc->generation)
+-              kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
++              kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
++
++      if (unlikely(!ghc->memslot))
++              return kvm_write_guest(kvm, ghc->gpa, data, len);
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
diff --git a/queue-3.0/kvm-fix-bounds-checking-in-ioapic-indirect-register-reads-cve-2013-1798.patch b/queue-3.0/kvm-fix-bounds-checking-in-ioapic-indirect-register-reads-cve-2013-1798.patch
new file mode 100644
index 0000000..7d593cd
--- /dev/null
@@ -0,0 +1,45 @@
+From a2c118bfab8bc6b8bb213abfc35201e441693d55 Mon Sep 17 00:00:00 2001
+From: Andy Honig <ahonig@google.com>
+Date: Wed, 20 Feb 2013 14:49:16 -0800
+Subject: KVM: Fix bounds checking in ioapic indirect register reads (CVE-2013-1798)
+
+From: Andy Honig <ahonig@google.com>
+
+commit a2c118bfab8bc6b8bb213abfc35201e441693d55 upstream.
+
+If the guest specifies an IOAPIC_REG_SELECT with an invalid value and follows
+that with a read of the IOAPIC_REG_WINDOW, KVM does not properly validate
+that request.  ioapic_read_indirect contains an
+ASSERT(redir_index < IOAPIC_NUM_PINS), but the ASSERT has no effect in
+non-debug builds.  In recent kernels this allows a guest to cause a kernel
+oops by reading invalid memory.  In older kernels (pre-3.3) this allows a
+guest to read from large ranges of host memory.
+
+Tested: tested against apic unit tests.
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/ioapic.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirec
+                       u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
+                       u64 redir_content;
+-                      ASSERT(redir_index < IOAPIC_NUM_PINS);
++                      if (redir_index < IOAPIC_NUM_PINS)
++                              redir_content =
++                                      ioapic->redirtbl[redir_index].bits;
++                      else
++                              redir_content = ~0ULL;
+-                      redir_content = ioapic->redirtbl[redir_index].bits;
+                       result = (ioapic->ioregsel & 0x1) ?
+                           (redir_content >> 32) & 0xffffffff :
+                           redir_content & 0xffffffff;
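
The fix above matters because, as the commit message notes, ASSERT() has no effect in non-debug builds, leaving the guest-controlled index unchecked. The short, self-contained C sketch below illustrates the difference using userspace assert() and invented names (DEMO_NUM_PINS and a local redirtbl array, not the kernel's): compiled with -DNDEBUG, the asserted variant silently reads out of bounds, while the checked variant returns all-ones for an invalid selector, matching the patched behavior.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_PINS 24

static uint64_t redirtbl[DEMO_NUM_PINS];

/* Broken pattern: with -DNDEBUG the assert() vanishes, so an
 * out-of-range index reads past the end of the table. */
static uint64_t read_redir_asserted(uint32_t index)
{
	assert(index < DEMO_NUM_PINS);
	return redirtbl[index];
}

/* Fixed pattern, mirroring the patch: check at runtime and return
 * all-ones for an invalid selector instead of touching memory. */
static uint64_t read_redir_checked(uint32_t index)
{
	if (index < DEMO_NUM_PINS)
		return redirtbl[index];
	return ~0ULL;
}

int main(void)
{
	uint32_t guest_index = 1000;	/* attacker-controlled selector */

	(void)read_redir_asserted(0);	/* safe only for valid indices */
	/* read_redir_asserted(guest_index) aborts in a debug build but
	 * reads out of bounds when compiled with -DNDEBUG. */
	printf("checked read: %#llx\n",
	       (unsigned long long)read_redir_checked(guest_index));
	return 0;
}
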
diff --git a/queue-3.0/series b/queue-3.0/series
index bd150b29f4f413526ce1b8d59e6c19f69750500e..050afb73c22369f0eaca15ed92eda1370a05cb3d 100644
@@ -6,3 +6,5 @@ kernel-signal.c-stop-info-leak-via-the-tkill-and-the-tgkill-syscalls.patch
 hfsplus-fix-potential-overflow-in-hfsplus_file_truncate.patch
 kvm-x86-fix-for-buffer-overflow-in-handling-of-msr_kvm_system_time-cve-2013-1796.patch
 kvm-x86-convert-msr_kvm_system_time-to-use-gfn_to_hva_cache-functions-cve-2013-1797.patch
+kvm-fix-bounds-checking-in-ioapic-indirect-register-reads-cve-2013-1798.patch
+kvm-allow-cross-page-reads-and-writes-from-cached-translations.patch