git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.17-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Nov 2014 19:48:42 +0000 (11:48 -0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Nov 2014 19:48:42 +0000 (11:48 -0800)
added patches:
kvm-emulator-fix-execution-close-to-the-segment-limit.patch
tracing-syscalls-ignore-numbers-outside-nr_syscalls-range.patch

queue-3.17/kvm-emulator-fix-execution-close-to-the-segment-limit.patch [new file with mode: 0644]
queue-3.17/tracing-syscalls-ignore-numbers-outside-nr_syscalls-range.patch [new file with mode: 0644]

diff --git a/queue-3.17/kvm-emulator-fix-execution-close-to-the-segment-limit.patch b/queue-3.17/kvm-emulator-fix-execution-close-to-the-segment-limit.patch
new file mode 100644 (file)
index 0000000..53b1387
--- /dev/null
@@ -0,0 +1,150 @@
+From fd56e1546a5f734290cbedd2b81c518850736511 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 27 Oct 2014 14:40:39 +0100
+Subject: KVM: emulator: fix execution close to the segment limit
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit fd56e1546a5f734290cbedd2b81c518850736511 upstream.
+
+Emulation of code that is 14 bytes to the segment limit or closer
+(e.g. RIP = 0xFFFFFFF2 after reset) is broken because we try to read as
+many as 15 bytes from the beginning of the instruction, and __linearize
+fails when the passed (address, size) pair reaches out of the segment.
+
+To fix this, let __linearize return the maximum accessible size (clamped
+to 2^32-1) for usage in __do_insn_fetch_bytes, and avoid the limit check
+by passing zero for the desired size.
+
+For expand-down segments, __linearize is performing a redundant check.
+(u32)(addr.ea + size - 1) <= lim can only happen if addr.ea is close
+to 4GB; in this case, addr.ea + size - 1 will also fail the check against
+the upper bound of the segment (which is provided by the D/B bit).
+After eliminating the redundant check, it is simple to compute
+the *max_size for expand-down segments too.
+
+Now that the limit check is done in __do_insn_fetch_bytes, we want
+to inject a general protection fault there if size < op_size (like
+__linearize would have done), instead of just aborting.
+
+This fixes booting Tiano Core from emulated flash with EPT disabled.
+
+Fixes: 719d5a9b2487e0562f178f61e323c3dc18a8b200
+Reported-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/emulate.c |   43 +++++++++++++++++++++++++++++++++----------
+ 1 file changed, 33 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -613,7 +613,8 @@ static bool insn_aligned(struct x86_emul
+ static int __linearize(struct x86_emulate_ctxt *ctxt,
+                    struct segmented_address addr,
+-                   unsigned size, bool write, bool fetch,
++                   unsigned *max_size, unsigned size,
++                   bool write, bool fetch,
+                    ulong *linear)
+ {
+       struct desc_struct desc;
+@@ -624,10 +625,15 @@ static int __linearize(struct x86_emulat
+       unsigned cpl;
+       la = seg_base(ctxt, addr.seg) + addr.ea;
++      *max_size = 0;
+       switch (ctxt->mode) {
+       case X86EMUL_MODE_PROT64:
+               if (((signed long)la << 16) >> 16 != la)
+                       return emulate_gp(ctxt, 0);
++
++              *max_size = min_t(u64, ~0u, (1ull << 48) - la);
++              if (size > *max_size)
++                      goto bad;
+               break;
+       default:
+               usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+@@ -645,20 +651,25 @@ static int __linearize(struct x86_emulat
+               if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
+                   (ctxt->d & NoBigReal)) {
+                       /* la is between zero and 0xffff */
+-                      if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
++                      if (la > 0xffff)
+                               goto bad;
++                      *max_size = 0x10000 - la;
+               } else if ((desc.type & 8) || !(desc.type & 4)) {
+                       /* expand-up segment */
+-                      if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
++                      if (addr.ea > lim)
+                               goto bad;
++                      *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
+               } else {
+                       /* expand-down segment */
+-                      if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
++                      if (addr.ea <= lim)
+                               goto bad;
+                       lim = desc.d ? 0xffffffff : 0xffff;
+-                      if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
++                      if (addr.ea > lim)
+                               goto bad;
++                      *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
+               }
++              if (size > *max_size)
++                      goto bad;
+               cpl = ctxt->ops->cpl(ctxt);
+               if (!(desc.type & 8)) {
+                       /* data segment */
+@@ -693,7 +704,8 @@ static int linearize(struct x86_emulate_
+                    unsigned size, bool write,
+                    ulong *linear)
+ {
+-      return __linearize(ctxt, addr, size, write, false, linear);
++      unsigned max_size;
++      return __linearize(ctxt, addr, &max_size, size, write, false, linear);
+ }
+@@ -718,17 +730,27 @@ static int segmented_read_std(struct x86
+ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
+ {
+       int rc;
+-      unsigned size;
++      unsigned size, max_size;
+       unsigned long linear;
+       int cur_size = ctxt->fetch.end - ctxt->fetch.data;
+       struct segmented_address addr = { .seg = VCPU_SREG_CS,
+                                          .ea = ctxt->eip + cur_size };
+-      size = 15UL ^ cur_size;
+-      rc = __linearize(ctxt, addr, size, false, true, &linear);
++      /*
++       * We do not know exactly how many bytes will be needed, and
++       * __linearize is expensive, so fetch as much as possible.  We
++       * just have to avoid going beyond the 15 byte limit, the end
++       * of the segment, or the end of the page.
++       *
++       * __linearize is called with size 0 so that it does not do any
++       * boundary check itself.  Instead, we use max_size to check
++       * against op_size.
++       */
++      rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
+       if (unlikely(rc != X86EMUL_CONTINUE))
+               return rc;
++      size = min_t(unsigned, 15UL ^ cur_size, max_size);
+       size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
+       /*
+@@ -738,7 +760,8 @@ static int __do_insn_fetch_bytes(struct
+        * still, we must have hit the 15-byte boundary.
+        */
+       if (unlikely(size < op_size))
+-              return X86EMUL_UNHANDLEABLE;
++              return emulate_gp(ctxt, 0);
++
+       rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
+                             size, &ctxt->exception);
+       if (unlikely(rc != X86EMUL_CONTINUE))
diff --git a/queue-3.17/tracing-syscalls-ignore-numbers-outside-nr_syscalls-range.patch b/queue-3.17/tracing-syscalls-ignore-numbers-outside-nr_syscalls-range.patch
new file mode 100644 (file)
index 0000000..e21b999
--- /dev/null
@@ -0,0 +1,88 @@
+From 086ba77a6db00ed858ff07451bedee197df868c9 Mon Sep 17 00:00:00 2001
+From: Rabin Vincent <rabin@rab.in>
+Date: Wed, 29 Oct 2014 23:06:58 +0100
+Subject: tracing/syscalls: Ignore numbers outside NR_syscalls' range
+
+From: Rabin Vincent <rabin@rab.in>
+
+commit 086ba77a6db00ed858ff07451bedee197df868c9 upstream.
+
+ARM has some private syscalls (for example, set_tls(2)) which lie
+outside the range of NR_syscalls.  If any of these are called while
+syscall tracing is being performed, out-of-bounds array access will
+occur in the ftrace and perf sys_{enter,exit} handlers.
+
+ # trace-cmd record -e raw_syscalls:* true && trace-cmd report
+ ...
+ true-653   [000]   384.675777: sys_enter:            NR 192 (0, 1000, 3, 4000022, ffffffff, 0)
+ true-653   [000]   384.675812: sys_exit:             NR 192 = 1995915264
+ true-653   [000]   384.675971: sys_enter:            NR 983045 (76f74480, 76f74000, 76f74b28, 76f74480, 76f76f74, 1)
+ true-653   [000]   384.675988: sys_exit:             NR 983045 = 0
+ ...
+
+ # trace-cmd record -e syscalls:* true
+ [   17.289329] Unable to handle kernel paging request at virtual address aaaaaace
+ [   17.289590] pgd = 9e71c000
+ [   17.289696] [aaaaaace] *pgd=00000000
+ [   17.289985] Internal error: Oops: 5 [#1] PREEMPT SMP ARM
+ [   17.290169] Modules linked in:
+ [   17.290391] CPU: 0 PID: 704 Comm: true Not tainted 3.18.0-rc2+ #21
+ [   17.290585] task: 9f4dab00 ti: 9e710000 task.ti: 9e710000
+ [   17.290747] PC is at ftrace_syscall_enter+0x48/0x1f8
+ [   17.290866] LR is at syscall_trace_enter+0x124/0x184
+
+Fix this by ignoring out-of-NR_syscalls-bounds syscall numbers.
+
+Commit cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls"
+added the check for less than zero, but it should have also checked
+for greater than NR_syscalls.
+
+Link: http://lkml.kernel.org/p/1414620418-29472-1-git-send-email-rabin@rab.in
+
+Fixes: cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls"
+Signed-off-by: Rabin Vincent <rabin@rab.in>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_syscalls.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *d
+       int size;
+       syscall_nr = trace_get_syscall_nr(current, regs);
+-      if (syscall_nr < 0)
++      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+               return;
+       /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *da
+       int syscall_nr;
+       syscall_nr = trace_get_syscall_nr(current, regs);
+-      if (syscall_nr < 0)
++      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+               return;
+       /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ign
+       int size;
+       syscall_nr = trace_get_syscall_nr(current, regs);
+-      if (syscall_nr < 0)
++      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+               return;
+       if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
+               return;
+@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *igno
+       int size;
+       syscall_nr = trace_get_syscall_nr(current, regs);
+-      if (syscall_nr < 0)
++      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+               return;
+       if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
+               return;