3.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Jun 2017 13:43:45 +0000 (15:43 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Jun 2017 13:43:45 +0000 (15:43 +0200)
added patches:
arm64-ensure-extension-of-smp_store_release-value.patch
arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch
arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch
rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch
tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
usercopy-adjust-tests-to-deal-with-smap-pan.patch

queue-3.18/arm64-ensure-extension-of-smp_store_release-value.patch [new file with mode: 0644]
queue-3.18/arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch [new file with mode: 0644]
queue-3.18/arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch [new file with mode: 0644]
queue-3.18/rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch [new file with mode: 0644]
queue-3.18/series
queue-3.18/tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch [new file with mode: 0644]
queue-3.18/usercopy-adjust-tests-to-deal-with-smap-pan.patch [new file with mode: 0644]

diff --git a/queue-3.18/arm64-ensure-extension-of-smp_store_release-value.patch b/queue-3.18/arm64-ensure-extension-of-smp_store_release-value.patch
new file mode 100644 (file)
index 0000000..5d9163f
--- /dev/null
@@ -0,0 +1,75 @@
+From 994870bead4ab19087a79492400a5478e2906196 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 May 2017 16:09:34 +0100
+Subject: arm64: ensure extension of smp_store_release value
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 994870bead4ab19087a79492400a5478e2906196 upstream.
+
+When an inline assembly operand's type is narrower than the register it
+is allocated to, the least significant bits of the register (up to the
+operand type's width) are valid, and any other bits are permitted to
+contain any arbitrary value. This aligns with the AAPCS64 parameter
+passing rules.
+
+Our __smp_store_release() implementation does not account for this, and
+implicitly assumes that operands have been zero-extended to the width of
+the type being stored to. Thus, we may store unknown values to memory
+when the value type is narrower than the pointer type (e.g. when storing
+a char to a long).
+
+This patch fixes the issue by casting the value operand to the same
+width as the pointer operand in all cases, which ensures that the value
+is zero-extended as we expect. We use the same union trickery as
+__smp_load_acquire and {READ,WRITE}_ONCE() to avoid GCC complaining that
+pointers are potentially cast to narrower width integers in unreachable
+paths.
+
+A whitespace issue at the top of __smp_store_release() is also
+corrected.
+
+No changes are necessary for __smp_load_acquire(). Load instructions
+implicitly clear any upper bits of the register, and the compiler will
+only consider the least significant bits of the register as valid
+regardless.
+
+Fixes: 47933ad41a86 ("arch: Introduce smp_load_acquire(), smp_store_release()")
+Fixes: 878a84d5a8a1 ("arm64: add missing data types in smp_load_acquire/smp_store_release")
+Cc: <stable@vger.kernel.org> # 3.14.x-
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Matthias Kaehlcke <mka@chromium.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/barrier.h |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -60,15 +60,21 @@ do {                                                                       \
+ #define smp_store_release(p, v)                                               \
+ do {                                                                  \
++      union { typeof(*p) __val; char __c[1]; } __u =                  \
++              { .__val = (__force typeof(*p)) (v) };                  \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 4:                                                         \
+               asm volatile ("stlr %w1, %0"                            \
+-                              : "=Q" (*p) : "r" (v) : "memory");      \
++                              : "=Q" (*p)                             \
++                              : "r" (*(__u32 *)__u.__c)               \
++                              : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("stlr %1, %0"                             \
+-                              : "=Q" (*p) : "r" (v) : "memory");      \
++                              : "=Q" (*p)                             \
++                              : "r" (*(__u64 *)__u.__c)               \
++                              : "memory");                            \
+               break;                                                  \
+       }                                                               \
+ } while (0)
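
As an aside for anyone backporting a similar barrier, the widening can be checked in isolation. Below is a minimal userspace sketch of the union trick (an illustration under assumptions: it hardcodes the 64-bit case, uses a hypothetical macro name, and needs an AArch64 GCC or Clang toolchain) — it is not the kernel's header:

	#include <stdio.h>

	static long target;

	/*
	 * Sketch of the fixed pattern: the value is first widened to the
	 * pointee's type via the union, so the register handed to STLR is
	 * fully defined even when 'v' is narrower than 'long'.
	 */
	#define store_release_long(p, v)				\
	do {								\
		union { long __val; char __c[1]; } __u =		\
			{ .__val = (long)(v) };				\
		asm volatile("stlr %1, %0"				\
			     : "=Q" (*(p))				\
			     : "r" (*(unsigned long *)__u.__c)		\
			     : "memory");				\
	} while (0)

	int main(void)
	{
		char c = 0x5a;			/* narrower than the pointee type */

		store_release_long(&target, c);	/* zero-extended by the cast */
		printf("0x%lx\n", target);	/* prints 0x5a */
		return 0;
	}
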
diff --git a/queue-3.18/arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch b/queue-3.18/arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch
new file mode 100644 (file)
index 0000000..9abacc1
--- /dev/null
@@ -0,0 +1,88 @@
+From 276e93279a630657fff4b086ba14c95955912dfa Mon Sep 17 00:00:00 2001
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Wed, 3 May 2017 16:37:47 +0100
+Subject: arm64: entry: improve data abort handling of tagged pointers
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 276e93279a630657fff4b086ba14c95955912dfa upstream.
+
+When handling a data abort from EL0, we currently zero the top byte of
+the faulting address, as we assume the address is a TTBR0 address, which
+may contain a non-zero address tag. However, the address may be a TTBR1
+address, in which case we should not zero the top byte. This patch fixes
+that. The effect is that the full TTBR1 address is passed to the task's
+signal handler (or printed out in the kernel log).
+
+When handling a data abort from EL1, we leave the faulting address
+intact, as we assume it's either a TTBR1 address or a TTBR0 address with
+tag 0x00. This is true as far as I'm aware; we don't seem to access a
+tagged TTBR0 address anywhere in the kernel. Regardless, it's easy to
+forget about address tags, and code added in the future may not always
+remember to remove tags from addresses before accessing them. So add tag
+handling to the EL1 data abort handler as well. This also makes it
+consistent with the EL0 data abort handler.
+
+Fixes: d50240a5f6ce ("arm64: mm: permit use of tagged pointers at EL0")
+Reviewed-by: Dave Martin <Dave.Martin@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/arm64/include/asm/asm-uaccess.h |   13 +++++++++++++
+ arch/arm64/kernel/entry.S            |    6 ++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+--- /dev/null
++++ b/arch/arm64/include/asm/asm-uaccess.h
+@@ -0,0 +1,13 @@
++#ifndef __ASM_ASM_UACCESS_H
++#define __ASM_ASM_UACCESS_H
++
++/*
++ * Remove the address tag from a virtual address, if present.
++ */
++      .macro  clear_address_tag, dst, addr
++      tst     \addr, #(1 << 55)
++      bic     \dst, \addr, #(0xff << 56)
++      csel    \dst, \dst, \addr, eq
++      .endm
++
++#endif
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -28,6 +28,7 @@
+ #include <asm/errno.h>
+ #include <asm/esr.h>
+ #include <asm/thread_info.h>
++#include <asm/asm-uaccess.h>
+ #include <asm/unistd.h>
+ /*
+@@ -307,12 +308,13 @@ el1_da:
+       /*
+        * Data abort handling
+        */
+-      mrs     x0, far_el1
++      mrs     x3, far_el1
+       enable_dbg
+       // re-enable interrupts if they were enabled in the aborted context
+       tbnz    x23, #7, 1f                     // PSR_I_BIT
+       enable_irq
+ 1:
++      clear_address_tag x0, x3
+       mov     x2, sp                          // struct pt_regs
+       bl      do_mem_abort
+@@ -472,7 +474,7 @@ el0_da:
+       // enable interrupts before calling the main handler
+       enable_dbg_and_irq
+       ct_user_exit
+-      bic     x0, x26, #(0xff << 56)
++      clear_address_tag x0, x26
+       mov     x1, x25
+       mov     x2, sp
+       adr     lr, ret_to_user
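
A quick way to sanity-check the macro's logic is a plain-C model (a sketch for illustration only; the kernel uses the assembly macro above, and the function name here just mirrors it):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Bit 55 distinguishes TTBR0 addresses (bit clear) from TTBR1
	 * addresses (bit set). Only TTBR0 addresses may carry a tag, so
	 * the top byte is cleared only when bit 55 is zero -- the same
	 * decision the tst/bic/csel sequence makes.
	 */
	static uint64_t clear_address_tag(uint64_t addr)
	{
		if (addr & (1ULL << 55))
			return addr;			/* TTBR1: leave intact */
		return addr & ~(0xffULL << 56);		/* TTBR0: strip the tag */
	}

	int main(void)
	{
		uint64_t tagged = 0x12345678ULL | (0xabULL << 56);

		printf("%llx\n", (unsigned long long)clear_address_tag(tagged));
		/* 12345678: tag byte removed from the TTBR0 address */
		printf("%llx\n", (unsigned long long)
		       clear_address_tag(0xffff000012345678ULL));
		/* ffff000012345678: TTBR1 address passed through untouched */
		return 0;
	}
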
diff --git a/queue-3.18/arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch b/queue-3.18/arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch
new file mode 100644 (file)
index 0000000..8f2ad27
--- /dev/null
@@ -0,0 +1,98 @@
+From 7dcd9dd8cebe9fa626af7e2358d03a37041a70fb Mon Sep 17 00:00:00 2001
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Wed, 3 May 2017 16:37:46 +0100
+Subject: arm64: hw_breakpoint: fix watchpoint matching for tagged pointers
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 7dcd9dd8cebe9fa626af7e2358d03a37041a70fb upstream.
+
+When we take a watchpoint exception, the address that triggered the
+watchpoint is found in FAR_EL1. We compare it to the address of each
+configured watchpoint to see which one was hit.
+
+The configured watchpoint addresses are untagged, while the address in
+FAR_EL1 will have an address tag if the data access was done using a
+tagged address. The tag needs to be removed to compare the address to
+the watchpoints.
+
+Currently we don't remove it, and as a result can report the wrong
+watchpoint as being hit (specifically, always either the highest TTBR0
+watchpoint or lowest TTBR1 watchpoint). This patch removes the tag.
+
+Fixes: d50240a5f6ce ("arm64: mm: permit use of tagged pointers at EL0")
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/uaccess.h  |    8 ++++++++
+ arch/arm64/kernel/hw_breakpoint.c |    3 ++-
+ include/linux/bitops.h            |   11 +++++++++++
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -21,6 +21,7 @@
+ /*
+  * User space memory access functions
+  */
++#include <linux/bitops.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
+@@ -100,6 +101,13 @@ static inline void set_fs(mm_segment_t f
+       flag;                                                           \
+ })
++/*
++ * When dealing with data aborts, watchpoints, or instruction traps we may end
++ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
++ * pass on to access_ok(), for instance.
++ */
++#define untagged_addr(addr)           sign_extend64(addr, 55)
++
+ #define access_ok(type, addr, size)   __range_ok(addr, size)
+ #define user_addr_max                 get_fs
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -35,6 +35,7 @@
+ #include <asm/traps.h>
+ #include <asm/cputype.h>
+ #include <asm/system_misc.h>
++#include <asm/uaccess.h>
+ /* Breakpoint currently in use for each BRP. */
+ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+@@ -688,7 +689,7 @@ static int watchpoint_handler(unsigned l
+               /* Check if the watchpoint value matches. */
+               val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+-              if (val != (addr & ~alignment_mask))
++              if (val != (untagged_addr(addr) & ~alignment_mask))
+                       goto unlock;
+               /* Possible match, check the byte address select to confirm. */
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -171,6 +171,17 @@ static inline __s32 sign_extend32(__u32
+       return (__s32)(value << shift) >> shift;
+ }
++/**
++ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
++ * @value: value to sign extend
++ * @index: 0 based bit index (0<=index<64) to sign bit
++ */
++static inline __s64 sign_extend64(__u64 value, int index)
++{
++      __u8 shift = 63 - index;
++      return (__s64)(value << shift) >> shift;
++}
++
+ static inline unsigned fls_long(unsigned long l)
+ {
+       if (sizeof(l) == 4)
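
Why sign extension from bit 55 is the right untagging operation for both halves of the address space can be verified with a standalone sketch (illustration only; the helper mirrors the sign_extend64() added above):

	#include <stdint.h>
	#include <stdio.h>

	/* Same shape as the sign_extend64() introduced in the patch. */
	static inline int64_t sign_extend64(uint64_t value, int index)
	{
		uint8_t shift = 63 - index;

		return (int64_t)(value << shift) >> shift;
	}

	#define untagged_addr(addr)	sign_extend64((addr), 55)

	int main(void)
	{
		uint64_t user   = 0x12345678ULL | (0x7fULL << 56); /* tagged TTBR0 */
		uint64_t kernel = 0xffff000012345678ULL;           /* TTBR1 */

		/* Bit 55 is 0: the extension zeroes the tag byte, so the
		 * address again compares equal to the configured
		 * watchpoint value. */
		printf("%llx\n", (unsigned long long)untagged_addr(user));

		/* Bit 55 is 1: the extension fills with ones, leaving the
		 * kernel address unchanged. */
		printf("%llx\n", (unsigned long long)untagged_addr(kernel));
		return 0;
	}
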
diff --git a/queue-3.18/rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch b/queue-3.18/rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch
new file mode 100644 (file)
index 0000000..ce7e597
--- /dev/null
@@ -0,0 +1,48 @@
+From 1feb40067cf04ae48d65f728d62ca255c9449178 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Fri, 12 May 2017 09:02:00 -0700
+Subject: RDMA/qib,hfi1: Fix MR reference count leak on write with immediate
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+commit 1feb40067cf04ae48d65f728d62ca255c9449178 upstream.
+
+The handling of IB_RDMA_WRITE_ONLY_WITH_IMMEDIATE will leak a memory
+reference when a buffer cannot be allocated for returning the immediate
+data.
+
+The issue is that the rkey validation has already occurred and the RNR
+nak fails to release the reference that was fruitlessly gotten.  The
+the peer will send the identical single packet request when its RNR
+timer pops.
+
+The fix is to release the held reference prior to the RNR NAK exit.
+This is the only sequence that requires both rkey validation and
+buffer allocation on the same packet.
+
+Cc: Stable <stable@vger.kernel.org> # 4.7+
+Tested-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/qib/qib_rc.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -2086,8 +2086,10 @@ send_last:
+               ret = qib_get_rwqe(qp, 1);
+               if (ret < 0)
+                       goto nack_op_err;
+-              if (!ret)
++              if (!ret) {
++                      qib_put_ss(&qp->r_sge);
+                       goto rnr_nak;
++              }
+               wc.ex.imm_data = ohdr->u.rc.imm_data;
+               hdrsize += 4;
+               wc.wc_flags = IB_WC_WITH_IMM;
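
The bug has the shape of a classic reference leak on an early-exit path. Here is a generic sketch of the before/after pattern (all names hypothetical — this is not the qib driver's real API, just the structure of the fix):

	#include <stdio.h>

	/* Hypothetical types and helpers, for illustration only. */
	struct mr { int refcount; };

	static struct mr region;

	static struct mr *lookup_mr(void)	/* rkey validation takes a ref */
	{
		region.refcount++;
		return &region;
	}

	static void put_mr(struct mr *mr)	/* releases that reference */
	{
		mr->refcount--;
	}

	static int handle_write_with_immediate(int have_buffer)
	{
		struct mr *mr = lookup_mr();	/* reference acquired here */

		if (!have_buffer) {
			/*
			 * The fix: drop the reference before the RNR NAK
			 * exit. The peer retries the whole request,
			 * re-validating the rkey and taking a fresh ref.
			 */
			put_mr(mr);
			return -1;		/* RNR NAK */
		}
		/* ... deliver the immediate data, complete the request ... */
		put_mr(mr);
		return 0;
	}

	int main(void)
	{
		handle_write_with_immediate(0);	/* buffer allocation fails */
		printf("refcount after RNR path: %d\n", region.refcount); /* 0 */
		return 0;
	}
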
diff --git a/queue-3.18/series b/queue-3.18/series
index a7a1ae251b6680d3483fb8f64bb18cf98934b072..04d5958529bc0e0d5af8a578b692f93945bb7954 100644 (file)
@@ -36,3 +36,9 @@ alsa-timer-fix-missing-queue-indices-reset-at-sndrv_timer_ioctl_select.patch
 asoc-fix-use-after-free-at-card-unregistration.patch
 drivers-char-mem-fix-wraparound-check-to-allow-mappings-up-to-the-end.patch
 serial-sh-sci-fix-panic-when-serial-console-and-dma-are-enabled.patch
+arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch
+arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch
+rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch
+tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
+usercopy-adjust-tests-to-deal-with-smap-pan.patch
+arm64-ensure-extension-of-smp_store_release-value.patch
diff --git a/queue-3.18/tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch b/queue-3.18/tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
new file mode 100644 (file)
index 0000000..f9afe4e
--- /dev/null
@@ -0,0 +1,39 @@
+From e09e28671cda63e6308b31798b997639120e2a21 Mon Sep 17 00:00:00 2001
+From: Amey Telawane <ameyt@codeaurora.org>
+Date: Wed, 3 May 2017 15:41:14 +0530
+Subject: tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()
+
+From: Amey Telawane <ameyt@codeaurora.org>
+
+commit e09e28671cda63e6308b31798b997639120e2a21 upstream.
+
+strcpy() is inherently not safe, and strlcpy() should be used instead.
+__trace_find_cmdline() uses strcpy() because the comms saved must have a
+terminating nul character, but it doesn't hurt to add the extra protection
+of using strlcpy() instead of strcpy().
+
+Link: http://lkml.kernel.org/r/1493806274-13936-1-git-send-email-amit.pundir@linaro.org
+
+Signed-off-by: Amey Telawane <ameyt@codeaurora.org>
+[AmitP: Cherry-picked this commit from CodeAurora kernel/msm-3.10
+https://source.codeaurora.org/quic/la/kernel/msm-3.10/commit/?id=2161ae9a70b12cf18ac8e5952a20161ffbccb477]
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+[ Updated change log and removed the "- 1" from len parameter ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1544,7 +1544,7 @@ static void __trace_find_cmdline(int pid
+       map = savedcmd->map_pid_to_cmdline[pid];
+       if (map != NO_CMDLINE_MAP)
+-              strcpy(comm, get_saved_cmdlines(map));
++              strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+       else
+               strcpy(comm, "<...>");
+ }
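
strlcpy() is a kernel/BSD interface rather than standard C, so the difference can be demonstrated in userspace with a local stand-in (the implementation below is an assumption for the sketch, not the kernel's):

	#include <stdio.h>
	#include <string.h>

	#define TASK_COMM_LEN 16	/* as in the kernel */

	/* Local stand-in for strlcpy(): copies at most size - 1 bytes and
	 * always NUL-terminates; returns the source length. */
	static size_t strlcpy_local(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len < size - 1 ? len : size - 1;

			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char comm[TASK_COMM_LEN];
		const char *saved = "a-saved-cmdline-longer-than-fifteen-bytes";

		/* strcpy(comm, saved) would overflow comm if a saved entry
		 * were ever longer than TASK_COMM_LEN; strlcpy() truncates
		 * and still NUL-terminates. */
		strlcpy_local(comm, saved, sizeof(comm));
		printf("%s\n", comm);	/* at most 15 chars plus the NUL */
		return 0;
	}
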
diff --git a/queue-3.18/usercopy-adjust-tests-to-deal-with-smap-pan.patch b/queue-3.18/usercopy-adjust-tests-to-deal-with-smap-pan.patch
new file mode 100644 (file)
index 0000000..b8e08ab
--- /dev/null
@@ -0,0 +1,75 @@
+From f5f893c57e37ca730808cb2eee3820abd05e7507 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Mon, 13 Feb 2017 11:25:26 -0800
+Subject: usercopy: Adjust tests to deal with SMAP/PAN
+
+From: Kees Cook <keescook@chromium.org>
+
+commit f5f893c57e37ca730808cb2eee3820abd05e7507 upstream.
+
+Under SMAP/PAN/etc, we cannot write directly to userspace memory, so
+this rearranges the test bytes to get written through copy_to_user().
+It additionally drops the bad copy_from_user() test that would trigger a
+memcpy() against userspace on failure.
+
+[arnd: the test module was added in 3.14, and this backported patch
+       should apply cleanly on all versions from 3.14 to 4.10.
+       The original patch was in 4.11 on top of a context change.
+       I saw the bug triggered with kselftest on a 4.4.y stable kernel]
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_user_copy.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/lib/test_user_copy.c
++++ b/lib/test_user_copy.c
+@@ -58,7 +58,9 @@ static int __init test_user_copy_init(vo
+       usermem = (char __user *)user_addr;
+       bad_usermem = (char *)user_addr;
+-      /* Legitimate usage: none of these should fail. */
++      /*
++       * Legitimate usage: none of these copies should fail.
++       */
+       ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
+                   "legitimate copy_from_user failed");
+       ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
+@@ -68,19 +70,33 @@ static int __init test_user_copy_init(vo
+       ret |= test(put_user(value, (unsigned long __user *)usermem),
+                   "legitimate put_user failed");
+-      /* Invalid usage: none of these should succeed. */
++      /*
++       * Invalid usage: none of these copies should succeed.
++       */
++
++      /* Reject kernel-to-kernel copies through copy_from_user(). */
+       ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+                                   PAGE_SIZE),
+                   "illegal all-kernel copy_from_user passed");
++
++#if 0
++      /*
++       * When running with SMAP/PAN/etc, this will Oops the kernel
++       * due to the zeroing of userspace memory on failure. This needs
++       * to be tested in LKDTM instead, since this test module does not
++       * expect to explode.
++       */
+       ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
+                                   PAGE_SIZE),
+                   "illegal reversed copy_from_user passed");
++#endif
+       ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+                                 PAGE_SIZE),
+                   "illegal all-kernel copy_to_user passed");
+       ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
+                                 PAGE_SIZE),
+                   "illegal reversed copy_to_user passed");
++
+       ret |= test(!get_user(value, (unsigned long __user *)kmem),
+                   "illegal get_user passed");
+       ret |= test(!put_user(value, (unsigned long __user *)kmem),