4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Mon, 12 Jun 2017 13:44:03 +0000 (15:44 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Mon, 12 Jun 2017 13:44:03 +0000 (15:44 +0200)
added patches:
arm64-armv8_deprecated-ensure-extension-of-addr.patch
arm64-ensure-extension-of-smp_store_release-value.patch
arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch
arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch
make-__xfs_xattr_put_listen-preperly-report-errors.patch
rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch
tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
usercopy-adjust-tests-to-deal-with-smap-pan.patch

queue-4.4/arm64-armv8_deprecated-ensure-extension-of-addr.patch [new file with mode: 0644]
queue-4.4/arm64-ensure-extension-of-smp_store_release-value.patch [new file with mode: 0644]
queue-4.4/arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch [new file with mode: 0644]
queue-4.4/arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch [new file with mode: 0644]
queue-4.4/make-__xfs_xattr_put_listen-preperly-report-errors.patch [new file with mode: 0644]
queue-4.4/rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch [new file with mode: 0644]
queue-4.4/usercopy-adjust-tests-to-deal-with-smap-pan.patch [new file with mode: 0644]

diff --git a/queue-4.4/arm64-armv8_deprecated-ensure-extension-of-addr.patch b/queue-4.4/arm64-armv8_deprecated-ensure-extension-of-addr.patch
new file mode 100644 (file)
index 0000000..dfb8620
--- /dev/null
@@ -0,0 +1,41 @@
+From 55de49f9aa17b0b2b144dd2af587177b9aadf429 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 May 2017 16:09:36 +0100
+Subject: arm64: armv8_deprecated: ensure extension of addr
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 55de49f9aa17b0b2b144dd2af587177b9aadf429 upstream.
+
+Our compat swp emulation holds the compat user address in an unsigned
+int, which it passes to __user_swpX_asm(). When a 32-bit value is passed
+in a register, the upper 32 bits of the register are unknown, and we
+must extend the value to 64 bits before we can use it as a base address.
+
+This patch casts the address to unsigned long to ensure it has been
+suitably extended, avoiding the potential issue, and silencing a related
+warning from clang.
+
+Fixes: bd35a4adc413 ("arm64: Port SWP/SWPB emulation support from arm")
+Cc: <stable@vger.kernel.org> # 3.19.x-
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/armv8_deprecated.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -305,7 +305,8 @@ static void register_insn_emulation_sysc
+       ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,    \
+               CONFIG_ARM64_PAN)                               \
+       : "=&r" (res), "+r" (data), "=&r" (temp)                \
+-      : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)              \
++      : "r" ((unsigned long)addr), "i" (-EAGAIN),             \
++        "i" (-EFAULT)                                         \
+       : "memory")
+ #define __user_swp_asm(data, addr, res, temp) \
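
For illustration, a stand-alone AArch64 sketch (hypothetical function, not kernel code) of the extension problem the cast above solves: a 32-bit operand bound to an "r" constraint occupies a 64-bit register whose upper half is unspecified under AAPCS64, so a 64-bit base address derived from it needs an explicit cast.

#include <stdio.h>

/*
 * Hypothetical illustration, not the kernel macro: the cast in the
 * input constraint forces the compiler to zero-extend the 32-bit
 * address before the asm consumes the full 64-bit register.
 */
static unsigned long base_plus_offset(unsigned int addr, unsigned long off)
{
	unsigned long res;

	/* The cast guarantees bits [63:32] of the input register are zero. */
	asm ("add %0, %1, %2"
	     : "=r" (res)
	     : "r" ((unsigned long)addr), "r" (off));
	return res;
}

int main(void)
{
	/* With the cast, the result is always 0xdeadbeef + 0x10. */
	printf("%#lx\n", base_plus_offset(0xdeadbeefU, 0x10));
	return 0;
}
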
diff --git a/queue-4.4/arm64-ensure-extension-of-smp_store_release-value.patch b/queue-4.4/arm64-ensure-extension-of-smp_store_release-value.patch
new file mode 100644 (file)
index 0000000..4444d2d
--- /dev/null
@@ -0,0 +1,89 @@
+From 994870bead4ab19087a79492400a5478e2906196 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 May 2017 16:09:34 +0100
+Subject: arm64: ensure extension of smp_store_release value
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 994870bead4ab19087a79492400a5478e2906196 upstream.
+
+When an inline assembly operand's type is narrower than the register it
+is allocated to, the least significant bits of the register (up to the
+operand type's width) are valid, and any other bits are permitted to
+contain any arbitrary value. This aligns with the AAPCS64 parameter
+passing rules.
+
+Our __smp_store_release() implementation does not account for this, and
+implicitly assumes that operands have been zero-extended to the width of
+the type being stored to. Thus, we may store unknown values to memory
+when the value type is narrower than the pointer type (e.g. when storing
+a char to a long).
+
+This patch fixes the issue by casting the value operand to the same
+width as the pointer operand in all cases, which ensures that the value
+is zero-extended as we expect. We use the same union trickery as
+__smp_load_acquire and {READ,WRITE}_ONCE() to avoid GCC complaining that
+pointers are potentially cast to narrower width integers in unreachable
+paths.
+
+A whitespace issue at the top of __smp_store_release() is also
+corrected.
+
+No changes are necessary for __smp_load_acquire(). Load instructions
+implicitly clear any upper bits of the register, and the compiler will
+only consider the least significant bits of the register as valid
+regardless.
+
+Fixes: 47933ad41a86 ("arch: Introduce smp_load_acquire(), smp_store_release()")
+Fixes: 878a84d5a8a1 ("arm64: add missing data types in smp_load_acquire/smp_store_release")
+Cc: <stable@vger.kernel.org> # 3.14.x-
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Matthias Kaehlcke <mka@chromium.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/barrier.h |   18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -41,23 +41,33 @@
+ #define smp_store_release(p, v)                                               \
+ do {                                                                  \
++      union { typeof(*p) __val; char __c[1]; } __u =                  \
++              { .__val = (__force typeof(*p)) (v) };                  \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile ("stlrb %w1, %0"                           \
+-                              : "=Q" (*p) : "r" (v) : "memory");      \
++                              : "=Q" (*p)                             \
++                              : "r" (*(__u8 *)__u.__c)                \
++                              : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile ("stlrh %w1, %0"                           \
+-                              : "=Q" (*p) : "r" (v) : "memory");      \
++                              : "=Q" (*p)                             \
++                              : "r" (*(__u16 *)__u.__c)               \
++                              : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile ("stlr %w1, %0"                            \
+-                              : "=Q" (*p) : "r" (v) : "memory");      \
++                              : "=Q" (*p)                             \
++                              : "r" (*(__u32 *)__u.__c)               \
++                              : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("stlr %1, %0"                             \
+-                              : "=Q" (*p) : "r" (v) : "memory");      \
++                              : "=Q" (*p)                             \
++                              : "r" (*(__u64 *)__u.__c)               \
++                              : "memory");                            \
+               break;                                                  \
+       }                                                               \
+ } while (0)
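
As a rough user-space model of the union trick (a sketch only, with plain volatile stores standing in for the stlr instructions, and assuming a 1/2/4/8-byte stored type):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch, not the kernel macro: the value is funnelled through a union
 * and re-read at the width selected by sizeof(*p), so the operand the
 * store consumes is always a fully defined value of the stored type,
 * never a narrower C object with unspecified upper register bits.
 */
#define sketch_store_release(p, v) do {					\
	union { __typeof__(*(p)) __val; char __c[1]; } __u =		\
		{ .__val = (v) };					\
	switch (sizeof(*(p))) {						\
	case 1: *(volatile uint8_t *)(p)  = *(uint8_t *)__u.__c;  break; \
	case 2: *(volatile uint16_t *)(p) = *(uint16_t *)__u.__c; break; \
	case 4: *(volatile uint32_t *)(p) = *(uint32_t *)__u.__c; break; \
	case 8: *(volatile uint64_t *)(p) = *(uint64_t *)__u.__c; break; \
	}								\
} while (0)

int main(void)
{
	uint64_t dst = ~0ULL;
	unsigned char v = 0x7f;

	/* A char stored to a 64-bit location arrives zero-extended. */
	sketch_store_release(&dst, v);
	printf("%#llx\n", (unsigned long long)dst);
	return 0;
}
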
diff --git a/queue-4.4/arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch b/queue-4.4/arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch
new file mode 100644 (file)
index 0000000..4ce787a
--- /dev/null
@@ -0,0 +1,93 @@
+From 276e93279a630657fff4b086ba14c95955912dfa Mon Sep 17 00:00:00 2001
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Wed, 3 May 2017 16:37:47 +0100
+Subject: arm64: entry: improve data abort handling of tagged pointers
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 276e93279a630657fff4b086ba14c95955912dfa upstream.
+
+This backport has a minor difference from the upstream commit: it adds
+the asm-uaccess.h file, which is not present in 4.4, because 4.4 does
+not have commit b4b8664d291a ("arm64: don't pull uaccess.h into *.S").
+
+Original patch description:
+
+When handling a data abort from EL0, we currently zero the top byte of
+the faulting address, as we assume the address is a TTBR0 address, which
+may contain a non-zero address tag. However, the address may be a TTBR1
+address, in which case we should not zero the top byte. This patch fixes
+that. The effect is that the full TTBR1 address is passed to the task's
+signal handler (or printed out in the kernel log).
+
+When handling a data abort from EL1, we leave the faulting address
+intact, as we assume it's either a TTBR1 address or a TTBR0 address with
+tag 0x00. This is true as far as I'm aware; we don't seem to access a
+tagged TTBR0 address anywhere in the kernel. Regardless, it's easy to
+forget about address tags, and code added in the future may not always
+remember to remove tags from addresses before accessing them. So add tag
+handling to the EL1 data abort handler as well. This also makes it
+consistent with the EL0 data abort handler.
+
+Fixes: d50240a5f6ce ("arm64: mm: permit use of tagged pointers at EL0")
+Reviewed-by: Dave Martin <Dave.Martin@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/asm-uaccess.h |   13 +++++++++++++
+ arch/arm64/kernel/entry.S            |    6 ++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+--- /dev/null
++++ b/arch/arm64/include/asm/asm-uaccess.h
+@@ -0,0 +1,13 @@
++#ifndef __ASM_ASM_UACCESS_H
++#define __ASM_ASM_UACCESS_H
++
++/*
++ * Remove the address tag from a virtual address, if present.
++ */
++      .macro  clear_address_tag, dst, addr
++      tst     \addr, #(1 << 55)
++      bic     \dst, \addr, #(0xff << 56)
++      csel    \dst, \dst, \addr, eq
++      .endm
++
++#endif
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -29,6 +29,7 @@
+ #include <asm/esr.h>
+ #include <asm/memory.h>
+ #include <asm/thread_info.h>
++#include <asm/asm-uaccess.h>
+ #include <asm/unistd.h>
+ /*
+@@ -316,12 +317,13 @@ el1_da:
+       /*
+        * Data abort handling
+        */
+-      mrs     x0, far_el1
++      mrs     x3, far_el1
+       enable_dbg
+       // re-enable interrupts if they were enabled in the aborted context
+       tbnz    x23, #7, 1f                     // PSR_I_BIT
+       enable_irq
+ 1:
++      clear_address_tag x0, x3
+       mov     x2, sp                          // struct pt_regs
+       bl      do_mem_abort
+@@ -483,7 +485,7 @@ el0_da:
+       // enable interrupts before calling the main handler
+       enable_dbg_and_irq
+       ct_user_exit
+-      bic     x0, x26, #(0xff << 56)
++      clear_address_tag x0, x26
+       mov     x1, x25
+       mov     x2, sp
+       bl      do_mem_abort
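
A C rendering of what the new clear_address_tag asm macro computes (an illustrative sketch mirroring the tst/bic/csel sequence, not kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 * C model of the clear_address_tag macro above: bit 55 distinguishes
 * TTBR0 (user, bit 55 == 0) from TTBR1 (kernel, bit 55 == 1) addresses.
 * Only TTBR0 addresses have their tag byte (bits 63:56) cleared;
 * TTBR1 addresses pass through untouched, as the csel encodes.
 */
static uint64_t clear_address_tag(uint64_t addr)
{
	if (addr & (1ULL << 55))	/* TTBR1: leave intact */
		return addr;
	return addr & ~(0xffULL << 56);	/* TTBR0: strip the tag */
}

int main(void)
{
	/* A tagged user address loses its tag byte... */
	printf("%#llx\n", (unsigned long long)clear_address_tag(0x5a00007f2a001000ULL));
	/* ...while a kernel address is unchanged. */
	printf("%#llx\n", (unsigned long long)clear_address_tag(0xffff000008001000ULL));
	return 0;
}
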
diff --git a/queue-4.4/arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch b/queue-4.4/arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch
new file mode 100644 (file)
index 0000000..0d376d4
--- /dev/null
@@ -0,0 +1,85 @@
+From 7dcd9dd8cebe9fa626af7e2358d03a37041a70fb Mon Sep 17 00:00:00 2001
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Wed, 3 May 2017 16:37:46 +0100
+Subject: arm64: hw_breakpoint: fix watchpoint matching for tagged pointers
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 7dcd9dd8cebe9fa626af7e2358d03a37041a70fb upstream.
+
+This backport has a few small differences from the upstream commit:
+ - The address tag is removed in watchpoint_handler() instead of
+   get_distance_from_watchpoint(), because 4.4 does not have commit
+   fdfeff0f9e3d ("arm64: hw_breakpoint: Handle inexact watchpoint
+   addresses").
+ - A macro is backported (untagged_addr), as it is not present in 4.4.
+
+Original patch description:
+
+When we take a watchpoint exception, the address that triggered the
+watchpoint is found in FAR_EL1. We compare it to the address of each
+configured watchpoint to see which one was hit.
+
+The configured watchpoint addresses are untagged, while the address in
+FAR_EL1 will have an address tag if the data access was done using a
+tagged address. The tag needs to be removed to compare the address to
+the watchpoints.
+
+Currently we don't remove it, and as a result can report the wrong
+watchpoint as being hit (specifically, always either the highest TTBR0
+watchpoint or lowest TTBR1 watchpoint). This patch removes the tag.
+
+Fixes: d50240a5f6ce ("arm64: mm: permit use of tagged pointers at EL0")
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/uaccess.h  |    8 ++++++++
+ arch/arm64/kernel/hw_breakpoint.c |    3 ++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -21,6 +21,7 @@
+ /*
+  * User space memory access functions
+  */
++#include <linux/bitops.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
+@@ -103,6 +104,13 @@ static inline void set_fs(mm_segment_t f
+       flag;                                                           \
+ })
++/*
++ * When dealing with data aborts, watchpoints, or instruction traps we may end
++ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
++ * pass on to access_ok(), for instance.
++ */
++#define untagged_addr(addr)           sign_extend64(addr, 55)
++
+ #define access_ok(type, addr, size)   __range_ok(addr, size)
+ #define user_addr_max                 get_fs
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -35,6 +35,7 @@
+ #include <asm/traps.h>
+ #include <asm/cputype.h>
+ #include <asm/system_misc.h>
++#include <asm/uaccess.h>
+ /* Breakpoint currently in use for each BRP. */
+ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+@@ -690,7 +691,7 @@ static int watchpoint_handler(unsigned l
+               /* Check if the watchpoint value matches. */
+               val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+-              if (val != (addr & ~alignment_mask))
++              if (val != (untagged_addr(addr) & ~alignment_mask))
+                       goto unlock;
+               /* Possible match, check the byte address select to confirm. */
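
The backported untagged_addr() reduces to a sign extension from bit 55; a stand-alone sketch of the kernel's sign_extend64() and its effect (for illustration only; the arithmetic right shift of a negative value is what GCC and clang produce, though ISO C leaves it implementation-defined):

#include <stdint.h>
#include <stdio.h>

/*
 * Model of untagged_addr(): sign-extending from bit 55 zeroes the tag
 * byte of a TTBR0 address and sets it to 0xff for a TTBR1 address,
 * yielding the untagged form in either half of the address space.
 */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

#define untagged_addr(addr)	sign_extend64((addr), 55)

int main(void)
{
	printf("%#llx\n", (unsigned long long)untagged_addr(0x5a00007f2a001000ULL));
	printf("%#llx\n", (unsigned long long)untagged_addr(0x12ffff8000001000ULL));
	return 0;
}
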
diff --git a/queue-4.4/make-__xfs_xattr_put_listen-preperly-report-errors.patch b/queue-4.4/make-__xfs_xattr_put_listen-preperly-report-errors.patch
new file mode 100644 (file)
index 0000000..8b3e6fa
--- /dev/null
@@ -0,0 +1,46 @@
+From 791cc43b36eb1f88166c8505900cad1b43c7fe1a Mon Sep 17 00:00:00 2001
+From: Artem Savkov <asavkov@redhat.com>
+Date: Wed, 14 Sep 2016 07:40:35 +1000
+Subject: Make __xfs_xattr_put_listen preperly report errors.
+
+From: Artem Savkov <asavkov@redhat.com>
+
+commit 791cc43b36eb1f88166c8505900cad1b43c7fe1a upstream.
+
+Commit 2a6fba6 "xfs: only return -errno or success from attr ->put_listent"
+changes the return value of __xfs_xattr_put_listent to 0 in the case where
+there is insufficient space in the buffer, assuming that setting
+context->count to -1 would be enough, but all of the ->put_listent callers
+only check seen_enough. This results in a failed assertion in the
+insufficient buffer size case:
+XFS: Assertion failed: context->count >= 0, file: fs/xfs/xfs_xattr.c, line: 175
+
+This is only reproducible with at least 2 xattrs and only when the buffer
+gets depleted before the last one.
+
+Furthermore, if the buffer size is large enough to hold the last xattr's
+name but not the sum of the preceding xattr names, listxattr won't fail
+with ERANGE but will succeed, returning the last xattr's name without its
+first character. That first character ends up overwriting data stored at
+(context->alist - 1).
+
+Signed-off-by: Artem Savkov <asavkov@redhat.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Cc: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_xattr.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/xfs/xfs_xattr.c
++++ b/fs/xfs/xfs_xattr.c
+@@ -180,6 +180,7 @@ xfs_xattr_put_listent(
+       arraytop = context->count + prefix_len + namelen + 1;
+       if (arraytop > context->firstu) {
+               context->count = -1;    /* insufficient space */
++              context->seen_enough = 1;
+               return 0;
+       }
+       offset = (char *)context->alist + context->count;
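
A toy model (assumed names, not XFS code) of the contract the fix restores: on overflow the callback must set seen_enough, because that flag — not the return value — is what stops the iteration, while count == -1 later signals the ERANGE case.

#include <stdio.h>

/*
 * Toy model, not XFS code: callers of ->put_listent stop iterating only
 * on seen_enough, so the overflow path must set it in addition to
 * marking count = -1.
 */
struct listent_ctx {
	int count;		/* bytes used so far, or -1 on overflow */
	int firstu;		/* total buffer space available */
	int seen_enough;	/* the only condition callers check */
};

static int put_listent(struct listent_ctx *ctx, int namelen)
{
	if (ctx->count + namelen + 1 > ctx->firstu) {
		ctx->count = -1;	/* insufficient space */
		ctx->seen_enough = 1;	/* the line the fix adds */
		return 0;
	}
	ctx->count += namelen + 1;
	return 0;
}

int main(void)
{
	struct listent_ctx ctx = { 0, 8, 0 };
	int namelens[] = { 5, 6 };	/* second name overflows the buffer */

	for (int i = 0; i < 2 && !ctx.seen_enough; i++)
		put_listent(&ctx, namelens[i]);
	printf("count=%d seen_enough=%d\n", ctx.count, ctx.seen_enough);
	return 0;
}
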
diff --git a/queue-4.4/rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch b/queue-4.4/rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch
new file mode 100644 (file)
index 0000000..072264b
--- /dev/null
@@ -0,0 +1,48 @@
+From 1feb40067cf04ae48d65f728d62ca255c9449178 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Fri, 12 May 2017 09:02:00 -0700
+Subject: RDMA/qib,hfi1: Fix MR reference count leak on write with immediate
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+commit 1feb40067cf04ae48d65f728d62ca255c9449178 upstream.
+
+The handling of IB_RDMA_WRITE_ONLY_WITH_IMMEDIATE will leak a memory
+reference when a buffer cannot be allocated for returning the immediate
+data.
+
+The issue is that the rkey validation has already occurred and the RNR
+NAK fails to release the reference that was fruitlessly taken. The peer
+will then send the identical single-packet request when its RNR timer
+pops.
+
+The fix is to release the held reference prior to the RNR NAK exit.
+This is the only sequence that requires both rkey validation and
+buffer allocation on the same packet.
+
+Cc: Stable <stable@vger.kernel.org> # 4.7+
+Tested-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/qib/qib_rc.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -2088,8 +2088,10 @@ send_last:
+               ret = qib_get_rwqe(qp, 1);
+               if (ret < 0)
+                       goto nack_op_err;
+-              if (!ret)
++              if (!ret) {
++                      qib_put_ss(&qp->r_sge);
+                       goto rnr_nak;
++              }
+               wc.ex.imm_data = ohdr->u.rc.imm_data;
+               hdrsize += 4;
+               wc.wc_flags = IB_WC_WITH_IMM;
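
The shape of the bug is the classic acquire/early-exit leak; a generic sketch with hypothetical names (not qib code):

#include <stdio.h>

/*
 * Generic sketch of the leak pattern fixed above: once a reference has
 * been taken, every early exit, including the retry path, must drop it
 * before bailing out.
 */
struct ref { int count; };

static void ref_get(struct ref *r) { r->count++; }
static void ref_put(struct ref *r) { r->count--; }

static int handle_packet(struct ref *mr, int buffer_available)
{
	ref_get(mr);			/* taken during rkey validation */

	if (!buffer_available) {
		ref_put(mr);		/* the release the fix adds */
		return -1;		/* ask the peer to retry (RNR NAK) */
	}

	/* ... deliver the immediate data ... */
	ref_put(mr);
	return 0;
}

int main(void)
{
	struct ref mr = { 0 };

	handle_packet(&mr, 0);		/* retry path */
	handle_packet(&mr, 1);		/* success path */
	printf("leaked refs: %d\n", mr.count);
	return 0;
}
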
diff --git a/queue-4.4/series b/queue-4.4/series
index acdf8a43a9370a2296fbdea9b26a7cdb4aa5f7cb..7d99b7310110a495041222f7fdb92bf0862efb08 100644 (file)
@@ -80,3 +80,11 @@ net-better-skb-sender_cpu-and-skb-napi_id-cohabitation.patch
 mm-consider-memblock-reservations-for-deferred-memory-initialization-sizing.patch
 nfs-ensure-we-revalidate-attributes-before-using-execute_ok.patch
 nfsv4-don-t-perform-cached-access-checks-before-we-ve-opened-the-file.patch
+make-__xfs_xattr_put_listen-preperly-report-errors.patch
+arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch
+arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch
+rdma-qib-hfi1-fix-mr-reference-count-leak-on-write-with-immediate.patch
+tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
+usercopy-adjust-tests-to-deal-with-smap-pan.patch
+arm64-armv8_deprecated-ensure-extension-of-addr.patch
+arm64-ensure-extension-of-smp_store_release-value.patch
diff --git a/queue-4.4/tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch b/queue-4.4/tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
new file mode 100644 (file)
index 0000000..a2feb61
--- /dev/null
@@ -0,0 +1,39 @@
+From e09e28671cda63e6308b31798b997639120e2a21 Mon Sep 17 00:00:00 2001
+From: Amey Telawane <ameyt@codeaurora.org>
+Date: Wed, 3 May 2017 15:41:14 +0530
+Subject: tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()
+
+From: Amey Telawane <ameyt@codeaurora.org>
+
+commit e09e28671cda63e6308b31798b997639120e2a21 upstream.
+
+strcpy() is inherently not safe, and strlcpy() should be used instead.
+__trace_find_cmdline() uses strcpy() because the comms saved must have a
+terminating nul character, but it doesn't hurt to add the extra protection
+of using strlcpy() instead of strcpy().
+
+Link: http://lkml.kernel.org/r/1493806274-13936-1-git-send-email-amit.pundir@linaro.org
+
+Signed-off-by: Amey Telawane <ameyt@codeaurora.org>
+[AmitP: Cherry-picked this commit from CodeAurora kernel/msm-3.10
+https://source.codeaurora.org/quic/la/kernel/msm-3.10/commit/?id=2161ae9a70b12cf18ac8e5952a20161ffbccb477]
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+[ Updated change log and removed the "- 1" from len parameter ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1617,7 +1617,7 @@ static void __trace_find_cmdline(int pid
+       map = savedcmd->map_pid_to_cmdline[pid];
+       if (map != NO_CMDLINE_MAP)
+-              strcpy(comm, get_saved_cmdlines(map));
++              strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+       else
+               strcpy(comm, "<...>");
+ }
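
For illustration, a portable stand-in for strlcpy() (a kernel/BSD function, not in ISO C) showing the bounded, always-terminated copy into a TASK_COMM_LEN buffer:

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16

/*
 * Stand-in sketch, not the kernel implementation: truncate at the
 * destination size, always NUL-terminate, and return the length the
 * copy tried to create, so an oversized saved comm cannot overrun
 * the buffer. Assumes size > 0 behaves like the real function.
 */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char comm[TASK_COMM_LEN];

	my_strlcpy(comm, "a-task-name-that-is-way-too-long", sizeof(comm));
	printf("%s\n", comm);	/* truncated to 15 chars plus NUL */
	return 0;
}
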
diff --git a/queue-4.4/usercopy-adjust-tests-to-deal-with-smap-pan.patch b/queue-4.4/usercopy-adjust-tests-to-deal-with-smap-pan.patch
new file mode 100644 (file)
index 0000000..b8e08ab
--- /dev/null
@@ -0,0 +1,75 @@
+From f5f893c57e37ca730808cb2eee3820abd05e7507 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Mon, 13 Feb 2017 11:25:26 -0800
+Subject: usercopy: Adjust tests to deal with SMAP/PAN
+
+From: Kees Cook <keescook@chromium.org>
+
+commit f5f893c57e37ca730808cb2eee3820abd05e7507 upstream.
+
+Under SMAP/PAN/etc, we cannot write directly to userspace memory, so
+this rearranges the test bytes to get written through copy_to_user().
+Additionally, it drops the bad copy_from_user() test that would trigger
+a memcpy() against userspace on failure.
+
+[arnd: the test module was added in 3.14, and this backported patch
+       should apply cleanly on all versions from 3.14 to 4.10.
+       The original patch was in 4.11 on top of a context change.
+       I saw the bug triggered with kselftest on a 4.4.y stable kernel]
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_user_copy.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/lib/test_user_copy.c
++++ b/lib/test_user_copy.c
+@@ -58,7 +58,9 @@ static int __init test_user_copy_init(vo
+       usermem = (char __user *)user_addr;
+       bad_usermem = (char *)user_addr;
+-      /* Legitimate usage: none of these should fail. */
++      /*
++       * Legitimate usage: none of these copies should fail.
++       */
+       ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
+                   "legitimate copy_from_user failed");
+       ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
+@@ -68,19 +70,33 @@ static int __init test_user_copy_init(vo
+       ret |= test(put_user(value, (unsigned long __user *)usermem),
+                   "legitimate put_user failed");
+-      /* Invalid usage: none of these should succeed. */
++      /*
++       * Invalid usage: none of these copies should succeed.
++       */
++
++      /* Reject kernel-to-kernel copies through copy_from_user(). */
+       ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+                                   PAGE_SIZE),
+                   "illegal all-kernel copy_from_user passed");
++
++#if 0
++      /*
++       * When running with SMAP/PAN/etc, this will Oops the kernel
++       * due to the zeroing of userspace memory on failure. This needs
++       * to be tested in LKDTM instead, since this test module does not
++       * expect to explode.
++       */
+       ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
+                                   PAGE_SIZE),
+                   "illegal reversed copy_from_user passed");
++#endif
+       ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+                                 PAGE_SIZE),
+                   "illegal all-kernel copy_to_user passed");
+       ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
+                                 PAGE_SIZE),
+                   "illegal reversed copy_to_user passed");
++
+       ret |= test(!get_user(value, (unsigned long __user *)kmem),
+                   "illegal get_user passed");
+       ret |= test(!put_user(value, (unsigned long __user *)kmem),
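
A simplified sketch of the test() idiom used throughout the module (hypothetical helpers, not the real module code): legitimate operations should return 0, while "illegal" ones are expected to fail, so their result is negated before the check. Under SMAP/PAN the failure is enforced by hardware, which is why the reversed copy_from_user() case had to be disabled above.

#include <stdio.h>

static int test(int condition, const char *msg)
{
	if (condition)
		fprintf(stderr, "FAIL: %s\n", msg);
	return condition;
}

static int illegal_copy(void)
{
	return -14;	/* stand-in for -EFAULT */
}

int main(void)
{
	int ret = 0;

	/* Invalid usage must not succeed, hence the negation. */
	ret |= test(!illegal_copy(), "illegal copy passed");
	return ret ? 1 : 0;
}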