4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 5 May 2019 10:16:39 +0000 (12:16 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 5 May 2019 10:16:39 +0000 (12:16 +0200)
added patches:
arm64-fix-single-stepping-in-kernel-traps.patch
arm64-only-advance-singlestep-for-user-instruction-traps.patch
caif-reduce-stack-size-with-kasan.patch
kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch
kasan-remove-redundant-initialization-of-variable-real_size.patch

queue-4.14/arm64-fix-single-stepping-in-kernel-traps.patch [new file with mode: 0644]
queue-4.14/arm64-only-advance-singlestep-for-user-instruction-traps.patch [new file with mode: 0644]
queue-4.14/caif-reduce-stack-size-with-kasan.patch [new file with mode: 0644]
queue-4.14/kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch [new file with mode: 0644]
queue-4.14/kasan-remove-redundant-initialization-of-variable-real_size.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/arm64-fix-single-stepping-in-kernel-traps.patch b/queue-4.14/arm64-fix-single-stepping-in-kernel-traps.patch
new file mode 100644 (file)
index 0000000..5df5f9f
--- /dev/null
@@ -0,0 +1,161 @@
+From 6436beeee5721a8e906e9eabf866f12d04470437 Mon Sep 17 00:00:00 2001
+From: Julien Thierry <julien.thierry@arm.com>
+Date: Wed, 25 Oct 2017 10:04:33 +0100
+Subject: arm64: Fix single stepping in kernel traps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+commit 6436beeee5721a8e906e9eabf866f12d04470437 upstream.
+
+Software Step exception is missing after stepping a trapped instruction.
+
+Ensure SPSR.SS gets set to 0 after emulating/skipping a trapped instruction
+before doing ERET.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
+[will: replaced AARCH32_INSN_SIZE with 4]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/traps.h       |    6 ++++++
+ arch/arm64/kernel/armv8_deprecated.c |    8 ++++----
+ arch/arm64/kernel/cpufeature.c       |    2 +-
+ arch/arm64/kernel/traps.c            |   21 ++++++++++++++++-----
+ 4 files changed, 27 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/include/asm/traps.h
++++ b/arch/arm64/include/asm/traps.h
+@@ -37,6 +37,12 @@ void unregister_undef_hook(struct undef_
+ void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
++/*
++ * Move regs->pc to next instruction and do necessary setup before it
++ * is executed.
++ */
++void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size);
++
+ static inline int __in_irqentry_text(unsigned long ptr)
+ {
+       return ptr >= (unsigned long)&__irqentry_text_start &&
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -431,7 +431,7 @@ ret:
+       pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
+                       current->comm, (unsigned long)current->pid, regs->pc);
+-      regs->pc += 4;
++      arm64_skip_faulting_instruction(regs, 4);
+       return 0;
+ fault:
+@@ -512,7 +512,7 @@ ret:
+       pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
+                       current->comm, (unsigned long)current->pid, regs->pc);
+-      regs->pc += 4;
++      arm64_skip_faulting_instruction(regs, 4);
+       return 0;
+ }
+@@ -586,14 +586,14 @@ static int compat_setend_handler(struct
+ static int a32_setend_handler(struct pt_regs *regs, u32 instr)
+ {
+       int rc = compat_setend_handler(regs, (instr >> 9) & 1);
+-      regs->pc += 4;
++      arm64_skip_faulting_instruction(regs, 4);
+       return rc;
+ }
+ static int t16_setend_handler(struct pt_regs *regs, u32 instr)
+ {
+       int rc = compat_setend_handler(regs, (instr >> 3) & 1);
+-      regs->pc += 2;
++      arm64_skip_faulting_instruction(regs, 2);
+       return rc;
+ }
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1398,7 +1398,7 @@ static int emulate_mrs(struct pt_regs *r
+       if (!rc) {
+               dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+               pt_regs_write_reg(regs, dst, val);
+-              regs->pc += 4;
++              arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+       }
+       return rc;
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -296,6 +296,17 @@ void arm64_notify_die(const char *str, s
+       }
+ }
++void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
++{
++      regs->pc += size;
++
++      /*
++       * If we were single stepping, we want to get the step exception after
++       * we return from the trap.
++       */
++      user_fastforward_single_step(current);
++}
++
+ static LIST_HEAD(undef_hook);
+ static DEFINE_RAW_SPINLOCK(undef_lock);
+@@ -483,7 +494,7 @@ static void user_cache_maint_handler(uns
+       if (ret)
+               arm64_notify_segfault(regs, address);
+       else
+-              regs->pc += 4;
++              arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
+ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
+@@ -493,7 +504,7 @@ static void ctr_read_handler(unsigned in
+       pt_regs_write_reg(regs, rt, val);
+-      regs->pc += 4;
++      arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
+ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+@@ -501,7 +512,7 @@ static void cntvct_read_handler(unsigned
+       int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+       pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
+-      regs->pc += 4;
++      arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
+ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+@@ -509,7 +520,7 @@ static void cntfrq_read_handler(unsigned
+       int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+       pt_regs_write_reg(regs, rt, arch_timer_get_rate());
+-      regs->pc += 4;
++      arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
+ struct sys64_hook {
+@@ -756,7 +767,7 @@ static int bug_handler(struct pt_regs *r
+       }
+       /* If thread survives, skip over the BUG instruction and continue: */
+-      regs->pc += AARCH64_INSN_SIZE;  /* skip BRK and resume */
++      arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+       return DBG_HOOK_HANDLED;
+ }
diff --git a/queue-4.14/arm64-only-advance-singlestep-for-user-instruction-traps.patch b/queue-4.14/arm64-only-advance-singlestep-for-user-instruction-traps.patch
new file mode 100644 (file)
index 0000000..bed3b9a
--- /dev/null
@@ -0,0 +1,46 @@
+From 9478f1927e6ef9ef5e1ad761af1c98aa8e40b7f5 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 3 Apr 2018 11:22:51 +0100
+Subject: arm64: only advance singlestep for user instruction traps
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 9478f1927e6ef9ef5e1ad761af1c98aa8e40b7f5 upstream.
+
+Our arm64_skip_faulting_instruction() helper advances the userspace
+singlestep state machine, but this is also called by the kernel BRK
+handler, as used for WARN*().
+
+Thus, if we happen to hit a WARN*() while the user singlestep state
+machine is in the active-no-pending state, we'll advance to the
+active-pending state without having executed a user instruction, and
+will take a step exception earlier than expected when we return to
+userspace.
+
+Let's fix this by only advancing the state machine when skipping a user
+instruction.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Andrey Konovalov <andreyknvl@google.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/traps.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -304,7 +304,8 @@ void arm64_skip_faulting_instruction(str
+        * If we were single stepping, we want to get the step exception after
+        * we return from the trap.
+        */
+-      user_fastforward_single_step(current);
++      if (user_mode(regs))
++              user_fastforward_single_step(current);
+ }
+ static LIST_HEAD(undef_hook);
diff --git a/queue-4.14/caif-reduce-stack-size-with-kasan.patch b/queue-4.14/caif-reduce-stack-size-with-kasan.patch
new file mode 100644 (file)
index 0000000..234a9be
--- /dev/null
@@ -0,0 +1,226 @@
+From ce6289661b14a8b391d90db918c91b6d6da6540a Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 16 Jan 2018 17:34:00 +0100
+Subject: caif: reduce stack size with KASAN
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit ce6289661b14a8b391d90db918c91b6d6da6540a upstream.
+
+When CONFIG_KASAN is set, we can use relatively large amounts of kernel
+stack space:
+
+net/caif/cfctrl.c:555:1: warning: the frame size of 1600 bytes is larger than 1280 bytes [-Wframe-larger-than=]
+
+This adds convenience wrappers around cfpkt_extr_head(), which is responsible
+for most of the stack growth. With those wrapper functions, gcc apparently
+starts reusing the stack slots for each instance, thus avoiding the
+problem.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/caif/cfpkt.h |   27 +++++++++++++++++++++++++
+ net/caif/cfctrl.c        |   50 ++++++++++++++++++++---------------------------
+ 2 files changed, 49 insertions(+), 28 deletions(-)
+
+--- a/include/net/caif/cfpkt.h
++++ b/include/net/caif/cfpkt.h
+@@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
+  */
+ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
++static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
++{
++      u8 tmp;
++
++      cfpkt_extr_head(pkt, &tmp, 1);
++
++      return tmp;
++}
++
++static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
++{
++      __le16 tmp;
++
++      cfpkt_extr_head(pkt, &tmp, 2);
++
++      return le16_to_cpu(tmp);
++}
++
++static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
++{
++      __le32 tmp;
++
++      cfpkt_extr_head(pkt, &tmp, 4);
++
++      return le32_to_cpu(tmp);
++}
++
+ /*
+  * Peek header from packet.
+  * Reads data from packet without changing packet.
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -352,15 +352,14 @@ static int cfctrl_recv(struct cflayer *l
+       u8 cmdrsp;
+       u8 cmd;
+       int ret = -1;
+-      u16 tmp16;
+       u8 len;
+       u8 param[255];
+-      u8 linkid;
++      u8 linkid = 0;
+       struct cfctrl *cfctrl = container_obj(layer);
+       struct cfctrl_request_info rsp, *req;
+-      cfpkt_extr_head(pkt, &cmdrsp, 1);
++      cmdrsp = cfpkt_extr_head_u8(pkt);
+       cmd = cmdrsp & CFCTRL_CMD_MASK;
+       if (cmd != CFCTRL_CMD_LINK_ERR
+           && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
+@@ -378,13 +377,12 @@ static int cfctrl_recv(struct cflayer *l
+                       u8 physlinkid;
+                       u8 prio;
+                       u8 tmp;
+-                      u32 tmp32;
+                       u8 *cp;
+                       int i;
+                       struct cfctrl_link_param linkparam;
+                       memset(&linkparam, 0, sizeof(linkparam));
+-                      cfpkt_extr_head(pkt, &tmp, 1);
++                      tmp = cfpkt_extr_head_u8(pkt);
+                       serv = tmp & CFCTRL_SRV_MASK;
+                       linkparam.linktype = serv;
+@@ -392,13 +390,13 @@ static int cfctrl_recv(struct cflayer *l
+                       servtype = tmp >> 4;
+                       linkparam.chtype = servtype;
+-                      cfpkt_extr_head(pkt, &tmp, 1);
++                      tmp = cfpkt_extr_head_u8(pkt);
+                       physlinkid = tmp & 0x07;
+                       prio = tmp >> 3;
+                       linkparam.priority = prio;
+                       linkparam.phyid = physlinkid;
+-                      cfpkt_extr_head(pkt, &endpoint, 1);
++                      endpoint = cfpkt_extr_head_u8(pkt);
+                       linkparam.endpoint = endpoint & 0x03;
+                       switch (serv) {
+@@ -407,45 +405,43 @@ static int cfctrl_recv(struct cflayer *l
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_VIDEO:
+-                              cfpkt_extr_head(pkt, &tmp, 1);
++                              tmp = cfpkt_extr_head_u8(pkt);
+                               linkparam.u.video.connid = tmp;
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_DATAGRAM:
+-                              cfpkt_extr_head(pkt, &tmp32, 4);
+                               linkparam.u.datagram.connid =
+-                                  le32_to_cpu(tmp32);
++                                  cfpkt_extr_head_u32(pkt);
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_RFM:
+                               /* Construct a frame, convert
+                                * DatagramConnectionID
+                                * to network format long and copy it out...
+                                */
+-                              cfpkt_extr_head(pkt, &tmp32, 4);
+                               linkparam.u.rfm.connid =
+-                                le32_to_cpu(tmp32);
++                                  cfpkt_extr_head_u32(pkt);
+                               cp = (u8 *) linkparam.u.rfm.volume;
+-                              for (cfpkt_extr_head(pkt, &tmp, 1);
++                              for (tmp = cfpkt_extr_head_u8(pkt);
+                                    cfpkt_more(pkt) && tmp != '\0';
+-                                   cfpkt_extr_head(pkt, &tmp, 1))
++                                   tmp = cfpkt_extr_head_u8(pkt))
+                                       *cp++ = tmp;
+                               *cp = '\0';
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_UTIL:
+@@ -454,13 +450,11 @@ static int cfctrl_recv(struct cflayer *l
+                                * to network format long and copy it out...
+                                */
+                               /* Fifosize KB */
+-                              cfpkt_extr_head(pkt, &tmp16, 2);
+                               linkparam.u.utility.fifosize_kb =
+-                                  le16_to_cpu(tmp16);
++                                  cfpkt_extr_head_u16(pkt);
+                               /* Fifosize bufs */
+-                              cfpkt_extr_head(pkt, &tmp16, 2);
+                               linkparam.u.utility.fifosize_bufs =
+-                                  le16_to_cpu(tmp16);
++                                  cfpkt_extr_head_u16(pkt);
+                               /* name */
+                               cp = (u8 *) linkparam.u.utility.name;
+                               caif_assert(sizeof(linkparam.u.utility.name)
+@@ -468,24 +462,24 @@ static int cfctrl_recv(struct cflayer *l
+                               for (i = 0;
+                                    i < UTILITY_NAME_LENGTH
+                                    && cfpkt_more(pkt); i++) {
+-                                      cfpkt_extr_head(pkt, &tmp, 1);
++                                      tmp = cfpkt_extr_head_u8(pkt);
+                                       *cp++ = tmp;
+                               }
+                               /* Length */
+-                              cfpkt_extr_head(pkt, &len, 1);
++                              len = cfpkt_extr_head_u8(pkt);
+                               linkparam.u.utility.paramlen = len;
+                               /* Param Data */
+                               cp = linkparam.u.utility.params;
+                               while (cfpkt_more(pkt) && len--) {
+-                                      cfpkt_extr_head(pkt, &tmp, 1);
++                                      tmp = cfpkt_extr_head_u8(pkt);
+                                       *cp++ = tmp;
+                               }
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               /* Length */
+-                              cfpkt_extr_head(pkt, &len, 1);
++                              len = cfpkt_extr_head_u8(pkt);
+                               /* Param Data */
+                               cfpkt_extr_head(pkt, &param, len);
+                               break;
+@@ -522,7 +516,7 @@ static int cfctrl_recv(struct cflayer *l
+               }
+               break;
+       case CFCTRL_CMD_LINK_DESTROY:
+-              cfpkt_extr_head(pkt, &linkid, 1);
++              linkid = cfpkt_extr_head_u8(pkt);
+               cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
+               break;
+       case CFCTRL_CMD_LINK_ERR:
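A minimal stand-alone sketch (hypothetical names, not the caif code itself) of the effect the patch above relies on: with KASAN's stack instrumentation, each local whose address is passed to another function gets its own red-zoned slot, and gcc apparently does not reuse those slots within one large function. Moving the address-taken temporary into a small value-returning wrapper confines it to the wrapper's scope, so the caller's frame no longer grows with every extraction.

	#include <string.h>
	#include <stdint.h>

	/* Stand-in for cfpkt_extr_head(): copy len bytes and advance the cursor. */
	static void extr(const uint8_t **pkt, void *data, unsigned int len)
	{
		memcpy(data, *pkt, len);
		*pkt += len;
	}

	/* Old pattern: every temporary is address-taken in the big parser,
	 * so each one needs its own instrumented stack slot. */
	static uint32_t parse_old(const uint8_t **pkt)
	{
		uint8_t t8;
		uint16_t t16;
		uint32_t t32;

		extr(pkt, &t8, 1);
		extr(pkt, &t16, 2);
		extr(pkt, &t32, 4);
		return t8 + t16 + t32;
	}

	/* New pattern: the address-taken temporary lives only inside the
	 * inline wrapper; the big parser handles plain values whose slots
	 * the compiler can reuse. */
	static inline uint8_t extr_u8(const uint8_t **pkt)
	{
		uint8_t tmp;

		extr(pkt, &tmp, 1);
		return tmp;
	}
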
diff --git a/queue-4.14/kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch b/queue-4.14/kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch
new file mode 100644 (file)
index 0000000..cd9d483
--- /dev/null
@@ -0,0 +1,51 @@
+From 69ca372c100fba99c78ef826a1795aa86e4f01a8 Mon Sep 17 00:00:00 2001
+From: Andrey Konovalov <andreyknvl@google.com>
+Date: Tue, 10 Apr 2018 16:30:39 -0700
+Subject: kasan: prevent compiler from optimizing away memset in tests
+
+From: Andrey Konovalov <andreyknvl@google.com>
+
+commit 69ca372c100fba99c78ef826a1795aa86e4f01a8 upstream.
+
+A compiler can optimize away memset calls by replacing them with mov
+instructions.  There are KASAN tests that specifically test that KASAN
+correctly handles memset calls so we don't want this optimization to
+happen.
+
+The solution is to add -fno-builtin flag to test_kasan.ko
+
+Link: http://lkml.kernel.org/r/105ec9a308b2abedb1a0d1fdced0c22d765e4732.1519924383.git.andreyknvl@google.com
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Nick Terrell <terrelln@fb.com>
+Cc: Chris Mason <clm@fb.com>
+Cc: Yury Norov <ynorov@caviumnetworks.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: "Luis R . Rodriguez" <mcgrof@kernel.org>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Jeff Layton <jlayton@redhat.com>
+Cc: "Jason A . Donenfeld" <Jason@zx2c4.com>
+Cc: Kostya Serebryany <kcc@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/Makefile |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -50,6 +50,7 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firm
+ obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
+ obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
+ obj-$(CONFIG_TEST_KASAN) += test_kasan.o
++CFLAGS_test_kasan.o += -fno-builtin
+ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
+ obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+ obj-$(CONFIG_TEST_LKM) += test_module.o
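For context, a minimal example (not from the kernel tree) of the optimization the flag disables: with builtins enabled, a small constant-size memset() can be folded into direct stores, so the out-of-line memset() that the KASAN tests expect to be intercepted is never actually called.

	#include <string.h>

	struct small { char buf[8]; };

	void clear_small(struct small *s)
	{
		/* At -O2 with builtins enabled this typically becomes one
		 * 8-byte store; with -fno-builtin it remains a real call to
		 * memset(), which KASAN can intercept and check. */
		memset(s->buf, 0, sizeof(s->buf));
	}
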
diff --git a/queue-4.14/kasan-remove-redundant-initialization-of-variable-real_size.patch b/queue-4.14/kasan-remove-redundant-initialization-of-variable-real_size.patch
new file mode 100644 (file)
index 0000000..37e6b4d
--- /dev/null
@@ -0,0 +1,43 @@
+From 48c232395431c23d35cf3b4c5a090bd793316578 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 6 Feb 2018 15:36:48 -0800
+Subject: kasan: remove redundant initialization of variable 'real_size'
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 48c232395431c23d35cf3b4c5a090bd793316578 upstream.
+
+Variable real_size is initialized with a value that is never read, it is
+re-assigned a new value later on, hence the initialization is redundant
+and can be removed.
+
+Cleans up clang warning:
+
+  lib/test_kasan.c:422:21: warning: Value stored to 'real_size' during its initialization is never read
+
+Link: http://lkml.kernel.org/r/20180206144950.32457-1-colin.king@canonical.com
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_kasan.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/test_kasan.c
++++ b/lib/test_kasan.c
+@@ -389,7 +389,7 @@ static noinline void __init kasan_stack_
+ static noinline void __init ksize_unpoisons_memory(void)
+ {
+       char *ptr;
+-      size_t size = 123, real_size = size;
++      size_t size = 123, real_size;
+       pr_info("ksize() unpoisons the whole allocated chunk\n");
+       ptr = kmalloc(size, GFP_KERNEL);
diff --git a/queue-4.14/series b/queue-4.14/series
index eade7d019fe56f6edb677d6e8d4dc3a9046d71ba..76a74272d76600241ea675336e214938b9f7f50e 100644 (file)
--- a/queue-4.14/series
@@ -9,3 +9,8 @@ bnxt_en-free-short-fw-command-hwrm-memory-in-error-path-in-bnxt_init_one.patch
 rxrpc-fix-net-namespace-cleanup.patch
 net-phy-marvell-fix-buffer-overrun-with-stats-counters.patch
 net-dsa-bcm_sf2-fix-buffer-overflow-doing-set_rxnfc.patch
+kasan-remove-redundant-initialization-of-variable-real_size.patch
+kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch
+arm64-fix-single-stepping-in-kernel-traps.patch
+arm64-only-advance-singlestep-for-user-instruction-traps.patch
+caif-reduce-stack-size-with-kasan.patch