4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 5 May 2019 10:16:20 +0000 (12:16 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 5 May 2019 10:16:20 +0000 (12:16 +0200)
added patches:
arm64-kasan-avoid-bad-virt_to_pfn.patch
arm64-mm-don-t-print-out-page-table-entries-on-el0-faults.patch
arm64-mm-print-out-correct-page-table-entries.patch
arm64-proc-set-pte_ng-for-table-entries-to-avoid-traversing-them-twice.patch
caif-reduce-stack-size-with-kasan.patch
kasan-add-a-prototype-of-task_struct-to-avoid-warning.patch
kasan-avoid-wmaybe-uninitialized-warning.patch
kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch
kasan-remove-redundant-initialization-of-variable-real_size.patch
kasan-turn-on-fsanitize-address-use-after-scope.patch
mm-kasan-switch-to-using-__pa_symbol-and-lm_alias.patch
x86-suspend-fix-false-positive-kasan-warning-on-suspend-resume.patch
x86-unwind-disable-kasan-checks-for-non-current-tasks.patch

14 files changed:
queue-4.9/arm64-kasan-avoid-bad-virt_to_pfn.patch [new file with mode: 0644]
queue-4.9/arm64-mm-don-t-print-out-page-table-entries-on-el0-faults.patch [new file with mode: 0644]
queue-4.9/arm64-mm-print-out-correct-page-table-entries.patch [new file with mode: 0644]
queue-4.9/arm64-proc-set-pte_ng-for-table-entries-to-avoid-traversing-them-twice.patch [new file with mode: 0644]
queue-4.9/caif-reduce-stack-size-with-kasan.patch [new file with mode: 0644]
queue-4.9/kasan-add-a-prototype-of-task_struct-to-avoid-warning.patch [new file with mode: 0644]
queue-4.9/kasan-avoid-wmaybe-uninitialized-warning.patch [new file with mode: 0644]
queue-4.9/kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch [new file with mode: 0644]
queue-4.9/kasan-remove-redundant-initialization-of-variable-real_size.patch [new file with mode: 0644]
queue-4.9/kasan-turn-on-fsanitize-address-use-after-scope.patch [new file with mode: 0644]
queue-4.9/mm-kasan-switch-to-using-__pa_symbol-and-lm_alias.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/x86-suspend-fix-false-positive-kasan-warning-on-suspend-resume.patch [new file with mode: 0644]
queue-4.9/x86-unwind-disable-kasan-checks-for-non-current-tasks.patch [new file with mode: 0644]

diff --git a/queue-4.9/arm64-kasan-avoid-bad-virt_to_pfn.patch b/queue-4.9/arm64-kasan-avoid-bad-virt_to_pfn.patch
new file mode 100644 (file)
index 0000000..47cb457
--- /dev/null
@@ -0,0 +1,57 @@
+From b0de0ccc8b9edd8846828e0ecdc35deacdf186b0 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Mon, 6 Mar 2017 19:06:40 +0000
+Subject: arm64: kasan: avoid bad virt_to_pfn()
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit b0de0ccc8b9edd8846828e0ecdc35deacdf186b0 upstream.
+
+Booting a v4.11-rc1 kernel with DEBUG_VIRTUAL and KASAN enabled produces
+the following splat (trimmed for brevity):
+
+[    0.000000] virt_to_phys used for non-linear address: ffff200008080000 (0xffff200008080000)
+[    0.000000] WARNING: CPU: 0 PID: 0 at arch/arm64/mm/physaddr.c:14 __virt_to_phys+0x48/0x70
+[    0.000000] PC is at __virt_to_phys+0x48/0x70
+[    0.000000] LR is at __virt_to_phys+0x48/0x70
+[    0.000000] Call trace:
+[    0.000000] [<ffff2000080b1ac0>] __virt_to_phys+0x48/0x70
+[    0.000000] [<ffff20000a03b86c>] kasan_init+0x1c0/0x498
+[    0.000000] [<ffff20000a034018>] setup_arch+0x2fc/0x948
+[    0.000000] [<ffff20000a030c68>] start_kernel+0xb8/0x570
+[    0.000000] [<ffff20000a0301e8>] __primary_switched+0x6c/0x74
+
+This is because we use virt_to_pfn() on a kernel image address when
+trying to figure out its nid, so that we can allocate its shadow from
+the same node.
+
+As with other recent changes, this patch uses lm_alias() to solve this.
+
+We could instead use NUMA_NO_NODE, as x86 does for all shadow
+allocations, though we'll likely want the "real" memory shadow to be
+backed from its corresponding nid anyway, so we may as well be
+consistent and find the nid for the image shadow.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Acked-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/kasan_init.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/kasan_init.c
++++ b/arch/arm64/mm/kasan_init.c
+@@ -153,7 +153,7 @@ void __init kasan_init(void)
+       clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+       vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
+-                       pfn_to_nid(virt_to_pfn(_text)));
++                       pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+       /*
+        * vmemmap_populate() has populated the shadow region that covers the
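
For context, mainline defines lm_alias(x) as __va(__pa_symbol(x)). The
self-contained userspace sketch below (invented constants, not the real
arm64 memory layout) illustrates why a kernel-image address must not be
fed to a linear-map-only helper such as virt_to_pfn():

  #include <stdio.h>

  #define PAGE_SHIFT   12
  #define PAGE_OFFSET  0xffff800000000000UL   /* mock linear-map base */
  #define KIMAGE_VADDR 0xffff200008000000UL   /* mock kernel-image base */
  #define PHYS_BASE    0x40000000UL           /* mock physical load address */

  /* Only meaningful for linear-map addresses: */
  static unsigned long virt_to_pfn(unsigned long va)
  {
          return (va - PAGE_OFFSET + PHYS_BASE) >> PAGE_SHIFT;
  }

  /* What lm_alias() achieves: image address -> phys -> linear alias. */
  static unsigned long lm_alias(unsigned long image_va)
  {
          unsigned long pa = image_va - KIMAGE_VADDR + PHYS_BASE;

          return pa - PHYS_BASE + PAGE_OFFSET;
  }

  int main(void)
  {
          unsigned long text = KIMAGE_VADDR + 0x80000;   /* stand-in for _text */

          printf("bogus pfn from image address: %#lx\n", virt_to_pfn(text));
          printf("pfn via lm_alias():           %#lx\n", virt_to_pfn(lm_alias(text)));
          return 0;
  }
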
diff --git a/queue-4.9/arm64-mm-don-t-print-out-page-table-entries-on-el0-faults.patch b/queue-4.9/arm64-mm-don-t-print-out-page-table-entries-on-el0-faults.patch
new file mode 100644 (file)
index 0000000..999b6f3
--- /dev/null
@@ -0,0 +1,38 @@
+From bf396c09c2447a787d02af34cf167e953f85fa42 Mon Sep 17 00:00:00 2001
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Fri, 9 Jun 2017 16:35:53 +0100
+Subject: arm64: mm: don't print out page table entries on EL0 faults
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit bf396c09c2447a787d02af34cf167e953f85fa42 upstream.
+
+When we take a fault from EL0 that can't be handled, we print out the
+page table entries associated with the faulting address. This allows
+userspace to print out any current page table entries, including kernel
+(TTBR1) entries. Exposing kernel mappings like this could pose a
+security risk, so don't print out page table information on EL0 faults.
+(But still print it out for EL1 faults.) This also follows the same
+behaviour as x86, printing out page table entries on kernel mode faults
+but not user mode faults.
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -231,7 +231,6 @@ static void __do_user_fault(struct task_
+               pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
+                       tsk->comm, task_pid_nr(tsk), inf->name, sig,
+                       addr, esr);
+-              show_pte(addr);
+               show_regs(regs);
+       }
diff --git a/queue-4.9/arm64-mm-print-out-correct-page-table-entries.patch b/queue-4.9/arm64-mm-print-out-correct-page-table-entries.patch
new file mode 100644 (file)
index 0000000..d9231f8
--- /dev/null
@@ -0,0 +1,134 @@
+From 67ce16ec15ce9d97d3d85e72beabbc5d7017193e Mon Sep 17 00:00:00 2001
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Fri, 9 Jun 2017 16:35:52 +0100
+Subject: arm64: mm: print out correct page table entries
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 67ce16ec15ce9d97d3d85e72beabbc5d7017193e upstream.
+
+When we take a fault that can't be handled, we print out the page table
+entries associated with the faulting address. In some cases we currently
+print out the wrong entries. For a faulting TTBR1 address, we sometimes
+print out TTBR0 table entries instead, and for a faulting TTBR0 address
+we sometimes print out TTBR1 table entries. Fix this by choosing the
+tables based on the faulting address.
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+[will: zero-extend addrs to 64-bit, don't walk swapper w/ TTBR0 addr]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/system_misc.h |    2 -
+ arch/arm64/mm/fault.c                |   36 ++++++++++++++++++++++++-----------
+ 2 files changed, 26 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/include/asm/system_misc.h
++++ b/arch/arm64/include/asm/system_misc.h
+@@ -40,7 +40,7 @@ void hook_debug_fault_code(int nr, int (
+                          int sig, int code, const char *name);
+ struct mm_struct;
+-extern void show_pte(struct mm_struct *mm, unsigned long addr);
++extern void show_pte(unsigned long addr);
+ extern void __show_regs(struct pt_regs *);
+ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -79,18 +79,33 @@ static inline int notify_page_fault(stru
+ #endif
+ /*
+- * Dump out the page tables associated with 'addr' in mm 'mm'.
++ * Dump out the page tables associated with 'addr' in the currently active mm.
+  */
+-void show_pte(struct mm_struct *mm, unsigned long addr)
++void show_pte(unsigned long addr)
+ {
++      struct mm_struct *mm;
+       pgd_t *pgd;
+-      if (!mm)
++      if (addr < TASK_SIZE) {
++              /* TTBR0 */
++              mm = current->active_mm;
++              if (mm == &init_mm) {
++                      pr_alert("[%016lx] user address but active_mm is swapper\n",
++                               addr);
++                      return;
++              }
++      } else if (addr >= VA_START) {
++              /* TTBR1 */
+               mm = &init_mm;
++      } else {
++              pr_alert("[%016lx] address between user and kernel address ranges\n",
++                       addr);
++              return;
++      }
+       pr_alert("pgd = %p\n", mm->pgd);
+       pgd = pgd_offset(mm, addr);
+-      pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
++      pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
+       do {
+               pud_t *pud;
+@@ -176,8 +191,8 @@ static bool is_el1_instruction_abort(uns
+ /*
+  * The kernel tried to access some page that wasn't present.
+  */
+-static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
+-                            unsigned int esr, struct pt_regs *regs)
++static void __do_kernel_fault(unsigned long addr, unsigned int esr,
++                            struct pt_regs *regs)
+ {
+       /*
+        * Are we prepared to handle this kernel fault?
+@@ -194,7 +209,7 @@ static void __do_kernel_fault(struct mm_
+                (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+                "paging request", addr);
+-      show_pte(mm, addr);
++      show_pte(addr);
+       die("Oops", regs, esr);
+       bust_spinlocks(0);
+       do_exit(SIGKILL);
+@@ -216,7 +231,7 @@ static void __do_user_fault(struct task_
+               pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
+                       tsk->comm, task_pid_nr(tsk), inf->name, sig,
+                       addr, esr);
+-              show_pte(tsk->mm, addr);
++              show_pte(addr);
+               show_regs(regs);
+       }
+@@ -232,7 +247,6 @@ static void __do_user_fault(struct task_
+ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+ {
+       struct task_struct *tsk = current;
+-      struct mm_struct *mm = tsk->active_mm;
+       const struct fault_info *inf;
+       /*
+@@ -243,7 +257,7 @@ static void do_bad_area(unsigned long ad
+               inf = esr_to_fault_info(esr);
+               __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+       } else
+-              __do_kernel_fault(mm, addr, esr, regs);
++              __do_kernel_fault(addr, esr, regs);
+ }
+ #define VM_FAULT_BADMAP               0x010000
+@@ -454,7 +468,7 @@ retry:
+       return 0;
+ no_context:
+-      __do_kernel_fault(mm, addr, esr, regs);
++      __do_kernel_fault(addr, esr, regs);
+       return 0;
+ }
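
The range check that show_pte() now performs can be sketched on its own;
the self-contained userspace program below uses invented TASK_SIZE and
VA_START values, not the real arm64 layout:

  #include <stdio.h>

  #define TASK_SIZE 0x0000800000000000UL  /* mock top of the TTBR0 (user) range */
  #define VA_START  0xffff000000000000UL  /* mock base of the TTBR1 (kernel) range */

  static const char *table_for(unsigned long addr)
  {
          if (addr < TASK_SIZE)
                  return "TTBR0: walk current->active_mm (unless it is init_mm)";
          if (addr >= VA_START)
                  return "TTBR1: walk init_mm (swapper)";
          return "between the ranges: walk nothing";
  }

  int main(void)
  {
          printf("%s\n", table_for(0x0000000000401000UL));
          printf("%s\n", table_for(0xffff000012345000UL));
          printf("%s\n", table_for(0x8000000000000000UL));
          return 0;
  }
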
diff --git a/queue-4.9/arm64-proc-set-pte_ng-for-table-entries-to-avoid-traversing-them-twice.patch b/queue-4.9/arm64-proc-set-pte_ng-for-table-entries-to-avoid-traversing-them-twice.patch
new file mode 100644 (file)
index 0000000..a67de75
--- /dev/null
@@ -0,0 +1,82 @@
+From 2ce77f6d8a9ae9ce6d80397d88bdceb84a2004cd Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 13 Feb 2018 13:14:09 +0000
+Subject: arm64: proc: Set PTE_NG for table entries to avoid traversing them twice
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 2ce77f6d8a9ae9ce6d80397d88bdceb84a2004cd upstream.
+
+When KASAN is enabled, the swapper page table contains many identical
+mappings of the zero page, which can lead to a stall during boot whilst
+the G -> nG code continually walks the same page table entries looking
+for global mappings.
+
+This patch sets the nG bit (bit 11, which is IGNORED) in table entries
+after processing the subtree so we can easily skip them if we see them
+a second time.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/proc.S |   14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -181,7 +181,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
+       dc      cvac, cur_\()\type\()p          // Ensure any existing dirty
+       dmb     sy                              // lines are written back before
+       ldr     \type, [cur_\()\type\()p]       // loading the entry
+-      tbz     \type, #0, next_\()\type        // Skip invalid entries
++      tbz     \type, #0, skip_\()\type        // Skip invalid and
++      tbnz    \type, #11, skip_\()\type       // non-global entries
+       .endm
+       .macro __idmap_kpti_put_pgtable_ent_ng, type
+@@ -241,8 +242,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
+       add     end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
+ do_pgd:       __idmap_kpti_get_pgtable_ent    pgd
+       tbnz    pgd, #1, walk_puds
+-      __idmap_kpti_put_pgtable_ent_ng pgd
+ next_pgd:
++      __idmap_kpti_put_pgtable_ent_ng pgd
++skip_pgd:
+       add     cur_pgdp, cur_pgdp, #8
+       cmp     cur_pgdp, end_pgdp
+       b.ne    do_pgd
+@@ -270,8 +272,9 @@ walk_puds:
+       add     end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
+ do_pud:       __idmap_kpti_get_pgtable_ent    pud
+       tbnz    pud, #1, walk_pmds
+-      __idmap_kpti_put_pgtable_ent_ng pud
+ next_pud:
++      __idmap_kpti_put_pgtable_ent_ng pud
++skip_pud:
+       add     cur_pudp, cur_pudp, 8
+       cmp     cur_pudp, end_pudp
+       b.ne    do_pud
+@@ -290,8 +293,9 @@ walk_pmds:
+       add     end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
+ do_pmd:       __idmap_kpti_get_pgtable_ent    pmd
+       tbnz    pmd, #1, walk_ptes
+-      __idmap_kpti_put_pgtable_ent_ng pmd
+ next_pmd:
++      __idmap_kpti_put_pgtable_ent_ng pmd
++skip_pmd:
+       add     cur_pmdp, cur_pmdp, #8
+       cmp     cur_pmdp, end_pmdp
+       b.ne    do_pmd
+@@ -309,7 +313,7 @@ walk_ptes:
+       add     end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
+ do_pte:       __idmap_kpti_get_pgtable_ent    pte
+       __idmap_kpti_put_pgtable_ent_ng pte
+-next_pte:
++skip_pte:
+       add     cur_ptep, cur_ptep, #8
+       cmp     cur_ptep, end_ptep
+       b.ne    do_pte
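
In C terms, the trick is to reuse a bit that the hardware ignores in
table entries as a "visited" flag; the sketch below uses illustrative
bit positions rather than the real arm64 descriptor layout, but mirrors
the skip/mark logic of the assembly above:

  #include <stdint.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define ENTRY_VALID   (1ULL << 0)
  #define ENTRY_VISITED (1ULL << 11)  /* nG: ignored by hardware in table entries */

  static bool should_skip(uint64_t entry)
  {
          /* Skip invalid entries and subtrees that were already processed. */
          return !(entry & ENTRY_VALID) || (entry & ENTRY_VISITED);
  }

  static uint64_t mark_visited(uint64_t entry)
  {
          /* Set after processing, so a shared subtree is walked only once. */
          return entry | ENTRY_VISITED;
  }

  int main(void)
  {
          uint64_t table = ENTRY_VALID;

          printf("first encounter, skip? %d\n", should_skip(table));
          table = mark_visited(table);
          printf("second encounter, skip? %d\n", should_skip(table));
          return 0;
  }
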
diff --git a/queue-4.9/caif-reduce-stack-size-with-kasan.patch b/queue-4.9/caif-reduce-stack-size-with-kasan.patch
new file mode 100644 (file)
index 0000000..234a9be
--- /dev/null
@@ -0,0 +1,226 @@
+From ce6289661b14a8b391d90db918c91b6d6da6540a Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 16 Jan 2018 17:34:00 +0100
+Subject: caif: reduce stack size with KASAN
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit ce6289661b14a8b391d90db918c91b6d6da6540a upstream.
+
+When CONFIG_KASAN is set, we can use relatively large amounts of kernel
+stack space:
+
+net/caif/cfctrl.c:555:1: warning: the frame size of 1600 bytes is larger than 1280 bytes [-Wframe-larger-than=]
+
+This adds convenience wrappers around cfpkt_extr_head(), which is responsible
+for most of the stack growth. With those wrapper functions, gcc apparently
+starts reusing the stack slots for each instance, thus avoiding the
+problem.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/caif/cfpkt.h |   27 +++++++++++++++++++++++++
+ net/caif/cfctrl.c        |   50 ++++++++++++++++++++---------------------------
+ 2 files changed, 49 insertions(+), 28 deletions(-)
+
+--- a/include/net/caif/cfpkt.h
++++ b/include/net/caif/cfpkt.h
+@@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
+  */
+ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
++static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
++{
++      u8 tmp;
++
++      cfpkt_extr_head(pkt, &tmp, 1);
++
++      return tmp;
++}
++
++static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
++{
++      __le16 tmp;
++
++      cfpkt_extr_head(pkt, &tmp, 2);
++
++      return le16_to_cpu(tmp);
++}
++
++static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
++{
++      __le32 tmp;
++
++      cfpkt_extr_head(pkt, &tmp, 4);
++
++      return le32_to_cpu(tmp);
++}
++
+ /*
+  * Peek header from packet.
+  * Reads data from packet without changing packet.
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -352,15 +352,14 @@ static int cfctrl_recv(struct cflayer *l
+       u8 cmdrsp;
+       u8 cmd;
+       int ret = -1;
+-      u16 tmp16;
+       u8 len;
+       u8 param[255];
+-      u8 linkid;
++      u8 linkid = 0;
+       struct cfctrl *cfctrl = container_obj(layer);
+       struct cfctrl_request_info rsp, *req;
+-      cfpkt_extr_head(pkt, &cmdrsp, 1);
++      cmdrsp = cfpkt_extr_head_u8(pkt);
+       cmd = cmdrsp & CFCTRL_CMD_MASK;
+       if (cmd != CFCTRL_CMD_LINK_ERR
+           && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
+@@ -378,13 +377,12 @@ static int cfctrl_recv(struct cflayer *l
+                       u8 physlinkid;
+                       u8 prio;
+                       u8 tmp;
+-                      u32 tmp32;
+                       u8 *cp;
+                       int i;
+                       struct cfctrl_link_param linkparam;
+                       memset(&linkparam, 0, sizeof(linkparam));
+-                      cfpkt_extr_head(pkt, &tmp, 1);
++                      tmp = cfpkt_extr_head_u8(pkt);
+                       serv = tmp & CFCTRL_SRV_MASK;
+                       linkparam.linktype = serv;
+@@ -392,13 +390,13 @@ static int cfctrl_recv(struct cflayer *l
+                       servtype = tmp >> 4;
+                       linkparam.chtype = servtype;
+-                      cfpkt_extr_head(pkt, &tmp, 1);
++                      tmp = cfpkt_extr_head_u8(pkt);
+                       physlinkid = tmp & 0x07;
+                       prio = tmp >> 3;
+                       linkparam.priority = prio;
+                       linkparam.phyid = physlinkid;
+-                      cfpkt_extr_head(pkt, &endpoint, 1);
++                      endpoint = cfpkt_extr_head_u8(pkt);
+                       linkparam.endpoint = endpoint & 0x03;
+                       switch (serv) {
+@@ -407,45 +405,43 @@ static int cfctrl_recv(struct cflayer *l
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_VIDEO:
+-                              cfpkt_extr_head(pkt, &tmp, 1);
++                              tmp = cfpkt_extr_head_u8(pkt);
+                               linkparam.u.video.connid = tmp;
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_DATAGRAM:
+-                              cfpkt_extr_head(pkt, &tmp32, 4);
+                               linkparam.u.datagram.connid =
+-                                  le32_to_cpu(tmp32);
++                                  cfpkt_extr_head_u32(pkt);
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_RFM:
+                               /* Construct a frame, convert
+                                * DatagramConnectionID
+                                * to network format long and copy it out...
+                                */
+-                              cfpkt_extr_head(pkt, &tmp32, 4);
+                               linkparam.u.rfm.connid =
+-                                le32_to_cpu(tmp32);
++                                  cfpkt_extr_head_u32(pkt);
+                               cp = (u8 *) linkparam.u.rfm.volume;
+-                              for (cfpkt_extr_head(pkt, &tmp, 1);
++                              for (tmp = cfpkt_extr_head_u8(pkt);
+                                    cfpkt_more(pkt) && tmp != '\0';
+-                                   cfpkt_extr_head(pkt, &tmp, 1))
++                                   tmp = cfpkt_extr_head_u8(pkt))
+                                       *cp++ = tmp;
+                               *cp = '\0';
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               break;
+                       case CFCTRL_SRV_UTIL:
+@@ -454,13 +450,11 @@ static int cfctrl_recv(struct cflayer *l
+                                * to network format long and copy it out...
+                                */
+                               /* Fifosize KB */
+-                              cfpkt_extr_head(pkt, &tmp16, 2);
+                               linkparam.u.utility.fifosize_kb =
+-                                  le16_to_cpu(tmp16);
++                                  cfpkt_extr_head_u16(pkt);
+                               /* Fifosize bufs */
+-                              cfpkt_extr_head(pkt, &tmp16, 2);
+                               linkparam.u.utility.fifosize_bufs =
+-                                  le16_to_cpu(tmp16);
++                                  cfpkt_extr_head_u16(pkt);
+                               /* name */
+                               cp = (u8 *) linkparam.u.utility.name;
+                               caif_assert(sizeof(linkparam.u.utility.name)
+@@ -468,24 +462,24 @@ static int cfctrl_recv(struct cflayer *l
+                               for (i = 0;
+                                    i < UTILITY_NAME_LENGTH
+                                    && cfpkt_more(pkt); i++) {
+-                                      cfpkt_extr_head(pkt, &tmp, 1);
++                                      tmp = cfpkt_extr_head_u8(pkt);
+                                       *cp++ = tmp;
+                               }
+                               /* Length */
+-                              cfpkt_extr_head(pkt, &len, 1);
++                              len = cfpkt_extr_head_u8(pkt);
+                               linkparam.u.utility.paramlen = len;
+                               /* Param Data */
+                               cp = linkparam.u.utility.params;
+                               while (cfpkt_more(pkt) && len--) {
+-                                      cfpkt_extr_head(pkt, &tmp, 1);
++                                      tmp = cfpkt_extr_head_u8(pkt);
+                                       *cp++ = tmp;
+                               }
+                               if (CFCTRL_ERR_BIT & cmdrsp)
+                                       break;
+                               /* Link ID */
+-                              cfpkt_extr_head(pkt, &linkid, 1);
++                              linkid = cfpkt_extr_head_u8(pkt);
+                               /* Length */
+-                              cfpkt_extr_head(pkt, &len, 1);
++                              len = cfpkt_extr_head_u8(pkt);
+                               /* Param Data */
+                               cfpkt_extr_head(pkt, &param, len);
+                               break;
+@@ -522,7 +516,7 @@ static int cfctrl_recv(struct cflayer *l
+               }
+               break;
+       case CFCTRL_CMD_LINK_DESTROY:
+-              cfpkt_extr_head(pkt, &linkid, 1);
++              linkid = cfpkt_extr_head_u8(pkt);
+               cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
+               break;
+       case CFCTRL_CMD_LINK_ERR:
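
The pattern is generic: an out-parameter primitive forces an
address-taken temporary at every call site, and KASAN gives each such
temporary its own instrumented stack slot, while a small by-value
wrapper confines the temporary to one place. A self-contained sketch of
the same shape (names invented, not the actual CAIF API):

  #include <stdint.h>
  #include <string.h>
  #include <stdio.h>

  /* Out-parameter primitive, like cfpkt_extr_head(): */
  static int extr_head(const uint8_t **p, void *out, size_t len)
  {
          memcpy(out, *p, len);
          *p += len;
          return 0;
  }

  /* By-value wrapper: the only address-taken temporary lives here, so the
   * compiler can reuse a single slot instead of stacking up a redzoned
   * 'tmp' for every call site in a large function. */
  static inline uint8_t extr_head_u8(const uint8_t **p)
  {
          uint8_t tmp;

          extr_head(p, &tmp, 1);
          return tmp;
  }

  int main(void)
  {
          const uint8_t buf[] = { 0x12, 0x34 };
          const uint8_t *p = buf;
          uint8_t a = extr_head_u8(&p);
          uint8_t b = extr_head_u8(&p);

          printf("%02x %02x\n", a, b);
          return 0;
  }
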
diff --git a/queue-4.9/kasan-add-a-prototype-of-task_struct-to-avoid-warning.patch b/queue-4.9/kasan-add-a-prototype-of-task_struct-to-avoid-warning.patch
new file mode 100644 (file)
index 0000000..4d66da5
--- /dev/null
@@ -0,0 +1,49 @@
+From 5be9b730b09c45c358bbfe7f51d254e306cccc07 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Thu, 16 Mar 2017 16:40:21 -0700
+Subject: kasan: add a prototype of task_struct to avoid warning
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 5be9b730b09c45c358bbfe7f51d254e306cccc07 upstream.
+
+Add a prototype of task_struct to fix the below warning on arm64.
+
+  In file included from arch/arm64/kernel/probes/kprobes.c:19:0:
+  include/linux/kasan.h:81:132: error: 'struct task_struct' declared inside parameter list will not be visible outside of this definition or declaration [-Werror]
+   static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+
+As with the other types (kmem_cache, page, and vm_struct), this adds a
+prototype of the task_struct data structure at the top of kasan.h.
+
+[arnd] A related warning was fixed before, but now appears in a
+different line in the same file in v4.11-rc2.  The patch from Masami
+Hiramatsu still seems appropriate, so let's take his version.
+
+Fixes: 71af2ed5eeea ("kasan, sched/headers: Remove <linux/sched.h> from <linux/kasan.h>")
+Link: https://patchwork.kernel.org/patch/9569839/
+Link: http://lkml.kernel.org/r/20170313141517.3397802-1-arnd@arndb.de
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Acked-by: Alexander Potapenko <glider@google.com>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/kasan.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -7,6 +7,7 @@
+ struct kmem_cache;
+ struct page;
+ struct vm_struct;
++struct task_struct;
+ #ifdef CONFIG_KASAN
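
A forward declaration is enough here because the !CONFIG_KASAN stubs
only ever take a pointer to the type, and C allows pointers to
incomplete types. As a minimal standalone illustration:

  /* Declaration only; no definition of the struct is needed. */
  struct task_struct;

  static inline void kasan_unpoison_task_stack(struct task_struct *task)
  {
          (void)task;  /* empty stub, as in the !CONFIG_KASAN case */
  }
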
diff --git a/queue-4.9/kasan-avoid-wmaybe-uninitialized-warning.patch b/queue-4.9/kasan-avoid-wmaybe-uninitialized-warning.patch
new file mode 100644 (file)
index 0000000..ae62497
--- /dev/null
@@ -0,0 +1,51 @@
+From e7701557bfdd81ff44cab13a80439319a735d8e2 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 2 Aug 2017 13:31:58 -0700
+Subject: kasan: avoid -Wmaybe-uninitialized warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit e7701557bfdd81ff44cab13a80439319a735d8e2 upstream.
+
+gcc-7 produces this warning:
+
+  mm/kasan/report.c: In function 'kasan_report':
+  mm/kasan/report.c:351:3: error: 'info.first_bad_addr' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+     print_shadow_for_address(info->first_bad_addr);
+     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  mm/kasan/report.c:360:27: note: 'info.first_bad_addr' was declared here
+
+The code seems fine as we only print info.first_bad_addr when there is a
+shadow, and we always initialize it in that case, but this is relatively
+hard for gcc to figure out after the latest rework.
+
+Adding an initialization to the most likely value together with the
+other struct members shuts up that warning.
+
+Fixes: b235b9808664 ("kasan: unify report headers")
+Link: https://patchwork.kernel.org/patch/9641417/
+Link: http://lkml.kernel.org/r/20170725152739.4176967-1-arnd@arndb.de
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Suggested-by: Alexander Potapenko <glider@google.com>
+Suggested-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/kasan/report.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -302,6 +302,7 @@ void kasan_report(unsigned long addr, si
+       disable_trace_on_warning();
+       info.access_addr = (void *)addr;
++      info.first_bad_addr = (void *)addr;
+       info.access_size = size;
+       info.is_write = is_write;
+       info.ip = ip;
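
A reduced userspace shape of the false positive (illustrative, not the
actual kasan_report() code): the assignment and the use are guarded by
the same condition, but after inlining and reordering gcc-7 cannot
always connect the two branches, so pre-initializing the field silences
the warning:

  #include <stdio.h>

  struct info { const void *access_addr; const void *first_bad_addr; };

  static void report(const void *addr, int has_shadow)
  {
          struct info info = {
                  .access_addr    = addr,
                  .first_bad_addr = addr,  /* the "most likely value" */
          };

          if (has_shadow)
                  info.first_bad_addr = (const char *)addr + 8;  /* refined */

          /* Without the initializer above, gcc may fail to prove that this
           * branch implies the one above and warn about a
           * maybe-uninitialized read: */
          if (has_shadow)
                  printf("first bad addr: %p\n", info.first_bad_addr);
  }

  int main(void)
  {
          int x;

          report(&x, 1);
          return 0;
  }
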
diff --git a/queue-4.9/kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch b/queue-4.9/kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch
new file mode 100644 (file)
index 0000000..0e5c4e9
--- /dev/null
@@ -0,0 +1,51 @@
+From 69ca372c100fba99c78ef826a1795aa86e4f01a8 Mon Sep 17 00:00:00 2001
+From: Andrey Konovalov <andreyknvl@google.com>
+Date: Tue, 10 Apr 2018 16:30:39 -0700
+Subject: kasan: prevent compiler from optimizing away memset in tests
+
+From: Andrey Konovalov <andreyknvl@google.com>
+
+commit 69ca372c100fba99c78ef826a1795aa86e4f01a8 upstream.
+
+A compiler can optimize away memset calls by replacing them with mov
+instructions.  There are KASAN tests that specifically check that KASAN
+correctly handles memset calls, so we don't want this optimization to
+happen.
+
+The solution is to add the -fno-builtin flag to test_kasan.ko.
+
+Link: http://lkml.kernel.org/r/105ec9a308b2abedb1a0d1fdced0c22d765e4732.1519924383.git.andreyknvl@google.com
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Nick Terrell <terrelln@fb.com>
+Cc: Chris Mason <clm@fb.com>
+Cc: Yury Norov <ynorov@caviumnetworks.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: "Luis R . Rodriguez" <mcgrof@kernel.org>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Jeff Layton <jlayton@redhat.com>
+Cc: "Jason A . Donenfeld" <Jason@zx2c4.com>
+Cc: Kostya Serebryany <kcc@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/Makefile |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -46,6 +46,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
+ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+ obj-$(CONFIG_TEST_HASH) += test_hash.o
+ obj-$(CONFIG_TEST_KASAN) += test_kasan.o
++CFLAGS_test_kasan.o += -fno-builtin
+ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
+ obj-$(CONFIG_TEST_LKM) += test_module.o
+ obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
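
A standalone illustration of what the flag prevents (the file name in
the compile command is hypothetical): at -O2 a small fixed-size
memset() is typically lowered to plain stores, so it never reaches
KASAN's interposed memset() unless built with -fno-builtin; compare the
output of gcc -O2 -S demo.c with and without the flag:

  #include <string.h>

  char buf[8];

  void clear_small(void)
  {
          /* Likely becomes a single 8-byte store unless -fno-builtin
           * (or -fno-builtin-memset) forces a real library call. */
          memset(buf, 0, sizeof(buf));
  }
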
diff --git a/queue-4.9/kasan-remove-redundant-initialization-of-variable-real_size.patch b/queue-4.9/kasan-remove-redundant-initialization-of-variable-real_size.patch
new file mode 100644 (file)
index 0000000..d61d17b
--- /dev/null
@@ -0,0 +1,43 @@
+From 48c232395431c23d35cf3b4c5a090bd793316578 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 6 Feb 2018 15:36:48 -0800
+Subject: kasan: remove redundant initialization of variable 'real_size'
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 48c232395431c23d35cf3b4c5a090bd793316578 upstream.
+
+Variable real_size is initialized with a value that is never read; it is
+re-assigned a new value later on, hence the initialization is redundant
+and can be removed.
+
+Cleans up clang warning:
+
+  lib/test_kasan.c:422:21: warning: Value stored to 'real_size' during its initialization is never read
+
+Link: http://lkml.kernel.org/r/20180206144950.32457-1-colin.king@canonical.com
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_kasan.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/test_kasan.c
++++ b/lib/test_kasan.c
+@@ -355,7 +355,7 @@ static noinline void __init kasan_stack_
+ static noinline void __init ksize_unpoisons_memory(void)
+ {
+       char *ptr;
+-      size_t size = 123, real_size = size;
++      size_t size = 123, real_size;
+       pr_info("ksize() unpoisons the whole allocated chunk\n");
+       ptr = kmalloc(size, GFP_KERNEL);
diff --git a/queue-4.9/kasan-turn-on-fsanitize-address-use-after-scope.patch b/queue-4.9/kasan-turn-on-fsanitize-address-use-after-scope.patch
new file mode 100644 (file)
index 0000000..10e7b2e
--- /dev/null
@@ -0,0 +1,44 @@
+From c5caf21ab0cf884ef15b25af234f620e4a233139 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Mon, 12 Dec 2016 16:44:59 -0800
+Subject: kasan: turn on -fsanitize-address-use-after-scope
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit c5caf21ab0cf884ef15b25af234f620e4a233139 upstream.
+
+In the upcoming gcc7 release, the -fsanitize=kernel-address option at
+first implied the new -fsanitize-address-use-after-scope option.  This
+would cause link errors on older kernels because they don't have the two
+new functions required for use-after-scope support.  Therefore, gcc7
+changed the default to -fno-sanitize-address-use-after-scope.
+
+Now the kernel has everything required for that feature since commit
+828347f8f9a5 ("kasan: support use-after-scope detection").  So, to make it
+work, we just have to enable use-after-scope in CFLAGS.
+
+Link: http://lkml.kernel.org/r/1481207977-28654-1-git-send-email-aryabinin@virtuozzo.com
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/Makefile.kasan |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/scripts/Makefile.kasan
++++ b/scripts/Makefile.kasan
+@@ -29,6 +29,8 @@ else
+     endif
+ endif
++CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope)
++
+ CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+ endif
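
The class of bug the flag detects, as a standalone sketch: a pointer
outlives the lexical scope of its target while the enclosing stack
frame is still live, which KASAN's function-granular stack poisoning
alone cannot catch. Building this with
gcc -fsanitize=address -fsanitize-address-use-after-scope and running
it should produce a use-after-scope report:

  #include <stdio.h>

  static int use_after_scope(void)
  {
          int *p;

          {
                  int x = 42;

                  p = &x;
          }  /* x's scope ends; its stack slot is poisoned here */

          return *p;  /* caught with -fsanitize-address-use-after-scope */
  }

  int main(void)
  {
          printf("%d\n", use_after_scope());
          return 0;
  }
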
diff --git a/queue-4.9/mm-kasan-switch-to-using-__pa_symbol-and-lm_alias.patch b/queue-4.9/mm-kasan-switch-to-using-__pa_symbol-and-lm_alias.patch
new file mode 100644 (file)
index 0000000..aa0d1c8
--- /dev/null
@@ -0,0 +1,80 @@
+From 5c6a84a3f4558a6115fef1b59343c7ae56b3abc3 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Tue, 10 Jan 2017 13:35:44 -0800
+Subject: mm/kasan: Switch to using __pa_symbol and lm_alias
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 5c6a84a3f4558a6115fef1b59343c7ae56b3abc3 upstream.
+
+__pa_symbol is the correct API to find the physical address of symbols.
+Switch to it so that the debugging APIs work correctly. Other functions
+such as p*d_populate may call __pa internally. Ensure that the address
+passed is in the linear region by calling lm_alias.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/kasan/kasan_init.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/mm/kasan/kasan_init.c
++++ b/mm/kasan/kasan_init.c
+@@ -15,6 +15,7 @@
+ #include <linux/kasan.h>
+ #include <linux/kernel.h>
+ #include <linux/memblock.h>
++#include <linux/mm.h>
+ #include <linux/pfn.h>
+ #include <asm/page.h>
+@@ -49,7 +50,7 @@ static void __init zero_pte_populate(pmd
+       pte_t *pte = pte_offset_kernel(pmd, addr);
+       pte_t zero_pte;
+-      zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
++      zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
+       zero_pte = pte_wrprotect(zero_pte);
+       while (addr + PAGE_SIZE <= end) {
+@@ -69,7 +70,7 @@ static void __init zero_pmd_populate(pud
+               next = pmd_addr_end(addr, end);
+               if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+-                      pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
++                      pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
+                       continue;
+               }
+@@ -92,9 +93,9 @@ static void __init zero_pud_populate(pgd
+               if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+                       pmd_t *pmd;
+-                      pud_populate(&init_mm, pud, kasan_zero_pmd);
++                      pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
+                       pmd = pmd_offset(pud, addr);
+-                      pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
++                      pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
+                       continue;
+               }
+@@ -135,11 +136,11 @@ void __init kasan_populate_zero_shadow(c
+                        * puds,pmds, so pgd_populate(), pud_populate()
+                        * is noops.
+                        */
+-                      pgd_populate(&init_mm, pgd, kasan_zero_pud);
++                      pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
+                       pud = pud_offset(pgd, addr);
+-                      pud_populate(&init_mm, pud, kasan_zero_pmd);
++                      pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
+                       pmd = pmd_offset(pud, addr);
+-                      pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
++                      pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
+                       continue;
+               }
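
A userspace mock of the distinction that CONFIG_DEBUG_VIRTUAL enforces
(all constants invented): __pa() is only valid for linear-map
addresses, __pa_symbol() is the variant for kernel-image symbols, and
helpers that apply __pa() internally must therefore be handed the
lm_alias() of an image address:

  #include <assert.h>
  #include <stdio.h>

  #define PAGE_OFFSET  0xffff800000000000UL  /* mock linear-map base */
  #define KIMAGE_VADDR 0xffff200008000000UL  /* mock kernel-image base */
  #define PHYS_BASE    0x40000000UL          /* mock physical load address */

  static unsigned long __pa(unsigned long va)
  {
          assert(va >= PAGE_OFFSET);  /* the DEBUG_VIRTUAL-style check */
          return va - PAGE_OFFSET + PHYS_BASE;
  }

  static unsigned long __pa_symbol(unsigned long va)
  {
          return va - KIMAGE_VADDR + PHYS_BASE;  /* image-address variant */
  }

  int main(void)
  {
          unsigned long zero_page = KIMAGE_VADDR + 0x2000;  /* mock symbol */

          printf("pa = %#lx\n", __pa_symbol(zero_page));
          /* __pa(zero_page) would trip the assertion above. */
          return 0;
  }
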
diff --git a/queue-4.9/series b/queue-4.9/series
index af3a2cf5e4b9bddbcbbac1859a802f0be0a6a511..8713de6202a952827cea810ccee75e54abf923a9 100644 (file)
--- a/queue-4.9/series
@@ -5,3 +5,16 @@ ipv6-invert-flowlabel-sharing-check-in-process-and-user-mode.patch
 packet-validate-msg_namelen-in-send-directly.patch
 bnxt_en-improve-multicast-address-setup-logic.patch
 net-phy-marvell-fix-buffer-overrun-with-stats-counters.patch
+x86-suspend-fix-false-positive-kasan-warning-on-suspend-resume.patch
+kasan-turn-on-fsanitize-address-use-after-scope.patch
+mm-kasan-switch-to-using-__pa_symbol-and-lm_alias.patch
+x86-unwind-disable-kasan-checks-for-non-current-tasks.patch
+arm64-kasan-avoid-bad-virt_to_pfn.patch
+kasan-add-a-prototype-of-task_struct-to-avoid-warning.patch
+kasan-avoid-wmaybe-uninitialized-warning.patch
+kasan-remove-redundant-initialization-of-variable-real_size.patch
+arm64-proc-set-pte_ng-for-table-entries-to-avoid-traversing-them-twice.patch
+kasan-prevent-compiler-from-optimizing-away-memset-in-tests.patch
+arm64-mm-print-out-correct-page-table-entries.patch
+arm64-mm-don-t-print-out-page-table-entries-on-el0-faults.patch
+caif-reduce-stack-size-with-kasan.patch
diff --git a/queue-4.9/x86-suspend-fix-false-positive-kasan-warning-on-suspend-resume.patch b/queue-4.9/x86-suspend-fix-false-positive-kasan-warning-on-suspend-resume.patch
new file mode 100644 (file)
index 0000000..3df7d2a
--- /dev/null
@@ -0,0 +1,146 @@
+From b53f40db59b27b62bc294c30506b02a0cae47e0b Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Fri, 2 Dec 2016 11:42:21 -0600
+Subject: x86/suspend: fix false positive KASAN warning on suspend/resume
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit b53f40db59b27b62bc294c30506b02a0cae47e0b upstream.
+
+Resuming from a suspend operation is showing a KASAN false positive
+warning:
+
+  BUG: KASAN: stack-out-of-bounds in unwind_get_return_address+0x11d/0x130 at addr ffff8803867d7878
+  Read of size 8 by task pm-suspend/7774
+  page:ffffea000e19f5c0 count:0 mapcount:0 mapping:          (null) index:0x0
+  flags: 0x2ffff0000000000()
+  page dumped because: kasan: bad access detected
+  CPU: 0 PID: 7774 Comm: pm-suspend Tainted: G    B           4.9.0-rc7+ #8
+  Hardware name: Gigabyte Technology Co., Ltd. Z170X-UD5/Z170X-UD5-CF, BIOS F5 03/07/2016
+  Call Trace:
+    dump_stack+0x63/0x82
+    kasan_report_error+0x4b4/0x4e0
+    ? acpi_hw_read_port+0xd0/0x1ea
+    ? kfree_const+0x22/0x30
+    ? acpi_hw_validate_io_request+0x1a6/0x1a6
+    __asan_report_load8_noabort+0x61/0x70
+    ? unwind_get_return_address+0x11d/0x130
+    unwind_get_return_address+0x11d/0x130
+    ? unwind_next_frame+0x97/0xf0
+    __save_stack_trace+0x92/0x100
+    save_stack_trace+0x1b/0x20
+    save_stack+0x46/0xd0
+    ? save_stack_trace+0x1b/0x20
+    ? save_stack+0x46/0xd0
+    ? kasan_kmalloc+0xad/0xe0
+    ? kasan_slab_alloc+0x12/0x20
+    ? acpi_hw_read+0x2b6/0x3aa
+    ? acpi_hw_validate_register+0x20b/0x20b
+    ? acpi_hw_write_port+0x72/0xc7
+    ? acpi_hw_write+0x11f/0x15f
+    ? acpi_hw_read_multiple+0x19f/0x19f
+    ? memcpy+0x45/0x50
+    ? acpi_hw_write_port+0x72/0xc7
+    ? acpi_hw_write+0x11f/0x15f
+    ? acpi_hw_read_multiple+0x19f/0x19f
+    ? kasan_unpoison_shadow+0x36/0x50
+    kasan_kmalloc+0xad/0xe0
+    kasan_slab_alloc+0x12/0x20
+    kmem_cache_alloc_trace+0xbc/0x1e0
+    ? acpi_get_sleep_type_data+0x9a/0x578
+    acpi_get_sleep_type_data+0x9a/0x578
+    acpi_hw_legacy_wake_prep+0x88/0x22c
+    ? acpi_hw_legacy_sleep+0x3c7/0x3c7
+    ? acpi_write_bit_register+0x28d/0x2d3
+    ? acpi_read_bit_register+0x19b/0x19b
+    acpi_hw_sleep_dispatch+0xb5/0xba
+    acpi_leave_sleep_state_prep+0x17/0x19
+    acpi_suspend_enter+0x154/0x1e0
+    ? trace_suspend_resume+0xe8/0xe8
+    suspend_devices_and_enter+0xb09/0xdb0
+    ? printk+0xa8/0xd8
+    ? arch_suspend_enable_irqs+0x20/0x20
+    ? try_to_freeze_tasks+0x295/0x600
+    pm_suspend+0x6c9/0x780
+    ? finish_wait+0x1f0/0x1f0
+    ? suspend_devices_and_enter+0xdb0/0xdb0
+    state_store+0xa2/0x120
+    ? kobj_attr_show+0x60/0x60
+    kobj_attr_store+0x36/0x70
+    sysfs_kf_write+0x131/0x200
+    kernfs_fop_write+0x295/0x3f0
+    __vfs_write+0xef/0x760
+    ? handle_mm_fault+0x1346/0x35e0
+    ? do_iter_readv_writev+0x660/0x660
+    ? __pmd_alloc+0x310/0x310
+    ? do_lock_file_wait+0x1e0/0x1e0
+    ? apparmor_file_permission+0x18/0x20
+    ? security_file_permission+0x73/0x1c0
+    ? rw_verify_area+0xbd/0x2b0
+    vfs_write+0x149/0x4a0
+    SyS_write+0xd9/0x1c0
+    ? SyS_read+0x1c0/0x1c0
+    entry_SYSCALL_64_fastpath+0x1e/0xad
+  Memory state around the buggy address:
+   ffff8803867d7700: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+   ffff8803867d7780: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+  >ffff8803867d7800: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 f4
+                                                                  ^
+   ffff8803867d7880: f3 f3 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00
+   ffff8803867d7900: 00 00 00 f1 f1 f1 f1 04 f4 f4 f4 f3 f3 f3 f3 00
+
+KASAN instrumentation poisons the stack when entering a function and
+unpoisons it when exiting the function.  However, in the suspend path,
+some functions never return, so their stack never gets unpoisoned,
+resulting in stale KASAN shadow data which can cause later false
+positive warnings like the one above.
+
+Reported-by: Scott Bauer <scott.bauer@intel.com>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/acpi/wakeup_64.S |    9 +++++++++
+ mm/kasan/kasan.c                 |    9 ++++++++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/acpi/wakeup_64.S
++++ b/arch/x86/kernel/acpi/wakeup_64.S
+@@ -109,6 +109,15 @@ ENTRY(do_suspend_lowlevel)
+       movq    pt_regs_r14(%rax), %r14
+       movq    pt_regs_r15(%rax), %r15
++#ifdef CONFIG_KASAN
++      /*
++       * The suspend path may have poisoned some areas deeper in the stack,
++       * which we now need to unpoison.
++       */
++      movq    %rsp, %rdi
++      call    kasan_unpoison_task_stack_below
++#endif
++
+       xorl    %eax, %eax
+       addq    $8, %rsp
+       FRAME_END
+--- a/mm/kasan/kasan.c
++++ b/mm/kasan/kasan.c
+@@ -80,7 +80,14 @@ void kasan_unpoison_task_stack(struct ta
+ /* Unpoison the stack for the current task beyond a watermark sp value. */
+ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
+ {
+-      __kasan_unpoison_stack(current, watermark);
++      /*
++       * Calculate the task stack base address.  Avoid using 'current'
++       * because this function is called by early resume code which hasn't
++       * yet set up the percpu register (%gs).
++       */
++      void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
++
++      kasan_unpoison_shadow(base, watermark - base);
+ }
+ /*
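
The base calculation relies only on task stacks being THREAD_SIZE-sized
and THREAD_SIZE-aligned, which is exactly why it works before the
percpu register is restored; a self-contained sketch with an invented
THREAD_SIZE:

  #include <stdio.h>

  #define THREAD_SIZE (16UL * 1024)  /* must be a power of two */

  static void unpoison_task_stack_below(const void *watermark)
  {
          /* Mask down to the stack base; no 'current' needed. */
          void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

          printf("would unpoison [%p, %p)\n", base, watermark);
  }

  int main(void)
  {
          unsigned long fake_sp = 0x0000700000003a10UL;

          unpoison_task_stack_below((void *)fake_sp);
          return 0;
  }
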
diff --git a/queue-4.9/x86-unwind-disable-kasan-checks-for-non-current-tasks.patch b/queue-4.9/x86-unwind-disable-kasan-checks-for-non-current-tasks.patch
new file mode 100644 (file)
index 0000000..75d9201
--- /dev/null
@@ -0,0 +1,113 @@
+From 84936118bdf37bda513d4a361c38181a216427e0 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Mon, 9 Jan 2017 12:00:23 -0600
+Subject: x86/unwind: Disable KASAN checks for non-current tasks
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit 84936118bdf37bda513d4a361c38181a216427e0 upstream.
+
+There are a handful of callers to save_stack_trace_tsk() and
+show_stack() which try to unwind the stack of a task other than current.
+In such cases, it's remotely possible that the task is running on one
+CPU while the unwinder is reading its stack from another CPU, causing
+the unwinder to see stack corruption.
+
+These cases seem to be mostly harmless.  The unwinder has checks which
+prevent it from following bad pointers beyond the bounds of the stack.
+So it's not really a bug as long as the caller understands that
+unwinding another task will not always succeed.
+
+In such cases, it's possible that the unwinder may read a KASAN-poisoned
+region of the stack.  Account for that by using READ_ONCE_NOCHECK() when
+reading the stack of another task.
+
+Use READ_ONCE() when reading the stack of the current task, since KASAN
+warnings can still be useful for finding bugs in that case.
+
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Miroslav Benes <mbenes@suse.cz>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/4c575eb288ba9f73d498dfe0acde2f58674598f1.1483978430.git.jpoimboe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/stacktrace.h |    5 ++++-
+ arch/x86/kernel/unwind_frame.c    |   20 ++++++++++++++++++--
+ 2 files changed, 22 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/stacktrace.h
++++ b/arch/x86/include/asm/stacktrace.h
+@@ -55,13 +55,16 @@ extern int kstack_depth_to_print;
+ static inline unsigned long *
+ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
+ {
++      struct inactive_task_frame *frame;
++
+       if (regs)
+               return (unsigned long *)regs->bp;
+       if (task == current)
+               return __builtin_frame_address(0);
+-      return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
++      frame = (struct inactive_task_frame *)task->thread.sp;
++      return (unsigned long *)READ_ONCE_NOCHECK(frame->bp);
+ }
+ #else
+ static inline unsigned long *
+--- a/arch/x86/kernel/unwind_frame.c
++++ b/arch/x86/kernel/unwind_frame.c
+@@ -6,6 +6,21 @@
+ #define FRAME_HEADER_SIZE (sizeof(long) * 2)
++/*
++ * This disables KASAN checking when reading a value from another task's stack,
++ * since the other task could be running on another CPU and could have poisoned
++ * the stack in the meantime.
++ */
++#define READ_ONCE_TASK_STACK(task, x)                 \
++({                                                    \
++      unsigned long val;                              \
++      if (task == current)                            \
++              val = READ_ONCE(x);                     \
++      else                                            \
++              val = READ_ONCE_NOCHECK(x);             \
++      val;                                            \
++})
++
+ unsigned long unwind_get_return_address(struct unwind_state *state)
+ {
+       unsigned long addr;
+@@ -14,7 +29,8 @@ unsigned long unwind_get_return_address(
+       if (unwind_done(state))
+               return 0;
+-      addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
++      addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
++      addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
+                                    addr_p);
+       return __kernel_text_address(addr) ? addr : 0;
+@@ -48,7 +64,7 @@ bool unwind_next_frame(struct unwind_sta
+       if (unwind_done(state))
+               return false;
+-      next_bp = (unsigned long *)*state->bp;
++      next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
+       /* make sure the next frame's data is accessible */
+       if (!update_stack_state(state, next_bp, FRAME_HEADER_SIZE))
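
A self-contained mock of the macro's intent (GNU C, which the kernel
itself requires for statement expressions; both READ_ONCE() stand-ins
below are plain volatile reads, since only the kernel's
READ_ONCE_NOCHECK() actually bypasses KASAN): checked reads for the
current task, where a KASAN report would be meaningful, and unchecked
reads for any other task, whose stack may be poisoned concurrently:

  #include <stdio.h>

  struct task { int id; };

  static struct task init_task = { 1 };
  #define current (&init_task)

  /* Stand-ins for the kernel's READ_ONCE()/READ_ONCE_NOCHECK(). */
  #define READ_ONCE(x)         (*(volatile __typeof__(x) *)&(x))
  #define READ_ONCE_NOCHECK(x) (*(volatile __typeof__(x) *)&(x))

  #define READ_ONCE_TASK_STACK(task, x)           \
  ({                                              \
          unsigned long val;                      \
          if ((task) == current)                  \
                  val = READ_ONCE(x);             \
          else                                    \
                  val = READ_ONCE_NOCHECK(x);     \
          val;                                    \
  })

  int main(void)
  {
          struct task other = { 2 };
          unsigned long slot = 0xdeadbeefUL;

          printf("current: %#lx\n", READ_ONCE_TASK_STACK(current, slot));
          printf("other:   %#lx\n", READ_ONCE_TASK_STACK(&other, slot));
          return 0;
  }
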