--- /dev/null
+From foo@baz Thu Oct 22 03:56:50 PM CEST 2020
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Thu, 1 Feb 2018 21:00:49 +0300
+Subject: compiler.h: Add read_word_at_a_time() function.
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 7f1e541fc8d57a143dd5df1d0a1276046e08c083 upstream.
+
+Sometimes we know that it's safe to do a potentially out-of-bounds
+access because we know it won't cross a page boundary. Still, KASAN
+will report this as a bug.
+
+Add a read_word_at_a_time() function which is supposed to be used in
+such cases. In read_word_at_a_time() KASAN performs a relaxed check -
+only the first byte of the access is validated.
+
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/compiler.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -292,6 +292,7 @@ static __always_inline void __write_once
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
++#include <linux/kasan-checks.h>
+
+ #define __READ_ONCE(x, check) \
+ ({ \
+@@ -310,6 +311,13 @@ static __always_inline void __write_once
+ */
+ #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+
++static __no_kasan_or_inline
++unsigned long read_word_at_a_time(const void *addr)
++{
++ kasan_check_read(addr, 1);
++ return *(unsigned long *)addr;
++}
++
+ #define WRITE_ONCE(x, val) \
+ ({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
--- /dev/null
+From foo@baz Thu Oct 22 03:56:50 PM CEST 2020
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Thu, 1 Feb 2018 21:00:48 +0300
+Subject: compiler.h, kasan: Avoid duplicating __read_once_size_nocheck()
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit bdb5ac801af3d81d36732c2f640d6a1d3df83826 upstream.
+
+Instead of having two identical __read_once_size_nocheck() functions
+with different attributes, consolidate all the differences in a new
+macro, __no_kasan_or_inline, and use it. No functional changes.
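+
+The effect, sketched with a hypothetical helper (not part of this
+patch): a function that must stay uninstrumented under KASAN, but
+inlined otherwise, is now declared once:
+
+ /* Single definition; the attribute set is chosen by the config. */
+ static __no_kasan_or_inline
+ unsigned long load_unchecked(const unsigned long *p)
+ {
+	return *p;
+ }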
+
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/compiler.h | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -241,23 +241,21 @@ void __read_once_size(const volatile voi
+
+ #ifdef CONFIG_KASAN
+ /*
+- * This function is not 'inline' because __no_sanitize_address confilcts
++ * We can't declare function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+-static __no_sanitize_address __maybe_unused
+-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+-{
+- __READ_ONCE_SIZE;
+-}
++# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+ #else
+-static __always_inline
++# define __no_kasan_or_inline __always_inline
++#endif
++
++static __no_kasan_or_inline
+ void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+ {
+ __READ_ONCE_SIZE;
+ }
+-#endif
+
+ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+ {
--- /dev/null
+From foo@baz Thu Oct 22 03:56:50 PM CEST 2020
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Thu, 1 Feb 2018 21:00:50 +0300
+Subject: lib/strscpy: Shut up KASAN false-positives in strscpy()
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 1a3241ff10d038ecd096d03380327f2a0b5840a6 upstream.
+
+strscpy() performs word-at-a-time optimistic reads, so it may access
+memory past the end of the object. This is perfectly fine since
+strscpy() doesn't use that (past-the-end) data and makes sure the
+optimistic read won't cross a page boundary.
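+
+For the aligned case the guarantee is structural: an aligned
+word-sized read cannot cross a page boundary, because PAGE_SIZE is a
+multiple of the word size. An illustrative compile-time assertion
+(not part of this patch):
+
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(unsigned long) != 0);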
+
+Use the new read_word_at_a_time() to shut up KASAN.
+
+Note that this could potentially hide some bugs. In the example below,
+strscpy() will copy more than it should (1-3 extra uninitialized bytes):
+
+ char dst[8];
+ char *src;
+
+ src = kmalloc(5, GFP_KERNEL);
+ memset(src, 0xff, 5);
+ strscpy(dst, src, 8);
+
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/string.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -202,7 +202,7 @@ ssize_t strscpy(char *dest, const char *
+ while (max >= sizeof(unsigned long)) {
+ unsigned long c, data;
+
+- c = *(unsigned long *)(src+res);
++ c = read_word_at_a_time(src+res);
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
--- /dev/null
+From foo@baz Thu Oct 22 03:56:50 PM CEST 2020
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Fri, 20 May 2016 16:59:28 -0700
+Subject: mm/kasan: add API to check memory regions
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 64f8ebaf115bcddc4aaa902f981c57ba6506bc42 upstream.
+
+Memory accesses coded in assembly won't be seen by KASAN, as the
+compiler can instrument only C code. Add a kasan_check_[read,write]()
+API which is going to be used to check a certain memory range.
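+
+A sketch of the intended use, with a made-up assembly routine
+(arch_copy_words() is hypothetical, for illustration only):
+
+ #include <linux/kasan-checks.h>
+
+ static inline void copy_words(void *dst, const void *src, size_t len)
+ {
+	/* Tell KASAN about accesses the compiler cannot instrument. */
+	kasan_check_read(src, len);
+	kasan_check_write(dst, len);
+	arch_copy_words(dst, src, len);	/* hypothetical asm helper */
+ }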
+
+Link: http://lkml.kernel.org/r/1462538722-1574-3-git-send-email-aryabinin@virtuozzo.com
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4: drop change in MAINTAINERS]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kasan-checks.h | 12 ++++++++++++
+ mm/kasan/kasan.c | 12 ++++++++++++
+ 2 files changed, 24 insertions(+)
+ create mode 100644 include/linux/kasan-checks.h
+
+--- /dev/null
++++ b/include/linux/kasan-checks.h
+@@ -0,0 +1,12 @@
++#ifndef _LINUX_KASAN_CHECKS_H
++#define _LINUX_KASAN_CHECKS_H
++
++#ifdef CONFIG_KASAN
++void kasan_check_read(const void *p, unsigned int size);
++void kasan_check_write(const void *p, unsigned int size);
++#else
++static inline void kasan_check_read(const void *p, unsigned int size) { }
++static inline void kasan_check_write(const void *p, unsigned int size) { }
++#endif
++
++#endif
+--- a/mm/kasan/kasan.c
++++ b/mm/kasan/kasan.c
+@@ -278,6 +278,18 @@ static void check_memory_region(unsigned
+ check_memory_region_inline(addr, size, write, ret_ip);
+ }
+
++void kasan_check_read(const void *p, unsigned int size)
++{
++ check_memory_region((unsigned long)p, size, false, _RET_IP_);
++}
++EXPORT_SYMBOL(kasan_check_read);
++
++void kasan_check_write(const void *p, unsigned int size)
++{
++ check_memory_region((unsigned long)p, size, true, _RET_IP_);
++}
++EXPORT_SYMBOL(kasan_check_write);
++
+ #undef memset
+ void *memset(void *addr, int c, size_t len)
+ {
--- /dev/null
+From foo@baz Thu Oct 22 03:56:50 PM CEST 2020
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Fri, 20 May 2016 16:59:20 -0700
+Subject: mm/kasan: print name of mem[set,cpy,move]() caller in report
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 936bb4bbbb832f81055328b84e5afe1fc7246a8d upstream.
+
+When a bogus memory access happens in mem[set,cpy,move]() it's usually
+the caller's fault. So don't blame mem[set,cpy,move]() in the bug
+report; blame the caller instead.
+
+Before:
+ BUG: KASAN: out-of-bounds access in memset+0x23/0x40 at <address>
+After:
+ BUG: KASAN: out-of-bounds access in <memset_caller> at <address>
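+
+The mechanism: each entry point captures its caller's return address
+with the existing _RET_IP_ helper and threads it down to
+kasan_report():
+
+ #define _RET_IP_	(unsigned long)__builtin_return_address(0)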
+
+Link: http://lkml.kernel.org/r/1462538722-1574-2-git-send-email-aryabinin@virtuozzo.com
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/kasan.c | 64 +++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 34 insertions(+), 30 deletions(-)
+
+--- a/mm/kasan/kasan.c
++++ b/mm/kasan/kasan.c
+@@ -252,32 +252,36 @@ static __always_inline bool memory_is_po
+ return memory_is_poisoned_n(addr, size);
+ }
+
+-
+-static __always_inline void check_memory_region(unsigned long addr,
+- size_t size, bool write)
++static __always_inline void check_memory_region_inline(unsigned long addr,
++ size_t size, bool write,
++ unsigned long ret_ip)
+ {
+ if (unlikely(size == 0))
+ return;
+
+ if (unlikely((void *)addr <
+ kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+- kasan_report(addr, size, write, _RET_IP_);
++ kasan_report(addr, size, write, ret_ip);
+ return;
+ }
+
+ if (likely(!memory_is_poisoned(addr, size)))
+ return;
+
+- kasan_report(addr, size, write, _RET_IP_);
++ kasan_report(addr, size, write, ret_ip);
+ }
+
+-void __asan_loadN(unsigned long addr, size_t size);
+-void __asan_storeN(unsigned long addr, size_t size);
++static void check_memory_region(unsigned long addr,
++ size_t size, bool write,
++ unsigned long ret_ip)
++{
++ check_memory_region_inline(addr, size, write, ret_ip);
++}
+
+ #undef memset
+ void *memset(void *addr, int c, size_t len)
+ {
+- __asan_storeN((unsigned long)addr, len);
++ check_memory_region((unsigned long)addr, len, true, _RET_IP_);
+
+ return __memset(addr, c, len);
+ }
+@@ -285,8 +289,8 @@ void *memset(void *addr, int c, size_t l
+ #undef memmove
+ void *memmove(void *dest, const void *src, size_t len)
+ {
+- __asan_loadN((unsigned long)src, len);
+- __asan_storeN((unsigned long)dest, len);
++ check_memory_region((unsigned long)src, len, false, _RET_IP_);
++ check_memory_region((unsigned long)dest, len, true, _RET_IP_);
+
+ return __memmove(dest, src, len);
+ }
+@@ -294,8 +298,8 @@ void *memmove(void *dest, const void *sr
+ #undef memcpy
+ void *memcpy(void *dest, const void *src, size_t len)
+ {
+- __asan_loadN((unsigned long)src, len);
+- __asan_storeN((unsigned long)dest, len);
++ check_memory_region((unsigned long)src, len, false, _RET_IP_);
++ check_memory_region((unsigned long)dest, len, true, _RET_IP_);
+
+ return __memcpy(dest, src, len);
+ }
+@@ -484,22 +488,22 @@ void __asan_unregister_globals(struct ka
+ }
+ EXPORT_SYMBOL(__asan_unregister_globals);
+
+-#define DEFINE_ASAN_LOAD_STORE(size) \
+- void __asan_load##size(unsigned long addr) \
+- { \
+- check_memory_region(addr, size, false); \
+- } \
+- EXPORT_SYMBOL(__asan_load##size); \
+- __alias(__asan_load##size) \
+- void __asan_load##size##_noabort(unsigned long); \
+- EXPORT_SYMBOL(__asan_load##size##_noabort); \
+- void __asan_store##size(unsigned long addr) \
+- { \
+- check_memory_region(addr, size, true); \
+- } \
+- EXPORT_SYMBOL(__asan_store##size); \
+- __alias(__asan_store##size) \
+- void __asan_store##size##_noabort(unsigned long); \
++#define DEFINE_ASAN_LOAD_STORE(size) \
++ void __asan_load##size(unsigned long addr) \
++ { \
++ check_memory_region_inline(addr, size, false, _RET_IP_);\
++ } \
++ EXPORT_SYMBOL(__asan_load##size); \
++ __alias(__asan_load##size) \
++ void __asan_load##size##_noabort(unsigned long); \
++ EXPORT_SYMBOL(__asan_load##size##_noabort); \
++ void __asan_store##size(unsigned long addr) \
++ { \
++ check_memory_region_inline(addr, size, true, _RET_IP_); \
++ } \
++ EXPORT_SYMBOL(__asan_store##size); \
++ __alias(__asan_store##size) \
++ void __asan_store##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_store##size##_noabort)
+
+ DEFINE_ASAN_LOAD_STORE(1);
+@@ -510,7 +514,7 @@ DEFINE_ASAN_LOAD_STORE(16);
+
+ void __asan_loadN(unsigned long addr, size_t size)
+ {
+- check_memory_region(addr, size, false);
++ check_memory_region(addr, size, false, _RET_IP_);
+ }
+ EXPORT_SYMBOL(__asan_loadN);
+
+@@ -520,7 +524,7 @@ EXPORT_SYMBOL(__asan_loadN_noabort);
+
+ void __asan_storeN(unsigned long addr, size_t size)
+ {
+- check_memory_region(addr, size, true);
++ check_memory_region(addr, size, true, _RET_IP_);
+ }
+ EXPORT_SYMBOL(__asan_storeN);
+
net-ipv4-always-honour-route-mtu-during-forwarding.patch
r8169-fix-data-corruption-issue-on-rtl8402.patch
alsa-bebob-potential-info-leak-in-hwdep_read.patch
+mm-kasan-print-name-of-mem-caller-in-report.patch
+mm-kasan-add-api-to-check-memory-regions.patch
+compiler.h-kasan-avoid-duplicating-__read_once_size_nocheck.patch
+compiler.h-add-read_word_at_a_time-function.patch
+lib-strscpy-shut-up-kasan-false-positives-in-strscpy.patch
+x86-mm-ptdump-fix-soft-lockup-in-page-table-walker.patch
--- /dev/null
+From foo@baz Thu Oct 22 03:56:50 PM CEST 2020
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Fri, 10 Feb 2017 12:54:05 +0300
+Subject: x86/mm/ptdump: Fix soft lockup in page table walker
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 146fbb766934dc003fcbf755b519acef683576bf upstream.
+
+CONFIG_KASAN=y needs a lot of virtual memory mapped for its shadow.
+In that case ptdump_walk_pgd_level_core() takes a lot of time to
+walk across all page tables, and doing this without rescheduling
+causes soft lockups:
+
+ NMI watchdog: BUG: soft lockup - CPU#3 stuck for 23s! [swapper/0:1]
+ ...
+ Call Trace:
+ ptdump_walk_pgd_level_core+0x40c/0x550
+ ptdump_walk_pgd_level_checkwx+0x17/0x20
+ mark_rodata_ro+0x13b/0x150
+ kernel_init+0x2f/0x120
+ ret_from_fork+0x2c/0x40
+
+I guess that this issue might arise even without KASAN on huge machines
+with several terabytes of RAM.
+
+Stick cond_resched() in the pgd loop to fix this.
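+
+The shape of the fix, paraphrased (loop details simplified from the
+actual walker):
+
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+	/* ... dump the page tables under one pgd entry ... */
+	cond_resched();	/* yield so the soft-lockup watchdog stays quiet */
+	start++;
+ }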
+
+Reported-by: Tobias Regnery <tobias.regnery@gmail.com>
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: kasan-dev@googlegroups.com
+Cc: Alexander Potapenko <glider@google.com>
+Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20170210095405.31802-1-aryabinin@virtuozzo.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bwh: Backported to 4.4: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/dump_pagetables.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -15,6 +15,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
++#include <linux/sched.h>
+ #include <linux/seq_file.h>
+
+ #include <asm/pgtable.h>
+@@ -407,6 +408,7 @@ static void ptdump_walk_pgd_level_core(s
+ } else
+ note_page(m, &st, __pgprot(0), 1);
+
++ cond_resched();
+ start++;
+ }
+