From: Sasha Levin
Date: Thu, 25 Jul 2019 04:19:11 +0000 (-0400)
Subject: fixes for 4.9
X-Git-Tag: v5.2.3~2^2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=5dfee0eebbbfea1d81ee8c6e7ae625cb4df7fb84;p=thirdparty%2Fkernel%2Fstable-queue.git

fixes for 4.9

Signed-off-by: Sasha Levin
---

diff --git a/queue-4.9/compiler.h-add-read_word_at_a_time-function.patch b/queue-4.9/compiler.h-add-read_word_at_a_time-function.patch
new file mode 100644
index 00000000000..9721e86ae88
--- /dev/null
+++ b/queue-4.9/compiler.h-add-read_word_at_a_time-function.patch
@@ -0,0 +1,51 @@
+From 5f384318881adcf49e2b02f7d6fb373baa819d96 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin
+Date: Thu, 1 Feb 2018 21:00:49 +0300
+Subject: compiler.h: Add read_word_at_a_time() function.
+
+[ Upstream commit 7f1e541fc8d57a143dd5df1d0a1276046e08c083 ]
+
+Sometimes we know that it's safe to do potentially out-of-bounds access
+because we know it won't cross a page boundary. Still, KASAN will
+report this as a bug.
+
+Add read_word_at_a_time() function which is supposed to be used in such
+cases. In read_word_at_a_time() KASAN performs relaxed check - only the
+first byte of access is validated.
+
+Signed-off-by: Andrey Ryabinin
+Signed-off-by: Linus Torvalds
+Signed-off-by: Sasha Levin
+---
+ include/linux/compiler.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index ced454c03819..3050de0dac96 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -302,6 +302,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+  * with an explicit memory barrier or atomic instruction that provides the
+  * required ordering.
+  */
++#include
+ 
+ #define __READ_ONCE(x, check) \
+ ({ \
+@@ -320,6 +321,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+  */
+ #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+ 
++static __no_kasan_or_inline
++unsigned long read_word_at_a_time(const void *addr)
++{
++	kasan_check_read(addr, 1);
++	return *(unsigned long *)addr;
++}
++
+ #define WRITE_ONCE(x, val) \
+ ({ \
+ 	union { typeof(x) __val; char __c[1]; } __u = \
+-- 
+2.20.1
+
diff --git a/queue-4.9/compiler.h-kasan-avoid-duplicating-__read_once_size_.patch b/queue-4.9/compiler.h-kasan-avoid-duplicating-__read_once_size_.patch
new file mode 100644
index 00000000000..e648f24236b
--- /dev/null
+++ b/queue-4.9/compiler.h-kasan-avoid-duplicating-__read_once_size_.patch
@@ -0,0 +1,55 @@
+From 296b615f97ff74dff6d5267463e2294a58fed595 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin
+Date: Thu, 1 Feb 2018 21:00:48 +0300
+Subject: compiler.h, kasan: Avoid duplicating __read_once_size_nocheck()
+
+[ Upstream commit bdb5ac801af3d81d36732c2f640d6a1d3df83826 ]
+
+Instead of having two identical __read_once_size_nocheck() functions
+with different attributes, consolidate all the difference in new macro
+__no_kasan_or_inline and use it. No functional changes.
+
+Signed-off-by: Andrey Ryabinin
+Signed-off-by: Linus Torvalds
+Signed-off-by: Sasha Levin
+---
+ include/linux/compiler.h | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 80a5bc623c47..ced454c03819 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -250,23 +250,21 @@ void __read_once_size(const volatile void *p, void *res, int size)
+ 
+ #ifdef CONFIG_KASAN
+ /*
+- * This function is not 'inline' because __no_sanitize_address confilcts
++ * We can't declare function 'inline' because __no_sanitize_address confilcts
+  * with inlining. Attempt to inline it may cause a build failure.
+  * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+  */
+-static __no_sanitize_address __maybe_unused
+-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+-{
+-	__READ_ONCE_SIZE;
+-}
++# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+ #else
+-static __always_inline
++# define __no_kasan_or_inline __always_inline
++#endif
++
++static __no_kasan_or_inline
+ void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+ {
+ 	__READ_ONCE_SIZE;
+ }
+-#endif
+ 
+ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+ {
+-- 
+2.20.1
+
diff --git a/queue-4.9/lib-strscpy-shut-up-kasan-false-positives-in-strscpy.patch b/queue-4.9/lib-strscpy-shut-up-kasan-false-positives-in-strscpy.patch
new file mode 100644
index 00000000000..e34bbe067e6
--- /dev/null
+++ b/queue-4.9/lib-strscpy-shut-up-kasan-false-positives-in-strscpy.patch
@@ -0,0 +1,47 @@
+From 3d80280bcb3a96d9613e9e55a0753229e0c023df Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin
+Date: Thu, 1 Feb 2018 21:00:50 +0300
+Subject: lib/strscpy: Shut up KASAN false-positives in strscpy()
+
+[ Upstream commit 1a3241ff10d038ecd096d03380327f2a0b5840a6 ]
+
+strscpy() performs the word-at-a-time optimistic reads. So it may
+access the memory past the end of the object, which is perfectly fine
+since strscpy() doesn't use that (past-the-end) data and makes sure the
+optimistic read won't cross a page boundary.
+
+Use new read_word_at_a_time() to shut up the KASAN.
+
+Note that this potentially could hide some bugs. In example below,
+strscpy() will copy more than we should (1-3 extra uninitialized bytes):
+
+	char dst[8];
+	char *src;
+
+	src = kmalloc(5, GFP_KERNEL);
+	memset(src, 0xff, 5);
+	strscpy(dst, src, 8);
+
+Signed-off-by: Andrey Ryabinin
+Signed-off-by: Linus Torvalds
+Signed-off-by: Sasha Levin
+---
+ lib/string.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/string.c b/lib/string.c
+index 1cd9757291b1..8f1a2a04e22f 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -202,7 +202,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
+ 	while (max >= sizeof(unsigned long)) {
+ 		unsigned long c, data;
+ 
+-		c = *(unsigned long *)(src+res);
++		c = read_word_at_a_time(src+res);
+ 		if (has_zero(c, &data, &constants)) {
+ 			data = prep_zero_mask(c, data, &constants);
+ 			data = create_zero_mask(data);
+-- 
+2.20.1
+
diff --git a/queue-4.9/series b/queue-4.9/series
index 29789e55250..52d869882e0 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -123,3 +123,6 @@ intel_th-msu-fix-single-mode-with-disabled-iommu.patch
 bluetooth-add-smp-workaround-microsoft-surface-precision-mouse-bug.patch
 usb-handle-usb3-remote-wakeup-for-lpm-enabled-devices-correctly.patch
 dm-bufio-fix-deadlock-with-loop-device.patch
+compiler.h-kasan-avoid-duplicating-__read_once_size_.patch
+compiler.h-add-read_word_at_a_time-function.patch
+lib-strscpy-shut-up-kasan-false-positives-in-strscpy.patch
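
For reference, the word-at-a-time idiom that all three backports revolve around can be sketched in plain user-space C. The sketch below is an illustration only, not the kernel implementation and not part of any patch above: read_word() and word_has_zero() are simplified, hypothetical stand-ins for the kernel's read_word_at_a_time() and has_zero(), and the source buffer is padded so the sketch itself never reads out of bounds, unlike the kmalloc(5) example quoted in the strscpy commit message.

/*
 * Illustration only -- not the kernel's strscpy() or read_word_at_a_time(),
 * just a user-space sketch of the word-at-a-time pattern described above.
 */
#include <stdio.h>
#include <string.h>

/* Stand-in for read_word_at_a_time(): fetch one unsigned long worth of bytes. */
static unsigned long read_word(const void *addr)
{
	unsigned long w;

	memcpy(&w, addr, sizeof(w));	/* memcpy keeps this well-defined in user space */
	return w;
}

/* Nonzero iff some byte of w is zero -- the classic has_zero() bit trick. */
static int word_has_zero(unsigned long w)
{
	const unsigned long ones = (unsigned long)-1 / 0xff;	/* 0x0101...01 */
	const unsigned long high = ones << 7;			/* 0x8080...80 */

	return ((w - ones) & ~w & high) != 0;
}

int main(void)
{
	/* The string is 4 bytes ("hi!" plus NUL); the buffer is padded to 16. */
	char src[16] = "hi!";
	char dst[16];
	size_t res = 0;

	/*
	 * Copy whole words until one of them contains the terminating NUL.
	 * The final word lands in dst in full, which is the "extra bytes"
	 * effect the strscpy commit message describes; KASAN only objects
	 * when the source object is shorter than the word being read.
	 */
	while (res + sizeof(unsigned long) <= sizeof(dst)) {
		unsigned long c = read_word(src + res);

		memcpy(dst + res, &c, sizeof(c));
		if (word_has_zero(c))
			break;
		res += sizeof(c);
	}
	dst[sizeof(dst) - 1] = '\0';

	printf("copied: \"%s\"\n", dst);
	return 0;
}

Built with any C compiler this prints copied: "hi!"; the detail worth noticing is that the whole final word is stored into dst before the zero-byte check, which is the same over-copy behaviour the strscpy commit message warns the relaxed read_word_at_a_time() check can hide from KASAN.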