From: Greg Kroah-Hartman
Date: Wed, 5 Oct 2022 16:49:30 +0000 (+0200)
Subject: 4.9-stable patches
X-Git-Tag: v5.4.217~14
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=37978cf332335acaa32ac4d02d9356c4ed9fe9b2;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
arm-fix-function-graph-tracer-and-unwinder-dependencies.patch
provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
wait_on_bit-add-an-acquire-memory-barrier.patch
---

diff --git a/queue-4.9/arm-fix-function-graph-tracer-and-unwinder-dependencies.patch b/queue-4.9/arm-fix-function-graph-tracer-and-unwinder-dependencies.patch
new file mode 100644
index 00000000000..efc65e8151c
--- /dev/null
+++ b/queue-4.9/arm-fix-function-graph-tracer-and-unwinder-dependencies.patch
@@ -0,0 +1,69 @@
+From 503621628b32782a07b2318e4112bd4372aa3401 Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Tue, 23 Apr 2019 17:09:38 +0100
+Subject: ARM: fix function graph tracer and unwinder dependencies
+
+From: Russell King
+
+commit 503621628b32782a07b2318e4112bd4372aa3401 upstream.
+
+Naresh Kamboju recently reported that the function-graph tracer crashes
+on ARM. The function-graph tracer assumes that the kernel is built with
+frame pointers.
+
+We explicitly disabled the function-graph tracer when building Thumb2,
+since the Thumb2 ABI doesn't have frame pointers.
+
+We recently changed the way the unwinder method was selected, which
+seems to have made it more likely that we can end up with the function-
+graph tracer enabled but without the kernel built with frame pointers.
+
+Fix up the function graph tracer dependencies so the option is not
+available when we have no possibility of having frame pointers, and
+adjust the dependencies on the unwinder option to hide the non-frame
+pointer unwinder options if the function-graph tracer is enabled.
+
+Reviewed-by: Masami Hiramatsu
+Tested-by: Masami Hiramatsu
+Signed-off-by: Russell King
+Signed-off-by: Sebastian Andrzej Siewior
+Reported-by: Danilo Cezar Zanella
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/arm/Kconfig       |    2 +-
+ arch/arm/Kconfig.debug |    6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -53,7 +53,7 @@ config ARM
+ 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
+ 	select HAVE_EXIT_THREAD
+ 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
+-	select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
++	select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL && !CC_IS_CLANG)
+ 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+ 	select HAVE_FUTEX_CMPXCHG if FUTEX
+ 	select HAVE_GCC_PLUGINS
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -17,8 +17,8 @@ config ARM_PTDUMP
+ 
+ choice
+ 	prompt "Choose kernel unwinder"
+-	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
+-	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
++	default UNWINDER_ARM if AEABI
++	default UNWINDER_FRAME_POINTER if !AEABI
+ 	help
+ 	  This determines which method will be used for unwinding kernel stack
+ 	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+@@ -35,7 +35,7 @@ config UNWINDER_FRAME_POINTER
+ 
+ config UNWINDER_ARM
+ 	bool "ARM EABI stack unwinder"
+-	depends on AEABI
++	depends on AEABI && !FUNCTION_GRAPH_TRACER
+ 	select ARM_UNWIND
+ 	help
+ 	  This option enables stack unwinding support in the kernel
diff --git a/queue-4.9/provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch b/queue-4.9/provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
new file mode 100644
index 00000000000..d91fd5a5b47
--- /dev/null
+++ b/queue-4.9/provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
@@ -0,0 +1,130 @@
+From d6ffe6067a54972564552ea45d320fb98db1ac5e Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka
+Date: Fri, 26 Aug 2022 16:43:51 -0400
+Subject: provide arch_test_bit_acquire for architectures that define test_bit
+
+From: Mikulas Patocka
+
+commit d6ffe6067a54972564552ea45d320fb98db1ac5e upstream.
+
+Some architectures define their own arch_test_bit and they also need
+arch_test_bit_acquire, otherwise they won't compile. We also clean up
+the code by using the generic test_bit if that is equivalent to the
+arch-specific version.
+
+Signed-off-by: Mikulas Patocka
+Cc: stable@vger.kernel.org
+Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier")
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/alpha/include/asm/bitops.h   |    7 +++++++
+ arch/hexagon/include/asm/bitops.h |   15 +++++++++++++++
+ arch/ia64/include/asm/bitops.h    |    7 +++++++
+ arch/m68k/include/asm/bitops.h    |    6 ++++++
+ arch/s390/include/asm/bitops.h    |    7 +++++++
+ arch/sh/include/asm/bitops-op32.h |    7 +++++++
+ 6 files changed, 49 insertions(+)
+
+--- a/arch/alpha/include/asm/bitops.h
++++ b/arch/alpha/include/asm/bitops.h
+@@ -288,6 +288,13 @@ test_bit(int nr, const volatile void * a
+ 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
+ }
+ 
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ /*
+  * ffz = Find First Zero in word. Undefined if no zero exists,
+  * so code should check against ~0UL first..
+--- a/arch/hexagon/include/asm/bitops.h
++++ b/arch/hexagon/include/asm/bitops.h
+@@ -186,7 +186,22 @@ static inline int __test_bit(int nr, con
+ 	return retval;
+ }
+ 
++static inline int __test_bit_acquire(int nr, const volatile unsigned long *addr)
++{
++	int retval;
++
++	asm volatile(
++	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
++	: "=&r" (retval)
++	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
++	: "p0", "memory"
++	);
++
++	return retval;
++}
++
+ #define test_bit(nr, addr) __test_bit(nr, addr)
++#define test_bit_acquire(nr, addr) __test_bit_acquire(nr, addr)
+ 
+ /*
+  * ffz - find first zero in word.
+--- a/arch/ia64/include/asm/bitops.h
++++ b/arch/ia64/include/asm/bitops.h
+@@ -336,6 +336,13 @@ test_bit (int nr, const volatile void *a
+ 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+ }
+ 
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ /**
+  * ffz - find the first zero bit in a long word
+  * @x: The long word to find the bit in
+--- a/arch/m68k/include/asm/bitops.h
++++ b/arch/m68k/include/asm/bitops.h
+@@ -153,6 +153,12 @@ static inline int test_bit(int nr, const
+ 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+ }
+ 
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
+ 
+ static inline int bset_reg_test_and_set_bit(int nr,
+ 					    volatile unsigned long *vaddr)
+--- a/arch/s390/include/asm/bitops.h
++++ b/arch/s390/include/asm/bitops.h
+@@ -270,6 +270,13 @@ static inline int test_bit(unsigned long
+ 	return (*addr >> (nr & 7)) & 1;
+ }
+ 
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ static inline int test_and_set_bit_lock(unsigned long nr,
+ 					volatile unsigned long *ptr)
+ {
+--- a/arch/sh/include/asm/bitops-op32.h
++++ b/arch/sh/include/asm/bitops-op32.h
+@@ -139,4 +139,11 @@ static inline int test_bit(int nr, const
+ 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+ }
+ 
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ #endif /* __ASM_SH_BITOPS_OP32_H */
diff --git a/queue-4.9/series b/queue-4.9/series
index 93bbf6b65bd..cccecfb1f83 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -14,3 +14,6 @@ selftests-fix-the-if-conditions-of-in-test_extra_fil.patch
 clk-iproc-minor-tidy-up-of-iproc-pll-data-structures.patch
 clk-iproc-do-not-rely-on-node-name-for-correct-pll-s.patch
 makefile.extrawarn-move-wcast-function-type-strict-to-w-1.patch
+arm-fix-function-graph-tracer-and-unwinder-dependencies.patch
+wait_on_bit-add-an-acquire-memory-barrier.patch
+provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
diff --git a/queue-4.9/wait_on_bit-add-an-acquire-memory-barrier.patch b/queue-4.9/wait_on_bit-add-an-acquire-memory-barrier.patch
new file mode 100644
index 00000000000..9ec67d9d1c9
--- /dev/null
+++ b/queue-4.9/wait_on_bit-add-an-acquire-memory-barrier.patch
@@ -0,0 +1,157 @@
+From 8238b4579866b7c1bb99883cfe102a43db5506ff Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka
+Date: Fri, 26 Aug 2022 09:17:08 -0400
+Subject: wait_on_bit: add an acquire memory barrier
+
+From: Mikulas Patocka
+
+commit 8238b4579866b7c1bb99883cfe102a43db5506ff upstream.
+
+There are several places in the kernel where wait_on_bit is not followed
+by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read).
+
+On architectures with weak memory ordering, it may happen that memory
+accesses that follow wait_on_bit are reordered before wait_on_bit and
+they may return invalid data.
+
+Fix this class of bugs by introducing a new function "test_bit_acquire"
+that works like test_bit, but has acquire memory ordering semantics.
+
+Signed-off-by: Mikulas Patocka
+Acked-by: Will Deacon
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/bitops.h           |   21 +++++++++++++++++++++
+ include/asm-generic/bitops/non-atomic.h |   14 ++++++++++++++
+ include/linux/buffer_head.h             |    2 +-
+ include/linux/wait.h                    |    8 ++++----
+ kernel/sched/wait.c                     |    2 +-
+ 5 files changed, 41 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -314,6 +314,20 @@ static __always_inline bool constant_tes
+ 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
+ }
+ 
++static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
++{
++	bool oldbit;
++
++	asm volatile("testb %2,%1"
++		     CC_SET(nz)
++		     : CC_OUT(nz) (oldbit)
++		     : "m" (((unsigned char *)addr)[nr >> 3]),
++		       "i" (1 << (nr & 7))
++		     :"memory");
++
++	return oldbit;
++}
++
+ static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
+ {
+ 	bool oldbit;
+@@ -340,6 +354,13 @@ static bool test_bit(int nr, const volat
+ 	 ? constant_test_bit((nr), (addr))	\
+ 	 : variable_test_bit((nr), (addr)))
+ 
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
++					  variable_test_bit(nr, addr);
++}
++
+ /**
+  * __ffs - find first set bit in word
+  * @word: The word to search
+--- a/include/asm-generic/bitops/non-atomic.h
++++ b/include/asm-generic/bitops/non-atomic.h
+@@ -2,6 +2,7 @@
+ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+ 
+ #include <asm/types.h>
++#include <asm/barrier.h>
+ 
+ /**
+  * __set_bit - Set a bit in memory
+@@ -105,4 +106,17 @@ static inline int test_bit(int nr, const
+ 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+ }
+ 
++/**
++ * arch_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
++ * @nr: bit number to test
++ * @addr: Address to start counting from
++ */
++static __always_inline bool
++arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++#define test_bit_acquire arch_test_bit_acquire
++
+ #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -162,7 +162,7 @@ static __always_inline int buffer_uptoda
+ 	 * make it consistent with folio_test_uptodate
+ 	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
+ 	 */
+-	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
++	return test_bit_acquire(BH_Uptodate, &bh->b_state);
+ }
+ 
+ #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -1066,7 +1066,7 @@ static inline int
+ wait_on_bit(unsigned long *word, int bit, unsigned mode)
+ {
+ 	might_sleep();
+-	if (!test_bit(bit, word))
++	if (!test_bit_acquire(bit, word))
+ 		return 0;
+ 	return out_of_line_wait_on_bit(word, bit,
+ 				       bit_wait,
+@@ -1091,7 +1091,7 @@ static inline int
+ wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
+ {
+ 	might_sleep();
+-	if (!test_bit(bit, word))
++	if (!test_bit_acquire(bit, word))
+ 		return 0;
+ 	return out_of_line_wait_on_bit(word, bit,
+ 				       bit_wait_io,
+@@ -1118,7 +1118,7 @@ wait_on_bit_timeout(unsigned long *word,
+ 			    unsigned long timeout)
+ {
+ 	might_sleep();
+-	if (!test_bit(bit, word))
++	if (!test_bit_acquire(bit, word))
+ 		return 0;
+ 	return out_of_line_wait_on_bit_timeout(word, bit,
+ 					       bit_wait_timeout,
+@@ -1146,7 +1146,7 @@ wait_on_bit_action(unsigned long *word,
+ 		   unsigned mode)
+ {
+ 	might_sleep();
+-	if (!test_bit(bit, word))
++	if (!test_bit_acquire(bit, word))
+ 		return 0;
+ 	return out_of_line_wait_on_bit(word, bit, action, mode);
+ }
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -389,7 +389,7 @@ __wait_on_bit(wait_queue_head_t *wq, str
+ 		prepare_to_wait(wq, &q->wait, mode);
+ 		if (test_bit(q->key.bit_nr, q->key.flags))
+ 			ret = (*action)(&q->key, mode);
+-	} while (test_bit_acquire(q->key.bit_nr, q->key.flags) && !ret);
+ 	finish_wait(wq, &q->wait);
+ 	return ret;
+ }
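
For readers unfamiliar with the bug class the two wait_on_bit patches above
address, the following stand-alone sketch reproduces it with C11 atomics in
place of the kernel's bitops. It is illustrative only and is not part of the
patches; every identifier in it is invented for the example.

/*
 * Illustrative sketch (not kernel code): the writer publishes data and then
 * clears a "busy" bit with release semantics; the reader polls the bit the
 * way wait_on_bit() does and must use an acquire load before touching the
 * data, which is the ordering the new test_bit_acquire() provides.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static _Atomic unsigned long state = 1;	/* bit 0 = "busy" */
static int data;			/* published by the writer */

static int writer(void *arg)
{
	(void)arg;
	data = 42;			/* 1: fill the buffer */
	atomic_fetch_and_explicit(&state, ~1UL,
				  memory_order_release);	/* 2: clear "busy" */
	return 0;
}

static int reader(void *arg)
{
	(void)arg;
	/* Like wait_on_bit(): spin until bit 0 reads as clear. */
	while (atomic_load_explicit(&state, memory_order_acquire) & 1UL)
		thrd_yield();
	printf("data = %d\n", data);	/* prints 42 */
	return 0;
}

int main(void)
{
	thrd_t r, w;

	thrd_create(&r, reader, NULL);
	thrd_create(&w, writer, NULL);
	thrd_join(r, NULL);
	thrd_join(w, NULL);
	return 0;
}

With the acquire load the reader is guaranteed to print 42, because the
acquire pairs with the writer's release. Weakening the load to
memory_order_relaxed models the old plain test_bit(): nothing then orders the
read of data against the flag check, and a weakly ordered CPU may deliver a
stale value (formally, the program would contain a data race).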