git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 5 Oct 2022 16:49:54 +0000 (18:49 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 5 Oct 2022 16:49:54 +0000 (18:49 +0200)
added patches:
provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
wait_on_bit-add-an-acquire-memory-barrier.patch

queue-5.4/provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/wait_on_bit-add-an-acquire-memory-barrier.patch [new file with mode: 0644]

diff --git a/queue-5.4/provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch b/queue-5.4/provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
new file mode 100644 (file)
index 0000000..e3dcb93
--- /dev/null
+++ b/queue-5.4/provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
@@ -0,0 +1,130 @@
+From d6ffe6067a54972564552ea45d320fb98db1ac5e Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 26 Aug 2022 16:43:51 -0400
+Subject: provide arch_test_bit_acquire for architectures that define test_bit
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit d6ffe6067a54972564552ea45d320fb98db1ac5e upstream.
+
+Some architectures define their own arch_test_bit and they also need
+arch_test_bit_acquire, otherwise they won't compile.  We also clean up
+the code by using the generic test_bit if that is equivalent to the
+arch-specific version.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier")
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/alpha/include/asm/bitops.h   |    7 +++++++
+ arch/hexagon/include/asm/bitops.h |   15 +++++++++++++++
+ arch/ia64/include/asm/bitops.h    |    7 +++++++
+ arch/m68k/include/asm/bitops.h    |    6 ++++++
+ arch/s390/include/asm/bitops.h    |    7 +++++++
+ arch/sh/include/asm/bitops-op32.h |    7 +++++++
+ 6 files changed, 49 insertions(+)
+
+--- a/arch/alpha/include/asm/bitops.h
++++ b/arch/alpha/include/asm/bitops.h
+@@ -289,6 +289,13 @@ test_bit(int nr, const volatile void * a
+       return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
+ }
+
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++      unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++      return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ /*
+  * ffz = Find First Zero in word. Undefined if no zero exists,
+  * so code should check against ~0UL first..
+--- a/arch/hexagon/include/asm/bitops.h
++++ b/arch/hexagon/include/asm/bitops.h
+@@ -172,7 +172,22 @@ static inline int __test_bit(int nr, con
+       return retval;
+ }
+
++static inline int __test_bit_acquire(int nr, const volatile unsigned long *addr)
++{
++      int retval;
++
++      asm volatile(
++      "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
++      : "=&r" (retval)
++      : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
++      : "p0", "memory"
++      );
++
++      return retval;
++}
++
+ #define test_bit(nr, addr) __test_bit(nr, addr)
++#define test_bit_acquire(nr, addr) __test_bit_acquire(nr, addr)
+
+ /*
+  * ffz - find first zero in word.
+--- a/arch/ia64/include/asm/bitops.h
++++ b/arch/ia64/include/asm/bitops.h
+@@ -337,6 +337,13 @@ test_bit (int nr, const volatile void *a
+       return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+ }
+
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++      unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++      return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ /**
+  * ffz - find the first zero bit in a long word
+  * @x: The long word to find the bit in
+--- a/arch/m68k/include/asm/bitops.h
++++ b/arch/m68k/include/asm/bitops.h
+@@ -153,6 +153,12 @@ static inline int test_bit(int nr, const
+       return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+ }
+
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++      unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++      return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
+
+ static inline int bset_reg_test_and_set_bit(int nr,
+                                           volatile unsigned long *vaddr)
+--- a/arch/s390/include/asm/bitops.h
++++ b/arch/s390/include/asm/bitops.h
+@@ -219,6 +219,13 @@ static inline bool arch_test_bit(unsigne
+       return (*addr >> (nr & 7)) & 1;
+ }
+
++static __always_inline bool
++arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++      unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++      return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ static inline bool arch_test_and_set_bit_lock(unsigned long nr,
+                                             volatile unsigned long *ptr)
+ {
+--- a/arch/sh/include/asm/bitops-op32.h
++++ b/arch/sh/include/asm/bitops-op32.h
+@@ -140,4 +140,11 @@ static inline int test_bit(int nr, const
+       return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+ }
+
++static __always_inline bool
++test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++      unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++      return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++
+ #endif /* __ASM_SH_BITOPS_OP32_H */
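The first patch's commit message explains that every architecture providing its own test_bit() also needs a test_bit_acquire(); the alpha, ia64, m68k, s390 and sh hunks above add essentially the same smp_load_acquire()-based body, while hexagon duplicates its tstbit asm helper instead. As a rough userspace sketch of what that generic helper does (illustrative only, not kernel code: BIT_WORD is redefined locally and the GCC/Clang builtin __atomic_load_n stands in for smp_load_acquire()):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* Illustration of the acquire-ordered bit test added above: load the word
 * that holds bit 'nr' with acquire ordering, so that loads issued after
 * this call cannot be reordered before it, then mask out the bit. */
static inline bool test_bit_acquire(unsigned long nr,
				    const volatile unsigned long *addr)
{
	unsigned long word = __atomic_load_n(addr + BIT_WORD(nr),
					     __ATOMIC_ACQUIRE);

	return 1UL & (word >> (nr % BITS_PER_LONG));
}

int main(void)
{
	unsigned long map[2] = { 0 };

	map[1] = 1UL << 3;	/* set bit BITS_PER_LONG + 3 */
	printf("%d %d\n",
	       test_bit_acquire(BITS_PER_LONG + 3, map),	/* prints 1 */
	       test_bit_acquire(0, map));			/* prints 0 */
	return 0;
}

On a 64-bit target this prints "1 0"; the kernel versions differ only in using smp_load_acquire() and the kernel's own BIT_WORD()/BITS_PER_LONG definitions.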
diff --git a/queue-5.4/series b/queue-5.4/series
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..914da3c60f94554f1ea4f5c42f0cb38cf8e6e7fe 100644 (file)
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -0,0 +1,2 @@
+wait_on_bit-add-an-acquire-memory-barrier.patch
+provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch
diff --git a/queue-5.4/wait_on_bit-add-an-acquire-memory-barrier.patch b/queue-5.4/wait_on_bit-add-an-acquire-memory-barrier.patch
new file mode 100644 (file)
index 0000000..1aebd3a
--- /dev/null
+++ b/queue-5.4/wait_on_bit-add-an-acquire-memory-barrier.patch
@@ -0,0 +1,173 @@
+From 8238b4579866b7c1bb99883cfe102a43db5506ff Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 26 Aug 2022 09:17:08 -0400
+Subject: wait_on_bit: add an acquire memory barrier
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 8238b4579866b7c1bb99883cfe102a43db5506ff upstream.
+
+There are several places in the kernel where wait_on_bit is not followed
+by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read).
+
+On architectures with weak memory ordering, it may happen that memory
+accesses that follow wait_on_bit are reordered before wait_on_bit and
+they may return invalid data.
+
+Fix this class of bugs by introducing a new function "test_bit_acquire"
+that works like test_bit, but has acquire memory ordering semantics.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/bitops.h             |   21 +++++++++++++++++++++
+ include/asm-generic/bitops-instrumented.h |    6 ++++++
+ include/asm-generic/bitops/non-atomic.h   |   14 ++++++++++++++
+ include/linux/buffer_head.h               |    2 +-
+ include/linux/wait_bit.h                  |    8 ++++----
+ kernel/sched/wait_bit.c                   |    2 +-
+ 6 files changed, 47 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -207,6 +207,20 @@ static __always_inline bool constant_tes
+               (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
+ }
+
++static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
++{
++      bool oldbit;
++
++      asm volatile("testb %2,%1"
++                   CC_SET(nz)
++                   : CC_OUT(nz) (oldbit)
++                   : "m" (((unsigned char *)addr)[nr >> 3]),
++                     "i" (1 << (nr & 7))
++                   :"memory");
++
++      return oldbit;
++}
++
+ static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
+ {
+       bool oldbit;
+@@ -224,6 +238,13 @@ static __always_inline bool variable_tes
+        ? constant_test_bit((nr), (addr))      \
+        : variable_test_bit((nr), (addr)))
+
++static __always_inline bool
++arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++      return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
++                                        variable_test_bit(nr, addr);
++}
++
+ /**
+  * __ffs - find first set bit in word
+  * @word: The word to search
+--- a/include/asm-generic/bitops-instrumented.h
++++ b/include/asm-generic/bitops-instrumented.h
+@@ -238,6 +238,12 @@ static inline bool test_bit(long nr, con
+       return arch_test_bit(nr, addr);
+ }
+
++static inline bool test_bit_acquire(long nr, const volatile unsigned long *addr)
++{
++      kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
++      return arch_test_bit_acquire(nr, addr);
++}
++
+ #if defined(arch_clear_bit_unlock_is_negative_byte)
+ /**
+  * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+--- a/include/asm-generic/bitops/non-atomic.h
++++ b/include/asm-generic/bitops/non-atomic.h
+@@ -3,6 +3,7 @@
+ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+ #include <asm/types.h>
++#include <asm/barrier.h>
+
+ /**
+  * __set_bit - Set a bit in memory
+@@ -106,4 +107,17 @@ static inline int test_bit(int nr, const
+       return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+ }
+
++/**
++ * arch_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
++ * @nr: bit number to test
++ * @addr: Address to start counting from
++ */
++static __always_inline bool
++arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
++{
++      unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
++      return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
++}
++#define test_bit_acquire arch_test_bit_acquire
++
+ #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -166,7 +166,7 @@ static __always_inline int buffer_uptoda
+        * make it consistent with folio_test_uptodate
+        * pairs with smp_mb__before_atomic in set_buffer_uptodate
+        */
+-      return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
++      return test_bit_acquire(BH_Uptodate, &bh->b_state);
+ }
+
+ #define bh_offset(bh)         ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+--- a/include/linux/wait_bit.h
++++ b/include/linux/wait_bit.h
+@@ -71,7 +71,7 @@ static inline int
+ wait_on_bit(unsigned long *word, int bit, unsigned mode)
+ {
+       might_sleep();
+-      if (!test_bit(bit, word))
++      if (!test_bit_acquire(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit(word, bit,
+                                      bit_wait,
+@@ -96,7 +96,7 @@ static inline int
+ wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
+ {
+       might_sleep();
+-      if (!test_bit(bit, word))
++      if (!test_bit_acquire(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit(word, bit,
+                                      bit_wait_io,
+@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word,
+                   unsigned long timeout)
+ {
+       might_sleep();
+-      if (!test_bit(bit, word))
++      if (!test_bit_acquire(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit_timeout(word, bit,
+                                              bit_wait_timeout,
+@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word,
+                  unsigned mode)
+ {
+       might_sleep();
+-      if (!test_bit(bit, word))
++      if (!test_bit_acquire(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit(word, bit, action, mode);
+ }
+--- a/kernel/sched/wait_bit.c
++++ b/kernel/sched/wait_bit.c
+@@ -47,7 +47,7 @@ __wait_on_bit(struct wait_queue_head *wq
+               prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
+               if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
+                       ret = (*action)(&wbq_entry->key, mode);
+-      } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
++      } while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+       finish_wait(wq_head, &wbq_entry->wq_entry);
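The second commit message describes the reordering hazard only in prose. The standalone C11 sketch below is illustrative and not taken from the kernel: producer(), the consumer functions, BUSY_BIT and data are invented names, and C11 atomics stand in for the kernel's bitops and smp_load_acquire(). It shows why a plain bit test is not enough on a weakly ordered CPU and what the acquire variant buys:

#include <stdatomic.h>

#define BUSY_BIT 0

static _Atomic unsigned long state = 1UL << BUSY_BIT;	/* buffer starts busy */
static int data;					/* published by the producer */

/* Producer: fill in the data, then clear the busy bit with release
 * semantics -- the counterpart of the acquire load the patch adds. */
void producer(void)
{
	data = 42;
	atomic_fetch_and_explicit(&state, ~(1UL << BUSY_BIT),
				  memory_order_release);
}

/* Broken consumer: a relaxed load is roughly what a plain test_bit()
 * amounts to, so the read of 'data' below is not ordered against the bit
 * check and may observe a stale value on a weakly ordered CPU. */
int consumer_broken(void)
{
	while (atomic_load_explicit(&state, memory_order_relaxed) &
	       (1UL << BUSY_BIT))
		;				/* spin until the bit clears */
	return data;				/* may be stale */
}

/* Fixed consumer: the acquire load mirrors test_bit_acquire(); once the
 * bit is observed clear, the producer's write to 'data' is guaranteed to
 * be visible. */
int consumer_fixed(void)
{
	while (atomic_load_explicit(&state, memory_order_acquire) &
	       (1UL << BUSY_BIT))
		;
	return data;				/* always 42 here */
}

The wait_bit.h and wait_bit.c hunks above make the corresponding change on the kernel side: the fast-path test_bit() calls in wait_on_bit() and friends become test_bit_acquire(), so reads issued after the wait cannot be satisfied before the bit was observed clear.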