git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 21 Feb 2024 08:52:25 +0000 (09:52 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 21 Feb 2024 08:52:25 +0000 (09:52 +0100)
added patches:
apparmor-free-up-__cleanup-name.patch
dm-limit-the-number-of-targets-and-parameter-size-area.patch
dmaengine-ioat-free-up-__cleanup-name.patch
kbuild-drop-wdeclaration-after-statement.patch
locking-introduce-__cleanup-based-infrastructure.patch
nilfs2-fix-potential-bug-in-end_buffer_async_write.patch
nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch
of-property-add-in-ports-out-ports-support-to-of_graph_get_port_parent.patch
sched-membarrier-reduce-the-ability-to-hammer-on-sys_membarrier.patch

queue-6.1/apparmor-free-up-__cleanup-name.patch [new file with mode: 0644]
queue-6.1/dm-limit-the-number-of-targets-and-parameter-size-area.patch [new file with mode: 0644]
queue-6.1/dmaengine-ioat-free-up-__cleanup-name.patch [new file with mode: 0644]
queue-6.1/kbuild-drop-wdeclaration-after-statement.patch [new file with mode: 0644]
queue-6.1/locking-introduce-__cleanup-based-infrastructure.patch [new file with mode: 0644]
queue-6.1/nilfs2-fix-potential-bug-in-end_buffer_async_write.patch [new file with mode: 0644]
queue-6.1/nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch [new file with mode: 0644]
queue-6.1/of-property-add-in-ports-out-ports-support-to-of_graph_get_port_parent.patch [new file with mode: 0644]
queue-6.1/sched-membarrier-reduce-the-ability-to-hammer-on-sys_membarrier.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/apparmor-free-up-__cleanup-name.patch b/queue-6.1/apparmor-free-up-__cleanup-name.patch
new file mode 100644 (file)
index 0000000..5b5bc9a
--- /dev/null
@@ -0,0 +1,49 @@
+From 9a1f37ebcfe061721564042254719dc8fd5c9fa0 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 9 Jun 2023 09:48:59 +0200
+Subject: apparmor: Free up __cleanup() name
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 9a1f37ebcfe061721564042254719dc8fd5c9fa0 upstream.
+
+In order to use __cleanup for __attribute__((__cleanup__(func))) the
+name must not be used for anything else. Avoid the conflict.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: John Johansen <john.johansen@canonical.com>
+Link: https://lkml.kernel.org/r/20230612093537.536441207%40infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/apparmor/include/lib.h |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/security/apparmor/include/lib.h
++++ b/security/apparmor/include/lib.h
+@@ -226,7 +226,7 @@ void aa_policy_destroy(struct aa_policy
+  */
+ #define fn_label_build(L, P, GFP, FN)                                 \
+ ({                                                                    \
+-      __label__ __cleanup, __done;                                    \
++      __label__ __do_cleanup, __done;                                 \
+       struct aa_label *__new_;                                        \
+                                                                       \
+       if ((L)->size > 1) {                                            \
+@@ -244,7 +244,7 @@ void aa_policy_destroy(struct aa_policy
+                       __new_ = (FN);                                  \
+                       AA_BUG(!__new_);                                \
+                       if (IS_ERR(__new_))                             \
+-                              goto __cleanup;                         \
++                              goto __do_cleanup;                      \
+                       __lvec[__j++] = __new_;                         \
+               }                                                       \
+               for (__j = __count = 0; __j < (L)->size; __j++)         \
+@@ -266,7 +266,7 @@ void aa_policy_destroy(struct aa_policy
+                       vec_cleanup(profile, __pvec, __count);          \
+               } else                                                  \
+                       __new_ = NULL;                                  \
+-__cleanup:                                                            \
++__do_cleanup:                                                         \
+               vec_cleanup(label, __lvec, (L)->size);                  \
+       } else {                                                        \
+               (P) = labels_profile(L);                                \
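
For context, __attribute__((__cleanup__(func))) arranges for func(&var) to run whenever var leaves scope, and this series later wraps that attribute in a macro literally named __cleanup(); any other use of the identifier, such as the local label renamed above, has to give way. A minimal, self-contained userspace sketch of the attribute itself (free_charp and the demo program are made-up illustration, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Invoked automatically with a pointer to the variable at scope exit. */
static void free_charp(char **p)
{
        free(*p);
}

int main(void)
{
        char *buf __attribute__((__cleanup__(free_charp))) = malloc(16);

        if (!buf)
                return 1;
        snprintf(buf, 16, "hello");
        puts(buf);
        return 0;       /* free_charp(&buf) runs here; no explicit free() needed */
}
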
diff --git a/queue-6.1/dm-limit-the-number-of-targets-and-parameter-size-area.patch b/queue-6.1/dm-limit-the-number-of-targets-and-parameter-size-area.patch
new file mode 100644 (file)
index 0000000..b3fdf48
--- /dev/null
@@ -0,0 +1,72 @@
+From bd504bcfec41a503b32054da5472904b404341a4 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 9 Jan 2024 15:57:56 +0100
+Subject: dm: limit the number of targets and parameter size area
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit bd504bcfec41a503b32054da5472904b404341a4 upstream.
+
+The kvmalloc function fails with a warning if the size is larger than
+INT_MAX. The warning was triggered by a syscall testing robot.
+
+In order to avoid the warning, this commit limits the number of targets to
+1048576 and the size of the parameter area to 1073741824.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-core.h  |    2 ++
+ drivers/md/dm-ioctl.c |    3 ++-
+ drivers/md/dm-table.c |    9 +++++++--
+ 3 files changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -21,6 +21,8 @@
+ #include "dm-ima.h"
+ #define DM_RESERVED_MAX_IOS           1024
++#define DM_MAX_TARGETS                        1048576
++#define DM_MAX_TARGET_PARAMS          1024
+ struct dm_io;
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1877,7 +1877,8 @@ static int copy_params(struct dm_ioctl _
+                          minimum_data_size - sizeof(param_kernel->version)))
+               return -EFAULT;
+-      if (param_kernel->data_size < minimum_data_size) {
++      if (unlikely(param_kernel->data_size < minimum_data_size) ||
++          unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
+               DMERR("Invalid data size in the ioctl structure: %u",
+                     param_kernel->data_size);
+               return -EINVAL;
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -128,7 +128,12 @@ static int alloc_targets(struct dm_table
+ int dm_table_create(struct dm_table **result, fmode_t mode,
+                   unsigned int num_targets, struct mapped_device *md)
+ {
+-      struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
++      struct dm_table *t;
++
++      if (num_targets > DM_MAX_TARGETS)
++              return -EOVERFLOW;
++
++      t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               return -ENOMEM;
+@@ -143,7 +148,7 @@ int dm_table_create(struct dm_table **re
+       if (!num_targets) {
+               kfree(t);
+-              return -ENOMEM;
++              return -EOVERFLOW;
+       }
+       if (alloc_targets(t, num_targets)) {
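
The two new limits multiply out to exactly 1 GiB, which bounds param_kernel->data_size in copy_params() and keeps every kvmalloc() request comfortably below the INT_MAX threshold that produced the warning. A quick standalone check of that arithmetic (illustration only, not part of the patch):

#include <assert.h>
#include <limits.h>

#define DM_MAX_TARGETS          1048576
#define DM_MAX_TARGET_PARAMS    1024

int main(void)
{
        /* Upper bound now enforced on the ioctl parameter area */
        long long limit = (long long)DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS;

        assert(limit == 1073741824LL);  /* exactly 1 GiB */
        assert(limit < INT_MAX);        /* below kvmalloc's warning threshold */
        return 0;
}
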
diff --git a/queue-6.1/dmaengine-ioat-free-up-__cleanup-name.patch b/queue-6.1/dmaengine-ioat-free-up-__cleanup-name.patch
new file mode 100644 (file)
index 0000000..d7d2e6e
--- /dev/null
@@ -0,0 +1,72 @@
+From f62141ac730d6fe73a05750cb4482aabb681cfb9 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 27 Sep 2022 11:32:41 +0200
+Subject: dmaengine: ioat: Free up __cleanup() name
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit f62141ac730d6fe73a05750cb4482aabb681cfb9 upstream.
+
+In order to use __cleanup for __attribute__((__cleanup__(func))) the
+name must not be used for anything else. Avoid the conflict.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lkml.kernel.org/r/20230612093537.467120754%40infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/ioat/dma.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -584,11 +584,11 @@ desc_get_errstat(struct ioatdma_chan *io
+ }
+ /**
+- * __cleanup - reclaim used descriptors
++ * __ioat_cleanup - reclaim used descriptors
+  * @ioat_chan: channel (ring) to clean
+  * @phys_complete: zeroed (or not) completion address (from status)
+  */
+-static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
++static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+ {
+       struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+       struct ioat_ring_ent *desc;
+@@ -675,7 +675,7 @@ static void ioat_cleanup(struct ioatdma_
+       spin_lock_bh(&ioat_chan->cleanup_lock);
+       if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+       if (is_ioat_halted(*ioat_chan->completion)) {
+               u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+@@ -712,7 +712,7 @@ static void ioat_restart_channel(struct
+       ioat_quiesce(ioat_chan, 0);
+       if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+       __ioat_restart_chan(ioat_chan);
+ }
+@@ -786,7 +786,7 @@ static void ioat_eh(struct ioatdma_chan
+       /* cleanup so tail points to descriptor that caused the error */
+       if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+@@ -943,7 +943,7 @@ void ioat_timer_event(struct timer_list
+               /* timer restarted in ioat_cleanup_preamble
+                * and IOAT_COMPLETION_ACK cleared
+                */
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+               goto unlock_out;
+       }
diff --git a/queue-6.1/kbuild-drop-wdeclaration-after-statement.patch b/queue-6.1/kbuild-drop-wdeclaration-after-statement.patch
new file mode 100644 (file)
index 0000000..ae85be1
--- /dev/null
@@ -0,0 +1,60 @@
+From b5ec6fd286dfa466f64cb0e56ed768092d0342ae Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 9 Jun 2023 11:28:30 +0200
+Subject: kbuild: Drop -Wdeclaration-after-statement
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit b5ec6fd286dfa466f64cb0e56ed768092d0342ae upstream.
+
+With the advent of scope-based resource management it becomes really
+tedious to abide by the constraints of -Wdeclaration-after-statement.
+
+It will still be recommended to place declarations at the start of a
+scope where possible, but it will no longer be enforced.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/CAHk-%3Dwi-RyoUhbChiVaJZoZXheAwnJ7OO%3DGxe85BkPAd93TwDA%40mail.gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Makefile                          |    6 +-----
+ arch/arm64/kernel/vdso32/Makefile |    2 --
+ 2 files changed, 1 insertion(+), 7 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -459,8 +459,7 @@ HOSTRUSTC = rustc
+ HOSTPKG_CONFIG        = pkg-config
+ KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+-                       -O2 -fomit-frame-pointer -std=gnu11 \
+-                       -Wdeclaration-after-statement
++                       -O2 -fomit-frame-pointer -std=gnu11
+ KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+@@ -1018,9 +1017,6 @@ endif
+ # arch Makefile may override CC so keep this after arch Makefile is included
+ NOSTDINC_FLAGS += -nostdinc
+-# warn about C99 declaration after statement
+-KBUILD_CFLAGS += -Wdeclaration-after-statement
+-
+ # Variable Length Arrays (VLAs) should not be used anywhere in the kernel
+ KBUILD_CFLAGS += -Wvla
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -68,11 +68,9 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-pr
+                -fno-strict-aliasing -fno-common \
+                -Werror-implicit-function-declaration \
+                -Wno-format-security \
+-               -Wdeclaration-after-statement \
+                -std=gnu11
+ VDSO_CFLAGS  += -O2
+ # Some useful compiler-dependent flags from top-level Makefile
+-VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,)
+ VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+ VDSO_CFLAGS += -fno-strict-overflow
+ VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
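
The warning is dropped because the __cleanup()-based helpers pair a variable's declaration with resource acquisition, which naturally places declarations after earlier statements. A hedged sketch of the pattern the old flag would have rejected (struct foo, prepare() and the function itself are invented names; __free(kfree) comes from the cleanup infrastructure added elsewhere in this series):

struct foo {
        int val;
};

static int demo(gfp_t gfp)
{
        int ret = prepare();            /* hypothetical earlier work */

        if (ret)
                return ret;

        /* Declaration after statements: previously rejected by
         * -Wdeclaration-after-statement, now allowed so the allocation's
         * lifetime starts exactly where it is first needed. */
        struct foo *f __free(kfree) = kzalloc(sizeof(*f), gfp);
        if (!f)
                return -ENOMEM;

        f->val = ret;
        return 0;                       /* f is kfree()d automatically on return */
}
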
diff --git a/queue-6.1/locking-introduce-__cleanup-based-infrastructure.patch b/queue-6.1/locking-introduce-__cleanup-based-infrastructure.patch
new file mode 100644 (file)
index 0000000..92431dc
--- /dev/null
@@ -0,0 +1,525 @@
+From 54da6a0924311c7cf5015533991e44fb8eb12773 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 26 May 2023 12:23:48 +0200
+Subject: locking: Introduce __cleanup() based infrastructure
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 54da6a0924311c7cf5015533991e44fb8eb12773 upstream.
+
+Use __attribute__((__cleanup__(func))) to build:
+
+ - simple auto-release pointers using __free()
+
+ - 'classes' with constructor and destructor semantics for
+   scope-based resource management.
+
+ - lock guards based on the above classes.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230612093537.614161713%40infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cleanup.h             |  171 ++++++++++++++++++++++++++++++++++++
+ include/linux/compiler-clang.h      |    9 +
+ include/linux/compiler_attributes.h |    6 +
+ include/linux/device.h              |    7 +
+ include/linux/file.h                |    6 +
+ include/linux/irqflags.h            |    7 +
+ include/linux/mutex.h               |    4 
+ include/linux/percpu.h              |    4 
+ include/linux/preempt.h             |    5 +
+ include/linux/rcupdate.h            |    3 
+ include/linux/rwsem.h               |    8 +
+ include/linux/sched/task.h          |    2 
+ include/linux/slab.h                |    3 
+ include/linux/spinlock.h            |   31 ++++++
+ include/linux/srcu.h                |    5 +
+ scripts/checkpatch.pl               |    2 
+ 16 files changed, 272 insertions(+), 1 deletion(-)
+ create mode 100644 include/linux/cleanup.h
+
+--- /dev/null
++++ b/include/linux/cleanup.h
+@@ -0,0 +1,171 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_GUARDS_H
++#define __LINUX_GUARDS_H
++
++#include <linux/compiler.h>
++
++/*
++ * DEFINE_FREE(name, type, free):
++ *    simple helper macro that defines the required wrapper for a __free()
++ *    based cleanup function. @free is an expression using '_T' to access
++ *    the variable.
++ *
++ * __free(name):
++ *    variable attribute to add a scope-based cleanup to the variable.
++ *
++ * no_free_ptr(var):
++ *    like a non-atomic xchg(var, NULL), such that the cleanup function will
++ *    be inhibited -- provided it sanely deals with a NULL value.
++ *
++ * return_ptr(p):
++ *    returns p while inhibiting the __free().
++ *
++ * Ex.
++ *
++ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++ *
++ *    struct obj *p __free(kfree) = kmalloc(...);
++ *    if (!p)
++ *            return NULL;
++ *
++ *    if (!init_obj(p))
++ *            return NULL;
++ *
++ *    return_ptr(p);
++ */
++
++#define DEFINE_FREE(_name, _type, _free) \
++      static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
++
++#define __free(_name) __cleanup(__free_##_name)
++
++#define no_free_ptr(p) \
++      ({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
++
++#define return_ptr(p) return no_free_ptr(p)
++
++
++/*
++ * DEFINE_CLASS(name, type, exit, init, init_args...):
++ *    helper to define the destructor and constructor for a type.
++ *    @exit is an expression using '_T' -- similar to FREE above.
++ *    @init is an expression in @init_args resulting in @type
++ *
++ * EXTEND_CLASS(name, ext, init, init_args...):
++ *    extends class @name to @name@ext with the new constructor
++ *
++ * CLASS(name, var)(args...):
++ *    declare the variable @var as an instance of the named class
++ *
++ * Ex.
++ *
++ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
++ *
++ *    CLASS(fdget, f)(fd);
++ *    if (!f.file)
++ *            return -EBADF;
++ *
++ *    // use 'f' without concern
++ */
++
++#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)               \
++typedef _type class_##_name##_t;                                      \
++static inline void class_##_name##_destructor(_type *p)                       \
++{ _type _T = *p; _exit; }                                             \
++static inline _type class_##_name##_constructor(_init_args)           \
++{ _type t = _init; return t; }
++
++#define EXTEND_CLASS(_name, ext, _init, _init_args...)                        \
++typedef class_##_name##_t class_##_name##ext##_t;                     \
++static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
++{ class_##_name##_destructor(p); }                                    \
++static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
++{ class_##_name##_t t = _init; return t; }
++
++#define CLASS(_name, var)                                             \
++      class_##_name##_t var __cleanup(class_##_name##_destructor) =   \
++              class_##_name##_constructor
++
++
++/*
++ * DEFINE_GUARD(name, type, lock, unlock):
++ *    trivial wrapper around DEFINE_CLASS() above specifically
++ *    for locks.
++ *
++ * guard(name):
++ *    an anonymous instance of the (guard) class
++ *
++ * scoped_guard (name, args...) { }:
++ *    similar to CLASS(name, scope)(args), except the variable (with the
++ *    explicit name 'scope') is declared in a for-loop such that its scope is
++ *    bound to the next (compound) statement.
++ *
++ */
++
++#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
++      DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
++
++#define guard(_name) \
++      CLASS(_name, __UNIQUE_ID(guard))
++
++#define scoped_guard(_name, args...)                                  \
++      for (CLASS(_name, scope)(args),                                 \
++           *done = NULL; !done; done = (void *)1)
++
++/*
++ * Additional helper macros for generating lock guards with types, either for
++ * locks that don't have a native type (eg. RCU, preempt) or those that need a
++ * 'fat' pointer (eg. spin_lock_irqsave).
++ *
++ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
++ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
++ *
++ * will result in the following type:
++ *
++ *   typedef struct {
++ *    type *lock;             // 'type := void' for the _0 variant
++ *    __VA_ARGS__;
++ *   } class_##name##_t;
++ *
++ * As above, both _lock and _unlock are statements, except this time '_T' will
++ * be a pointer to the above struct.
++ */
++
++#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)             \
++typedef struct {                                                      \
++      _type *lock;                                                    \
++      __VA_ARGS__;                                                    \
++} class_##_name##_t;                                                  \
++                                                                      \
++static inline void class_##_name##_destructor(class_##_name##_t *_T)  \
++{                                                                     \
++      if (_T->lock) { _unlock; }                                      \
++}
++
++
++#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)                    \
++static inline class_##_name##_t class_##_name##_constructor(_type *l) \
++{                                                                     \
++      class_##_name##_t _t = { .lock = l }, *_T = &_t;                \
++      _lock;                                                          \
++      return _t;                                                      \
++}
++
++#define __DEFINE_LOCK_GUARD_0(_name, _lock)                           \
++static inline class_##_name##_t class_##_name##_constructor(void)     \
++{                                                                     \
++      class_##_name##_t _t = { .lock = (void*)1 },                    \
++                       *_T __maybe_unused = &_t;                      \
++      _lock;                                                          \
++      return _t;                                                      \
++}
++
++#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)                \
++__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)             \
++__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
++
++#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)                       \
++__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)              \
++__DEFINE_LOCK_GUARD_0(_name, _lock)
++
++#endif /* __LINUX_GUARDS_H */
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -5,6 +5,15 @@
+ /* Compiler specific definitions for Clang compiler */
++/*
++ * Clang prior to 17 is being silly and considers many __cleanup() variables
++ * as unused (because they are, their sole purpose is to go out of scope).
++ *
++ * https://reviews.llvm.org/D152180
++ */
++#undef __cleanup
++#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
++
+ /* same as gcc, this was present in clang-2.6 so we can assume it works
+  * with any version that can compile the kernel
+  */
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -76,6 +76,12 @@
+ #endif
+ /*
++ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
++ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
++ */
++#define __cleanup(func)                       __attribute__((__cleanup__(func)))
++
++/*
+  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+  */
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -30,6 +30,7 @@
+ #include <linux/device/bus.h>
+ #include <linux/device/class.h>
+ #include <linux/device/driver.h>
++#include <linux/cleanup.h>
+ #include <asm/device.h>
+ struct device;
+@@ -898,6 +899,9 @@ void device_unregister(struct device *de
+ void device_initialize(struct device *dev);
+ int __must_check device_add(struct device *dev);
+ void device_del(struct device *dev);
++
++DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
++
+ int device_for_each_child(struct device *dev, void *data,
+                         int (*fn)(struct device *dev, void *data));
+ int device_for_each_child_reverse(struct device *dev, void *data,
+@@ -1071,6 +1075,9 @@ extern int (*platform_notify_remove)(str
+  */
+ struct device *get_device(struct device *dev);
+ void put_device(struct device *dev);
++
++DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
++
+ bool kill_device(struct device *dev);
+ #ifdef CONFIG_DEVTMPFS
+--- a/include/linux/file.h
++++ b/include/linux/file.h
+@@ -10,6 +10,7 @@
+ #include <linux/types.h>
+ #include <linux/posix_types.h>
+ #include <linux/errno.h>
++#include <linux/cleanup.h>
+ struct file;
+@@ -80,6 +81,8 @@ static inline void fdput_pos(struct fd f
+       fdput(f);
+ }
++DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
++
+ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
+ extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
+ extern void set_close_on_exec(unsigned int fd, int flag);
+@@ -88,6 +91,9 @@ extern int __get_unused_fd_flags(unsigne
+ extern int get_unused_fd_flags(unsigned flags);
+ extern void put_unused_fd(unsigned int fd);
++DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
++           get_unused_fd_flags(flags), unsigned flags)
++
+ extern void fd_install(unsigned int fd, struct file *file);
+ extern int __receive_fd(struct file *file, int __user *ufd,
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -13,6 +13,7 @@
+ #define _LINUX_TRACE_IRQFLAGS_H
+ #include <linux/typecheck.h>
++#include <linux/cleanup.h>
+ #include <asm/irqflags.h>
+ #include <asm/percpu.h>
+@@ -267,4 +268,10 @@ extern void warn_bogus_irq_restore(void)
+ #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
++DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
++DEFINE_LOCK_GUARD_0(irqsave,
++                  local_irq_save(_T->flags),
++                  local_irq_restore(_T->flags),
++                  unsigned long flags)
++
+ #endif
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -19,6 +19,7 @@
+ #include <asm/processor.h>
+ #include <linux/osq_lock.h>
+ #include <linux/debug_locks.h>
++#include <linux/cleanup.h>
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __DEP_MAP_MUTEX_INITIALIZER(lockname)                        \
+@@ -219,4 +220,7 @@ extern void mutex_unlock(struct mutex *l
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
++DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
++DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
++
+ #endif /* __LINUX_MUTEX_H */
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -8,6 +8,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/pfn.h>
+ #include <linux/init.h>
++#include <linux/cleanup.h>
+ #include <asm/percpu.h>
+@@ -128,6 +129,9 @@ extern void __init setup_per_cpu_areas(v
+ extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+ extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
+ extern void free_percpu(void __percpu *__pdata);
++
++DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
++
+ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+ #define alloc_percpu_gfp(type, gfp)                                   \
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -8,6 +8,7 @@
+  */
+ #include <linux/linkage.h>
++#include <linux/cleanup.h>
+ #include <linux/list.h>
+ /*
+@@ -474,4 +475,8 @@ static __always_inline void preempt_enab
+               preempt_enable();
+ }
++DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
++DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
++DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
++
+ #endif /* __LINUX_PREEMPT_H */
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -27,6 +27,7 @@
+ #include <linux/preempt.h>
+ #include <linux/bottom_half.h>
+ #include <linux/lockdep.h>
++#include <linux/cleanup.h>
+ #include <asm/processor.h>
+ #include <linux/cpumask.h>
+ #include <linux/context_tracking_irq.h>
+@@ -1077,4 +1078,6 @@ rcu_head_after_call_rcu(struct rcu_head
+ extern int rcu_expedited;
+ extern int rcu_normal;
++DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
++
+ #endif /* __LINUX_RCUPDATE_H */
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -15,6 +15,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/atomic.h>
+ #include <linux/err.h>
++#include <linux/cleanup.h>
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __RWSEM_DEP_MAP_INIT(lockname)                       \
+@@ -201,6 +202,13 @@ extern void up_read(struct rw_semaphore
+  */
+ extern void up_write(struct rw_semaphore *sem);
++DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
++DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
++
++DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
++DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
++
++
+ /*
+  * downgrade write lock to read lock
+  */
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -145,6 +145,8 @@ static inline void put_task_struct(struc
+               __put_task_struct(t);
+ }
++DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
++
+ static inline void put_task_struct_many(struct task_struct *t, int nr)
+ {
+       if (refcount_sub_and_test(nr, &t->usage))
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
+ #include <linux/percpu-refcount.h>
++#include <linux/cleanup.h>
+ /*
+@@ -197,6 +198,8 @@ void kfree(const void *objp);
+ void kfree_sensitive(const void *objp);
+ size_t __ksize(const void *objp);
++DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++
+ /**
+  * ksize - Report actual allocation size of associated object
+  *
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -61,6 +61,7 @@
+ #include <linux/stringify.h>
+ #include <linux/bottom_half.h>
+ #include <linux/lockdep.h>
++#include <linux/cleanup.h>
+ #include <asm/barrier.h>
+ #include <asm/mmiowb.h>
+@@ -493,5 +494,35 @@ int __alloc_bucket_spinlocks(spinlock_t
+ void free_bucket_spinlocks(spinlock_t *locks);
++DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
++                  raw_spin_lock(_T->lock),
++                  raw_spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
++                  raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
++                  raw_spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
++                  raw_spin_lock_irq(_T->lock),
++                  raw_spin_unlock_irq(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
++                  raw_spin_lock_irqsave(_T->lock, _T->flags),
++                  raw_spin_unlock_irqrestore(_T->lock, _T->flags),
++                  unsigned long flags)
++
++DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
++                  spin_lock(_T->lock),
++                  spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
++                  spin_lock_irq(_T->lock),
++                  spin_unlock_irq(_T->lock))
++
++DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
++                  spin_lock_irqsave(_T->lock, _T->flags),
++                  spin_unlock_irqrestore(_T->lock, _T->flags),
++                  unsigned long flags)
++
+ #undef __LINUX_INSIDE_SPINLOCK_H
+ #endif /* __LINUX_SPINLOCK_H */
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -212,4 +212,9 @@ static inline void smp_mb__after_srcu_re
+       /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
+ }
++DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
++                  _T->idx = srcu_read_lock(_T->lock),
++                  srcu_read_unlock(_T->lock, _T->idx),
++                  int idx)
++
+ #endif
+--- a/scripts/checkpatch.pl
++++ b/scripts/checkpatch.pl
+@@ -4971,7 +4971,7 @@ sub process {
+                               if|for|while|switch|return|case|
+                               volatile|__volatile__|
+                               __attribute__|format|__extension__|
+-                              asm|__asm__)$/x)
++                              asm|__asm__|scoped_guard)$/x)
+                       {
+                       # cpp #define statements have non-optional spaces, ie
+                       # if there is a space between the name and the open
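
A short hedged sketch of how the guard helpers defined by this header end up being used (my_lock, my_spin, my_list, struct entry and the functions are invented names; the header's own comments above already illustrate __free() and CLASS()):

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_MUTEX(my_lock);
static DEFINE_SPINLOCK(my_spin);
static LIST_HEAD(my_list);

struct entry {
        struct list_head node;
};

static void add_entry(struct entry *e)
{
        guard(mutex)(&my_lock);         /* from DEFINE_GUARD(mutex, ...) */
        list_add(&e->node, &my_list);
}                                       /* mutex_unlock() runs at scope exit */

static void bump_counter(int *counter)
{
        /* Lock scope bound to the statement; IRQ state restored when it ends */
        scoped_guard (spinlock_irqsave, &my_spin)
                (*counter)++;
}
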
diff --git a/queue-6.1/nilfs2-fix-potential-bug-in-end_buffer_async_write.patch b/queue-6.1/nilfs2-fix-potential-bug-in-end_buffer_async_write.patch
new file mode 100644 (file)
index 0000000..084cdb5
--- /dev/null
@@ -0,0 +1,99 @@
+From 5bc09b397cbf1221f8a8aacb1152650c9195b02b Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Sun, 4 Feb 2024 01:16:45 +0900
+Subject: nilfs2: fix potential bug in end_buffer_async_write
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 5bc09b397cbf1221f8a8aacb1152650c9195b02b upstream.
+
+According to a syzbot report, end_buffer_async_write(), which handles the
+completion of block device writes, may detect abnormal condition of the
+buffer async_write flag and cause a BUG_ON failure when using nilfs2.
+
+Nilfs2 itself does not use end_buffer_async_write().  But, the async_write
+flag is now used as a marker by commit 7f42ec394156 ("nilfs2: fix issue
+with race condition of competition between segments for dirty blocks") as
+a means of resolving double list insertion of dirty blocks in
+nilfs_lookup_dirty_data_buffers() and nilfs_lookup_node_buffers() and the
+resulting crash.
+
+This modification is safe as long as it is used for file data and b-tree
+node blocks where the page caches are independent.  However, it was
+irrelevant and redundant to also introduce async_write for segment summary
+and super root blocks that share buffers with the backing device.  This
+led to the possibility that the BUG_ON check in end_buffer_async_write
+would fail as described above, if independent writebacks of the backing
+device occurred in parallel.
+
+The use of async_write for segment summary buffers has already been
+removed in a previous change.
+
+Fix this issue by removing the manipulation of the async_write flag for
+the remaining super root block buffer.
+
+Link: https://lkml.kernel.org/r/20240203161645.4992-1-konishi.ryusuke@gmail.com
+Fixes: 7f42ec394156 ("nilfs2: fix issue with race condition of competition between segments for dirty blocks")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+5c04210f7c7f897c1e7f@syzkaller.appspotmail.com
+Closes: https://lkml.kernel.org/r/00000000000019a97c05fd42f8c8@google.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/segment.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1702,7 +1702,6 @@ static void nilfs_segctor_prepare_write(
+               list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+                                   b_assoc_buffers) {
+-                      set_buffer_async_write(bh);
+                       if (bh == segbuf->sb_super_root) {
+                               if (bh->b_page != bd_page) {
+                                       lock_page(bd_page);
+@@ -1713,6 +1712,7 @@ static void nilfs_segctor_prepare_write(
+                               }
+                               break;
+                       }
++                      set_buffer_async_write(bh);
+                       if (bh->b_page != fs_page) {
+                               nilfs_begin_page_io(fs_page);
+                               fs_page = bh->b_page;
+@@ -1798,7 +1798,6 @@ static void nilfs_abort_logs(struct list
+               list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+                                   b_assoc_buffers) {
+-                      clear_buffer_async_write(bh);
+                       if (bh == segbuf->sb_super_root) {
+                               clear_buffer_uptodate(bh);
+                               if (bh->b_page != bd_page) {
+@@ -1807,6 +1806,7 @@ static void nilfs_abort_logs(struct list
+                               }
+                               break;
+                       }
++                      clear_buffer_async_write(bh);
+                       if (bh->b_page != fs_page) {
+                               nilfs_end_page_io(fs_page, err);
+                               fs_page = bh->b_page;
+@@ -1894,8 +1894,9 @@ static void nilfs_segctor_complete_write
+                                BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
+                                BIT(BH_NILFS_Redirected));
+-                      set_mask_bits(&bh->b_state, clear_bits, set_bits);
+                       if (bh == segbuf->sb_super_root) {
++                              set_buffer_uptodate(bh);
++                              clear_buffer_dirty(bh);
+                               if (bh->b_page != bd_page) {
+                                       end_page_writeback(bd_page);
+                                       bd_page = bh->b_page;
+@@ -1903,6 +1904,7 @@ static void nilfs_segctor_complete_write
+                               update_sr = true;
+                               break;
+                       }
++                      set_mask_bits(&bh->b_state, clear_bits, set_bits);
+                       if (bh->b_page != fs_page) {
+                               nilfs_end_page_io(fs_page, 0);
+                               fs_page = bh->b_page;
diff --git a/queue-6.1/nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch b/queue-6.1/nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch
new file mode 100644 (file)
index 0000000..90404dc
--- /dev/null
@@ -0,0 +1,82 @@
+From 5124a0a549857c4b87173280e192eea24dea72ad Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 27 Jan 2023 01:41:14 +0900
+Subject: nilfs2: replace WARN_ONs for invalid DAT metadata block requests
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 5124a0a549857c4b87173280e192eea24dea72ad upstream.
+
+If DAT metadata file block access fails due to corruption of the DAT file
+or abnormal virtual block numbers held by b-trees or inodes, a kernel
+warning is generated.
+
+This replaces the WARN_ONs by error output, so that a kernel, booted with
+panic_on_warn, does not panic.  This patch also replaces the detected
+return code -ENOENT with another internal code -EINVAL to notify the bmap
+layer of metadata corruption.  When the bmap layer sees -EINVAL, it
+handles the abnormal situation with nilfs_bmap_convert_error() and finally
+returns code -EIO as it should.
+
+Link: https://lkml.kernel.org/r/0000000000005cc3d205ea23ddcf@google.com
+Link: https://lkml.kernel.org/r/20230126164114.6911-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: <syzbot+5d5d25f90f195a3cfcb4@syzkaller.appspotmail.com>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dat.c |   27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -40,8 +40,21 @@ static inline struct nilfs_dat_info *NIL
+ static int nilfs_dat_prepare_entry(struct inode *dat,
+                                  struct nilfs_palloc_req *req, int create)
+ {
+-      return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
+-                                          create, &req->pr_entry_bh);
++      int ret;
++
++      ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
++                                         create, &req->pr_entry_bh);
++      if (unlikely(ret == -ENOENT)) {
++              nilfs_err(dat->i_sb,
++                        "DAT doesn't have a block to manage vblocknr = %llu",
++                        (unsigned long long)req->pr_entry_nr);
++              /*
++               * Return internal code -EINVAL to notify bmap layer of
++               * metadata corruption.
++               */
++              ret = -EINVAL;
++      }
++      return ret;
+ }
+ static void nilfs_dat_commit_entry(struct inode *dat,
+@@ -123,11 +136,7 @@ static void nilfs_dat_commit_free(struct
+ int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+-      int ret;
+-
+-      ret = nilfs_dat_prepare_entry(dat, req, 0);
+-      WARN_ON(ret == -ENOENT);
+-      return ret;
++      return nilfs_dat_prepare_entry(dat, req, 0);
+ }
+ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
+@@ -154,10 +163,8 @@ int nilfs_dat_prepare_end(struct inode *
+       int ret;
+       ret = nilfs_dat_prepare_entry(dat, req, 0);
+-      if (ret < 0) {
+-              WARN_ON(ret == -ENOENT);
++      if (ret < 0)
+               return ret;
+-      }
+       kaddr = kmap_atomic(req->pr_entry_bh->b_page);
+       entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
diff --git a/queue-6.1/of-property-add-in-ports-out-ports-support-to-of_graph_get_port_parent.patch b/queue-6.1/of-property-add-in-ports-out-ports-support-to-of_graph_get_port_parent.patch
new file mode 100644 (file)
index 0000000..fab115c
--- /dev/null
@@ -0,0 +1,38 @@
+From 8f1e0d791b5281f3a38620bc7c57763dc551be15 Mon Sep 17 00:00:00 2001
+From: Saravana Kannan <saravanak@google.com>
+Date: Tue, 6 Feb 2024 17:18:02 -0800
+Subject: of: property: Add in-ports/out-ports support to of_graph_get_port_parent()
+
+From: Saravana Kannan <saravanak@google.com>
+
+commit 8f1e0d791b5281f3a38620bc7c57763dc551be15 upstream.
+
+Similar to the existing "ports" node name, coresight device tree bindings
+have added "in-ports" and "out-ports" as standard node names for a
+collection of ports.
+
+Add support for these names to of_graph_get_port_parent() so that
+remote-endpoint parsing can find the correct parent node for these
+coresight ports too.
+
+Signed-off-by: Saravana Kannan <saravanak@google.com>
+Link: https://lore.kernel.org/r/20240207011803.2637531-4-saravanak@google.com
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/of/property.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -762,7 +762,9 @@ struct device_node *of_graph_get_port_pa
+       /* Walk 3 levels up only if there is 'ports' node. */
+       for (depth = 3; depth && node; depth--) {
+               node = of_get_next_parent(node);
+-              if (depth == 2 && !of_node_name_eq(node, "ports"))
++              if (depth == 2 && !of_node_name_eq(node, "ports") &&
++                  !of_node_name_eq(node, "in-ports") &&
++                  !of_node_name_eq(node, "out-ports"))
+                       break;
+       }
+       return node;
diff --git a/queue-6.1/sched-membarrier-reduce-the-ability-to-hammer-on-sys_membarrier.patch b/queue-6.1/sched-membarrier-reduce-the-ability-to-hammer-on-sys_membarrier.patch
new file mode 100644 (file)
index 0000000..6124311
--- /dev/null
@@ -0,0 +1,60 @@
+From 944d5fe50f3f03daacfea16300e656a1691c4a23 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linuxfoundation.org>
+Date: Sun, 4 Feb 2024 15:25:12 +0000
+Subject: sched/membarrier: reduce the ability to hammer on sys_membarrier
+
+From: Linus Torvalds <torvalds@linuxfoundation.org>
+
+commit 944d5fe50f3f03daacfea16300e656a1691c4a23 upstream.
+
+On some systems, sys_membarrier can be very expensive, causing overall
+slowdowns for everything.  So put a lock on the path in order to
+serialize the accesses to prevent the ability for this to be called at
+too high of a frequency and saturate the machine.
+
+Reviewed-and-tested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Acked-by: Borislav Petkov <bp@alien8.de>
+Fixes: 22e4ebb97582 ("membarrier: Provide expedited private command")
+Fixes: c5f58bd58f43 ("membarrier: Provide GLOBAL_EXPEDITED command")
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/membarrier.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -161,6 +161,9 @@
+       | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK                \
+       | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
++static DEFINE_MUTEX(membarrier_ipi_mutex);
++#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)
++
+ static void ipi_mb(void *info)
+ {
+       smp_mb();       /* IPIs should be serializing but paranoid. */
+@@ -258,6 +261,7 @@ static int membarrier_global_expedited(v
+       if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
++      SERIALIZE_IPI();
+       cpus_read_lock();
+       rcu_read_lock();
+       for_each_online_cpu(cpu) {
+@@ -346,6 +350,7 @@ static int membarrier_private_expedited(
+       if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
++      SERIALIZE_IPI();
+       cpus_read_lock();
+       if (cpu_id >= 0) {
+@@ -459,6 +464,7 @@ static int sync_runqueues_membarrier_sta
+        * between threads which are users of @mm has its membarrier state
+        * updated.
+        */
++      SERIALIZE_IPI();
+       cpus_read_lock();
+       rcu_read_lock();
+       for_each_online_cpu(cpu) {
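
Note that no matching mutex_unlock() appears in these hunks: SERIALIZE_IPI() expands to guard(mutex)(&membarrier_ipi_mutex), so the lock is dropped automatically when each function returns, which is why the __cleanup() infrastructure is backported earlier in this series. A hedged sketch of the resulting shape (the body is abbreviated, not copied from kernel/sched/membarrier.c):

static int membarrier_some_expedited(void)
{
        SERIALIZE_IPI();                /* guard(mutex)(&membarrier_ipi_mutex) */
        cpus_read_lock();
        /* ... pick target CPUs and send the IPIs ... */
        cpus_read_unlock();
        return 0;                       /* membarrier_ipi_mutex released here */
}
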
diff --git a/queue-6.1/series b/queue-6.1/series
index a037b10a8dfb444f0f4783328512faecc106288e..c052d475bca09bb0fb9b33d654210634a197951a 100644 (file)
@@ -194,3 +194,12 @@ bpf-add-struct-for-bin_args-arg-in-bpf_bprintf_prepare.patch
 bpf-do-cleanup-in-bpf_bprintf_cleanup-only-when-needed.patch
 bpf-remove-trace_printk_lock.patch
 userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch
+dmaengine-ioat-free-up-__cleanup-name.patch
+apparmor-free-up-__cleanup-name.patch
+locking-introduce-__cleanup-based-infrastructure.patch
+kbuild-drop-wdeclaration-after-statement.patch
+sched-membarrier-reduce-the-ability-to-hammer-on-sys_membarrier.patch
+of-property-add-in-ports-out-ports-support-to-of_graph_get_port_parent.patch
+nilfs2-fix-potential-bug-in-end_buffer_async_write.patch
+nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch
+dm-limit-the-number-of-targets-and-parameter-size-area.patch