git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 17 Oct 2025 13:32:06 +0000 (15:32 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 17 Oct 2025 13:32:06 +0000 (15:32 +0200)
added patches:
arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch
fscontext-do-not-consume-log-entries-when-returning-emsgsize.patch
locking-introduce-__cleanup-based-infrastructure.patch

queue-5.10/arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch [new file with mode: 0644]
queue-5.10/fscontext-do-not-consume-log-entries-when-returning-emsgsize.patch [new file with mode: 0644]
queue-5.10/locking-introduce-__cleanup-based-infrastructure.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch b/queue-5.10/arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch
new file mode 100644
index 0000000..b0cae70
--- /dev/null
+++ b/queue-5.10/arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch
@@ -0,0 +1,87 @@
+From stable+bounces-186322-greg=kroah.com@vger.kernel.org Fri Oct 17 15:15:00 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 09:14:53 -0400
+Subject: arm64: mte: Do not flag the zero page as PG_mte_tagged
+To: stable@vger.kernel.org
+Cc: Catalin Marinas <catalin.marinas@arm.com>, Gergely Kovacs <Gergely.Kovacs2@arm.com>, Will Deacon <will@kernel.org>, David Hildenbrand <david@redhat.com>, Lance Yang <lance.yang@linux.dev>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251017131453.3942800-1-sashal@kernel.org>
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+[ Upstream commit f620d66af3165838bfa845dcf9f5f9b4089bf508 ]
+
+Commit 68d54ceeec0e ("arm64: mte: Allow PTRACE_PEEKMTETAGS access to the
+zero page") attempted to fix ptrace() reading of tags from the zero page
+by marking it as PG_mte_tagged during cpu_enable_mte(). The same commit
+also changed the ptrace() tag access permission check to the VM_MTE vma
+flag while turning the page flag test into a WARN_ON_ONCE().
+
+Attempting to set the PG_mte_tagged flag early with
+CONFIG_DEFERRED_STRUCT_PAGE_INIT enabled may either hang (after commit
+d77e59a8fccd "arm64: mte: Lock a page for MTE tag initialisation") or
+have the flags cleared later during page_alloc_init_late(). In addition,
+pages_identical() -> memcmp_pages() will reject any comparison with the
+zero page as it is marked as tagged.
+
+Partially revert the above commit to avoid setting PG_mte_tagged on the
+zero page. Update the __access_remote_tags() warning on untagged pages
+to ignore the zero page since it is known to have the tags initialised.
+
+Note that all user mappings of the zero page are marked as pte_special().
+The arm64 set_pte_at() will not call mte_sync_tags() on such pages, so
+PG_mte_tagged will remain cleared.
+
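+A hedged sketch of that gating (paraphrased, not the literal 5.10
+source of arch/arm64/include/asm/pgtable.h):
+
+	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep, pte_t pte)
+	{
+		/*
+		 * Tags are only synchronised for present, tagged,
+		 * non-special PTEs; zero-page mappings are pte_special(),
+		 * so they never reach mte_sync_tags() and PG_mte_tagged
+		 * stays clear.
+		 */
+		if (system_supports_mte() && pte_present(pte) &&
+		    pte_tagged(pte) && !pte_special(pte))
+			mte_sync_tags(ptep, pte);
+
+		set_pte(ptep, pte);
+	}
+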
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: 68d54ceeec0e ("arm64: mte: Allow PTRACE_PEEKMTETAGS access to the zero page")
+Reported-by: Gergely Kovacs <Gergely.Kovacs2@arm.com>
+Cc: stable@vger.kernel.org # 5.10.x
+Cc: Will Deacon <will@kernel.org>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Lance Yang <lance.yang@linux.dev>
+Acked-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Tested-by: Lance Yang <lance.yang@linux.dev>
+Signed-off-by: Will Deacon <will@kernel.org>
+[ replaced is_zero_page() with is_zero_pfn(page_to_pfn()) and folio APIs with page APIs ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   10 ++++++++--
+ arch/arm64/kernel/mte.c        |    3 ++-
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1768,12 +1768,18 @@ static void bti_enable(const struct arm6
+ #ifdef CONFIG_ARM64_MTE
+ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ {
++      static bool cleared_zero_page = false;
++
+       /*
+        * Clear the tags in the zero page. This needs to be done via the
+-       * linear map which has the Tagged attribute.
++       * linear map which has the Tagged attribute. Since this page is
++       * always mapped as pte_special(), set_pte_at() will not attempt to
++       * clear the tags or set PG_mte_tagged.
+        */
+-      if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
++      if (!cleared_zero_page) {
++              cleared_zero_page = true;
+               mte_clear_page_tags(lm_alias(empty_zero_page));
++      }
+ }
+ #endif /* CONFIG_ARM64_MTE */
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -247,7 +247,8 @@ static int __access_remote_tags(struct m
+                       put_page(page);
+                       break;
+               }
+-              WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
++              WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags) &&
++                           !is_zero_pfn(page_to_pfn(page)));
+               /* limit access to the end of the page */
+               offset = offset_in_page(addr);
diff --git a/queue-5.10/fscontext-do-not-consume-log-entries-when-returning-emsgsize.patch b/queue-5.10/fscontext-do-not-consume-log-entries-when-returning-emsgsize.patch
new file mode 100644
index 0000000..9a22fb1
--- /dev/null
+++ b/queue-5.10/fscontext-do-not-consume-log-entries-when-returning-emsgsize.patch
@@ -0,0 +1,125 @@
+From stable+bounces-185850-greg=kroah.com@vger.kernel.org Wed Oct 15 20:44:37 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Oct 2025 14:44:27 -0400
+Subject: fscontext: do not consume log entries when returning -EMSGSIZE
+To: stable@vger.kernel.org
+Cc: Aleksa Sarai <cyphar@cyphar.com>, David Howells <dhowells@redhat.com>, Christian Brauner <brauner@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251015184427.1495851-2-sashal@kernel.org>
+
+From: Aleksa Sarai <cyphar@cyphar.com>
+
+[ Upstream commit 72d271a7baa7062cb27e774ac37c5459c6d20e22 ]
+
+Userspace generally expects APIs that return -EMSGSIZE to allow them to
+adjust their buffer size and retry the operation. However, the
+fscontext log would previously clear the message even in the -EMSGSIZE
+case.
+
+Given that it is very cheap for us to check whether the buffer is too
+small before we remove the message from the ring buffer, let's just do
+that instead. While we're at it, refactor some of fscontext_read() into a
+separate helper to make the ring buffer logic a bit easier to read.
+
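+For illustration only (not part of this patch), userspace can now grow
+its buffer and retry; a minimal sketch, where fs_fd comes from
+fsopen(2) and the helper name is made up:
+
+	#include <errno.h>
+	#include <stdlib.h>
+	#include <unistd.h>
+
+	static ssize_t read_one_log_entry(int fs_fd, char **bufp, size_t *lenp)
+	{
+		for (;;) {
+			ssize_t n = read(fs_fd, *bufp, *lenp);
+
+			if (n >= 0 || errno != EMSGSIZE)
+				return n; /* one message, or a real error */
+
+			/* Entry is still queued: enlarge and retry. */
+			char *tmp = realloc(*bufp, *lenp * 2);
+			if (!tmp)
+				return -1;
+			*bufp = tmp;
+			*lenp *= 2;
+		}
+	}
+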
+Fixes: 007ec26cdc9f ("vfs: Implement logging through fs_context")
+Cc: David Howells <dhowells@redhat.com>
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
+Link: https://lore.kernel.org/20250807-fscontext-log-cleanups-v3-1-8d91d6242dc3@cyphar.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fsopen.c |   70 ++++++++++++++++++++++++++++++++----------------------------
+ 1 file changed, 38 insertions(+), 32 deletions(-)
+
+--- a/fs/fsopen.c
++++ b/fs/fsopen.c
+@@ -18,50 +18,56 @@
+ #include "internal.h"
+ #include "mount.h"
++static inline const char *fetch_message_locked(struct fc_log *log, size_t len,
++                                             bool *need_free)
++{
++      const char *p;
++      int index;
++
++      if (unlikely(log->head == log->tail))
++              return ERR_PTR(-ENODATA);
++
++      index = log->tail & (ARRAY_SIZE(log->buffer) - 1);
++      p = log->buffer[index];
++      if (unlikely(strlen(p) > len))
++              return ERR_PTR(-EMSGSIZE);
++
++      log->buffer[index] = NULL;
++      *need_free = log->need_free & (1 << index);
++      log->need_free &= ~(1 << index);
++      log->tail++;
++
++      return p;
++}
++
+ /*
+  * Allow the user to read back any error, warning or informational messages.
++ * Only one message is returned for each read(2) call.
+  */
+ static ssize_t fscontext_read(struct file *file,
+                             char __user *_buf, size_t len, loff_t *pos)
+ {
+       struct fs_context *fc = file->private_data;
+-      struct fc_log *log = fc->log.log;
+-      unsigned int logsize = ARRAY_SIZE(log->buffer);
+-      ssize_t ret;
+-      char *p;
++      ssize_t err;
++      const char *p __free(kfree) = NULL, *message;
+       bool need_free;
+-      int index, n;
+-
+-      ret = mutex_lock_interruptible(&fc->uapi_mutex);
+-      if (ret < 0)
+-              return ret;
+-
+-      if (log->head == log->tail) {
+-              mutex_unlock(&fc->uapi_mutex);
+-              return -ENODATA;
+-      }
++      int n;
+-      index = log->tail & (logsize - 1);
+-      p = log->buffer[index];
+-      need_free = log->need_free & (1 << index);
+-      log->buffer[index] = NULL;
+-      log->need_free &= ~(1 << index);
+-      log->tail++;
++      err = mutex_lock_interruptible(&fc->uapi_mutex);
++      if (err < 0)
++              return err;
++      message = fetch_message_locked(fc->log.log, len, &need_free);
+       mutex_unlock(&fc->uapi_mutex);
++      if (IS_ERR(message))
++              return PTR_ERR(message);
+-      ret = -EMSGSIZE;
+-      n = strlen(p);
+-      if (n > len)
+-              goto err_free;
+-      ret = -EFAULT;
+-      if (copy_to_user(_buf, p, n) != 0)
+-              goto err_free;
+-      ret = n;
+-
+-err_free:
+       if (need_free)
+-              kfree(p);
+-      return ret;
++              p = message;
++
++      n = strlen(message);
++      if (copy_to_user(_buf, message, n))
++              return -EFAULT;
++      return n;
+ }
+ static int fscontext_release(struct inode *inode, struct file *file)
diff --git a/queue-5.10/locking-introduce-__cleanup-based-infrastructure.patch b/queue-5.10/locking-introduce-__cleanup-based-infrastructure.patch
new file mode 100644
index 0000000..5a46314
--- /dev/null
+++ b/queue-5.10/locking-introduce-__cleanup-based-infrastructure.patch
@@ -0,0 +1,583 @@
+From stable+bounces-185849-greg=kroah.com@vger.kernel.org Wed Oct 15 20:44:36 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Oct 2025 14:44:26 -0400
+Subject: locking: Introduce __cleanup() based infrastructure
+To: stable@vger.kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251015184427.1495851-1-sashal@kernel.org>
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 54da6a0924311c7cf5015533991e44fb8eb12773 ]
+
+Use __attribute__((__cleanup__(func))) to build:
+
+ - simple auto-release pointers using __free()
+
+ - 'classes' with constructor and destructor semantics for
+   scope-based resource management.
+
+ - lock guards based on the above classes.
+
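+A hedged usage sketch (illustrative; 'struct foo', 'init_foo()' and
+'foo_lock' are placeholders, not part of this patch):
+
+	static struct foo *alloc_foo(void)
+	{
+		struct foo *f __free(kfree) = kmalloc(sizeof(*f), GFP_KERNEL);
+
+		if (!f)
+			return NULL;		/* cleanup handles NULL */
+
+		guard(mutex)(&foo_lock);	/* mutex_unlock() on every return */
+
+		if (!init_foo(f))
+			return NULL;		/* f is kfree()d automatically */
+
+		return_ptr(f);			/* inhibit the kfree(), hand f out */
+	}
+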
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230612093537.614161713%40infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/ioat/dma.c              |   12 +-
+ include/linux/cleanup.h             |  171 ++++++++++++++++++++++++++++++++++++
+ include/linux/compiler-clang.h      |    9 +
+ include/linux/compiler_attributes.h |    6 +
+ include/linux/device.h              |    7 +
+ include/linux/file.h                |    6 +
+ include/linux/irqflags.h            |    7 +
+ include/linux/mutex.h               |    4 
+ include/linux/percpu.h              |    4 
+ include/linux/preempt.h             |    5 +
+ include/linux/rcupdate.h            |    3 
+ include/linux/rwsem.h               |    9 +
+ include/linux/sched/task.h          |    2 
+ include/linux/slab.h                |    3 
+ include/linux/spinlock.h            |   32 ++++++
+ include/linux/srcu.h                |    5 +
+ scripts/checkpatch.pl               |    2 
+ 17 files changed, 280 insertions(+), 7 deletions(-)
+ create mode 100644 include/linux/cleanup.h
+
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -584,11 +584,11 @@ desc_get_errstat(struct ioatdma_chan *io
+ }
+ /**
+- * __cleanup - reclaim used descriptors
++ * __ioat_cleanup - reclaim used descriptors
+  * @ioat_chan: channel (ring) to clean
+  * @phys_complete: zeroed (or not) completion address (from status)
+  */
+-static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
++static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+ {
+       struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+       struct ioat_ring_ent *desc;
+@@ -675,7 +675,7 @@ static void ioat_cleanup(struct ioatdma_
+       spin_lock_bh(&ioat_chan->cleanup_lock);
+       if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+       if (is_ioat_halted(*ioat_chan->completion)) {
+               u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+@@ -712,7 +712,7 @@ static void ioat_restart_channel(struct
+       ioat_quiesce(ioat_chan, 0);
+       if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+       __ioat_restart_chan(ioat_chan);
+ }
+@@ -786,7 +786,7 @@ static void ioat_eh(struct ioatdma_chan
+       /* cleanup so tail points to descriptor that caused the error */
+       if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+@@ -943,7 +943,7 @@ void ioat_timer_event(struct timer_list
+               /* timer restarted in ioat_cleanup_preamble
+                * and IOAT_COMPLETION_ACK cleared
+                */
+-              __cleanup(ioat_chan, phys_complete);
++              __ioat_cleanup(ioat_chan, phys_complete);
+               goto unlock_out;
+       }
+--- /dev/null
++++ b/include/linux/cleanup.h
+@@ -0,0 +1,171 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_GUARDS_H
++#define __LINUX_GUARDS_H
++
++#include <linux/compiler.h>
++
++/*
++ * DEFINE_FREE(name, type, free):
++ *    simple helper macro that defines the required wrapper for a __free()
++ *    based cleanup function. @free is an expression using '_T' to access
++ *    the variable.
++ *
++ * __free(name):
++ *    variable attribute to add a scope-based cleanup to the variable.
++ *
++ * no_free_ptr(var):
++ *    like a non-atomic xchg(var, NULL), such that the cleanup function will
++ *    be inhibited -- provided it sanely deals with a NULL value.
++ *
++ * return_ptr(p):
++ *    returns p while inhibiting the __free().
++ *
++ * Ex.
++ *
++ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++ *
++ *    struct obj *p __free(kfree) = kmalloc(...);
++ *    if (!p)
++ *            return NULL;
++ *
++ *    if (!init_obj(p))
++ *            return NULL;
++ *
++ *    return_ptr(p);
++ */
++
++#define DEFINE_FREE(_name, _type, _free) \
++      static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
++
++#define __free(_name) __cleanup(__free_##_name)
++
++#define no_free_ptr(p) \
++      ({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
++
++#define return_ptr(p) return no_free_ptr(p)
++
++
++/*
++ * DEFINE_CLASS(name, type, exit, init, init_args...):
++ *    helper to define the destructor and constructor for a type.
++ *    @exit is an expression using '_T' -- similar to FREE above.
++ *    @init is an expression in @init_args resulting in @type
++ *
++ * EXTEND_CLASS(name, ext, init, init_args...):
++ *    extends class @name to @name@ext with the new constructor
++ *
++ * CLASS(name, var)(args...):
++ *    declare the variable @var as an instance of the named class
++ *
++ * Ex.
++ *
++ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
++ *
++ *    CLASS(fdget, f)(fd);
++ *    if (!f.file)
++ *            return -EBADF;
++ *
++ *    // use 'f' without concern
++ */
++
++#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)               \
++typedef _type class_##_name##_t;                                      \
++static inline void class_##_name##_destructor(_type *p)                       \
++{ _type _T = *p; _exit; }                                             \
++static inline _type class_##_name##_constructor(_init_args)           \
++{ _type t = _init; return t; }
++
++#define EXTEND_CLASS(_name, ext, _init, _init_args...)                        \
++typedef class_##_name##_t class_##_name##ext##_t;                     \
++static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
++{ class_##_name##_destructor(p); }                                    \
++static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
++{ class_##_name##_t t = _init; return t; }
++
++#define CLASS(_name, var)                                             \
++      class_##_name##_t var __cleanup(class_##_name##_destructor) =   \
++              class_##_name##_constructor
++
++
++/*
++ * DEFINE_GUARD(name, type, lock, unlock):
++ *    trivial wrapper around DEFINE_CLASS() above specifically
++ *    for locks.
++ *
++ * guard(name):
++ *    an anonymous instance of the (guard) class
++ *
++ * scoped_guard (name, args...) { }:
++ *    similar to CLASS(name, scope)(args), except the variable (with the
++ *    explicit name 'scope') is declared in a for-loop such that its scope is
++ *    bound to the next (compound) statement.
++ *
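++ * Ex. (illustrative; 'mylock' is a placeholder):
++ *
++ *	guard(mutex)(&mylock);	// mutex dropped at end of scope
++ *
++ *	scoped_guard (rcu) {
++ *		// RCU read-side critical section bound to this block
++ *	}
++ *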
++ */
++
++#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
++      DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
++
++#define guard(_name) \
++      CLASS(_name, __UNIQUE_ID(guard))
++
++#define scoped_guard(_name, args...)                                  \
++      for (CLASS(_name, scope)(args),                                 \
++           *done = NULL; !done; done = (void *)1)
++
++/*
++ * Additional helper macros for generating lock guards with types, either for
++ * locks that don't have a native type (eg. RCU, preempt) or those that need a
++ * 'fat' pointer (eg. spin_lock_irqsave).
++ *
++ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
++ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
++ *
++ * will result in the following type:
++ *
++ *   typedef struct {
++ *    type *lock;             // 'type := void' for the _0 variant
++ *    __VA_ARGS__;
++ *   } class_##name##_t;
++ *
++ * As above, both _lock and _unlock are statements, except this time '_T' will
++ * be a pointer to the above struct.
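++ *
++ * Ex. (illustrative; mirrors the spinlock_irqsave guard defined below):
++ *
++ *	DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
++ *			    spin_lock_irqsave(_T->lock, _T->flags),
++ *			    spin_unlock_irqrestore(_T->lock, _T->flags),
++ *			    unsigned long flags)
++ *
++ *	scoped_guard (spinlock_irqsave, &mylock) {
++ *		// IRQs saved and lock held for this block only
++ *	}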
++ */
++
++#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)             \
++typedef struct {                                                      \
++      _type *lock;                                                    \
++      __VA_ARGS__;                                                    \
++} class_##_name##_t;                                                  \
++                                                                      \
++static inline void class_##_name##_destructor(class_##_name##_t *_T)  \
++{                                                                     \
++      if (_T->lock) { _unlock; }                                      \
++}
++
++
++#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)                    \
++static inline class_##_name##_t class_##_name##_constructor(_type *l) \
++{                                                                     \
++      class_##_name##_t _t = { .lock = l }, *_T = &_t;                \
++      _lock;                                                          \
++      return _t;                                                      \
++}
++
++#define __DEFINE_LOCK_GUARD_0(_name, _lock)                           \
++static inline class_##_name##_t class_##_name##_constructor(void)     \
++{                                                                     \
++      class_##_name##_t _t = { .lock = (void*)1 },                    \
++                       *_T __maybe_unused = &_t;                      \
++      _lock;                                                          \
++      return _t;                                                      \
++}
++
++#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)                \
++__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)             \
++__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
++
++#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)                       \
++__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)              \
++__DEFINE_LOCK_GUARD_0(_name, _lock)
++
++#endif /* __LINUX_GUARDS_H */
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -15,6 +15,15 @@
+ /* Compiler specific definitions for Clang compiler */
++/*
++ * Clang prior to 17 is being silly and considers many __cleanup() variables
++ * as unused (because they are, their sole purpose is to go out of scope).
++ *
++ * https://reviews.llvm.org/D152180
++ */
++#undef __cleanup
++#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
++
+ /* same as gcc, this was present in clang-2.6 so we can assume it works
+  * with any version that can compile the kernel
+  */
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -95,6 +95,12 @@
+ #define __cold                          __attribute__((__cold__))
+ /*
++ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
++ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
++ */
++#define __cleanup(func)                       __attribute__((__cleanup__(func)))
++
++/*
+  * Note the long name.
+  *
+  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -30,6 +30,7 @@
+ #include <linux/device/bus.h>
+ #include <linux/device/class.h>
+ #include <linux/device/driver.h>
++#include <linux/cleanup.h>
+ #include <asm/device.h>
+ struct device;
+@@ -829,6 +830,9 @@ void device_unregister(struct device *de
+ void device_initialize(struct device *dev);
+ int __must_check device_add(struct device *dev);
+ void device_del(struct device *dev);
++
++DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
++
+ int device_for_each_child(struct device *dev, void *data,
+                         int (*fn)(struct device *dev, void *data));
+ int device_for_each_child_reverse(struct device *dev, void *data,
+@@ -957,6 +961,9 @@ extern int (*platform_notify_remove)(str
+  */
+ struct device *get_device(struct device *dev);
+ void put_device(struct device *dev);
++
++DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
++
+ bool kill_device(struct device *dev);
+ #ifdef CONFIG_DEVTMPFS
+--- a/include/linux/file.h
++++ b/include/linux/file.h
+@@ -10,6 +10,7 @@
+ #include <linux/types.h>
+ #include <linux/posix_types.h>
+ #include <linux/errno.h>
++#include <linux/cleanup.h>
+ struct file;
+@@ -82,6 +83,8 @@ static inline void fdput_pos(struct fd f
+       fdput(f);
+ }
++DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
++
+ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
+ extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
+ extern void set_close_on_exec(unsigned int fd, int flag);
+@@ -90,6 +93,9 @@ extern int __get_unused_fd_flags(unsigne
+ extern int get_unused_fd_flags(unsigned flags);
+ extern void put_unused_fd(unsigned int fd);
++DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
++           get_unused_fd_flags(flags), unsigned flags)
++
+ extern void fd_install(unsigned int fd, struct file *file);
+ extern int __receive_fd(int fd, struct file *file, int __user *ufd,
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -13,6 +13,7 @@
+ #define _LINUX_TRACE_IRQFLAGS_H
+ #include <linux/typecheck.h>
++#include <linux/cleanup.h>
+ #include <asm/irqflags.h>
+ #include <asm/percpu.h>
+@@ -248,4 +249,10 @@ do {                                              \
+ #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
++DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
++DEFINE_LOCK_GUARD_0(irqsave,
++                  local_irq_save(_T->flags),
++                  local_irq_restore(_T->flags),
++                  unsigned long flags)
++
+ #endif
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -19,6 +19,7 @@
+ #include <asm/processor.h>
+ #include <linux/osq_lock.h>
+ #include <linux/debug_locks.h>
++#include <linux/cleanup.h>
+ struct ww_acquire_ctx;
+@@ -224,4 +225,7 @@ enum mutex_trylock_recursive_enum {
+ extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+ mutex_trylock_recursive(struct mutex *lock);
++DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
++DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
++
+ #endif /* __LINUX_MUTEX_H */
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -9,6 +9,7 @@
+ #include <linux/printk.h>
+ #include <linux/pfn.h>
+ #include <linux/init.h>
++#include <linux/cleanup.h>
+ #include <asm/percpu.h>
+@@ -134,6 +135,9 @@ extern void __init setup_per_cpu_areas(v
+ extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
+ extern void __percpu *__alloc_percpu(size_t size, size_t align);
+ extern void free_percpu(void __percpu *__pdata);
++
++DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
++
+ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+ #define alloc_percpu_gfp(type, gfp)                                   \
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -8,6 +8,7 @@
+  */
+ #include <linux/linkage.h>
++#include <linux/cleanup.h>
+ #include <linux/list.h>
+ /*
+@@ -352,4 +353,8 @@ static __always_inline void migrate_enab
+       preempt_enable();
+ }
++DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
++DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
++DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
++
+ #endif /* __LINUX_PREEMPT_H */
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -27,6 +27,7 @@
+ #include <linux/preempt.h>
+ #include <linux/bottom_half.h>
+ #include <linux/lockdep.h>
++#include <linux/cleanup.h>
+ #include <asm/processor.h>
+ #include <linux/cpumask.h>
+@@ -1058,4 +1059,6 @@ rcu_head_after_call_rcu(struct rcu_head
+ extern int rcu_expedited;
+ extern int rcu_normal;
++DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
++
+ #endif /* __LINUX_RCUPDATE_H */
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -16,6 +16,8 @@
+ #include <linux/spinlock.h>
+ #include <linux/atomic.h>
+ #include <linux/err.h>
++#include <linux/cleanup.h>
++
+ #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ #include <linux/osq_lock.h>
+ #endif
+@@ -152,6 +154,13 @@ extern void up_read(struct rw_semaphore
+  */
+ extern void up_write(struct rw_semaphore *sem);
++DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
++DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
++
++DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
++DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
++
++
+ /*
+  * downgrade write lock to read lock
+  */
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -143,6 +143,8 @@ static inline void put_task_struct(struc
+               __put_task_struct(t);
+ }
++DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
++
+ static inline void put_task_struct_many(struct task_struct *t, int nr)
+ {
+       if (refcount_sub_and_test(nr, &t->usage))
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
+ #include <linux/percpu-refcount.h>
++#include <linux/cleanup.h>
+ /*
+@@ -187,6 +188,8 @@ void kfree_sensitive(const void *);
+ size_t __ksize(const void *);
+ size_t ksize(const void *);
++DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++
+ #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+                       bool to_user);
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -57,6 +57,7 @@
+ #include <linux/stringify.h>
+ #include <linux/bottom_half.h>
+ #include <linux/lockdep.h>
++#include <linux/cleanup.h>
+ #include <asm/barrier.h>
+ #include <asm/mmiowb.h>
+@@ -493,4 +494,35 @@ int __alloc_bucket_spinlocks(spinlock_t
+ void free_bucket_spinlocks(spinlock_t *locks);
++DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
++                  raw_spin_lock(_T->lock),
++                  raw_spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
++                  raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
++                  raw_spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
++                  raw_spin_lock_irq(_T->lock),
++                  raw_spin_unlock_irq(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
++                  raw_spin_lock_irqsave(_T->lock, _T->flags),
++                  raw_spin_unlock_irqrestore(_T->lock, _T->flags),
++                  unsigned long flags)
++
++DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
++                  spin_lock(_T->lock),
++                  spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
++                  spin_lock_irq(_T->lock),
++                  spin_unlock_irq(_T->lock))
++
++DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
++                  spin_lock_irqsave(_T->lock, _T->flags),
++                  spin_unlock_irqrestore(_T->lock, _T->flags),
++                  unsigned long flags)
++
++#undef __LINUX_INSIDE_SPINLOCK_H
+ #endif /* __LINUX_SPINLOCK_H */
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -205,4 +205,9 @@ static inline void smp_mb__after_srcu_re
+       /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
+ }
++DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
++                  _T->idx = srcu_read_lock(_T->lock),
++                  srcu_read_unlock(_T->lock, _T->idx),
++                  int idx)
++
+ #endif
+--- a/scripts/checkpatch.pl
++++ b/scripts/checkpatch.pl
+@@ -4522,7 +4522,7 @@ sub process {
+                               if|for|while|switch|return|case|
+                               volatile|__volatile__|
+                               __attribute__|format|__extension__|
+-                              asm|__asm__)$/x)
++                              asm|__asm__|scoped_guard)$/x)
+                       {
+                       # cpp #define statements have non-optional spaces, ie
+                       # if there is a space between the name and the open
diff --git a/queue-5.10/series b/queue-5.10/series
index 2c5f19ead5c3d0ced118141dda698f7815cb1934..87303f2264fa1058262bf8044356ea0530d7da97 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -190,3 +190,6 @@ pid-add-a-judgment-for-ns-null-in-pid_nr_ns.patch
 pid-make-__task_pid_nr_ns-ns-null-safe-for-zombie-ca.patch
 fs-add-initramfs_options-to-set-initramfs-mount-opti.patch
 cramfs-verify-inode-mode-when-loading-from-disk.patch
+locking-introduce-__cleanup-based-infrastructure.patch
+fscontext-do-not-consume-log-entries-when-returning-emsgsize.patch
+arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch