git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 7 May 2025 09:25:50 +0000 (11:25 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 7 May 2025 09:25:50 +0000 (11:25 +0200)
added patches:
bcachefs-remove-incorrect-__counted_by-annotation.patch
mm-slab-clean-up-slab-obj_exts-always.patch

queue-6.12/bcachefs-remove-incorrect-__counted_by-annotation.patch [new file with mode: 0644]
queue-6.12/mm-slab-clean-up-slab-obj_exts-always.patch [new file with mode: 0644]
queue-6.12/series

diff --git a/queue-6.12/bcachefs-remove-incorrect-__counted_by-annotation.patch b/queue-6.12/bcachefs-remove-incorrect-__counted_by-annotation.patch
new file mode 100644 (file)
index 0000000..70ef124
--- /dev/null
@@ -0,0 +1,45 @@
+From 3f105630c0b2e53a93713c2328e3426081f961c1 Mon Sep 17 00:00:00 2001
+From: Alan Huang <mmpgouride@gmail.com>
+Date: Fri, 2 May 2025 04:01:31 +0800
+Subject: bcachefs: Remove incorrect __counted_by annotation
+
+From: Alan Huang <mmpgouride@gmail.com>
+
+commit 6846100b00d97d3d6f05766ae86a0d821d849e78 upstream.
+
+This actually reverts 86e92eeeb237 ("bcachefs: Annotate struct bch_xattr
+with __counted_by()").
+
+After the x_name, there is a value. According to the discussion[1],
+__counted_by assumes that the flexible array member contains exactly
+the number of elements that are specified. Users have now come across
+a false positive detection of an out-of-bounds write caused by
+the __counted_by here[2], so revert it.
+
+[1] https://lore.kernel.org/lkml/Zv8VDKWN1GzLRT-_@archlinux/T/#m0ce9541c5070146320efd4f928cc1ff8de69e9b2
+[2] https://privatebin.net/?a0d4e97d590d71e1#9bLmp2Kb5NU6X6cZEucchDcu88HzUQwHUah8okKPReEt
+
+Signed-off-by: Alan Huang <mmpgouride@gmail.com>
+Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/bcachefs/xattr_format.h |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/bcachefs/xattr_format.h
++++ b/fs/bcachefs/xattr_format.h
+@@ -13,7 +13,13 @@ struct bch_xattr {
+       __u8                    x_type;
+       __u8                    x_name_len;
+       __le16                  x_val_len;
+-      __u8                    x_name[] __counted_by(x_name_len);
++      /*
++       * x_name contains the name and value counted by
++       * x_name_len + x_val_len. The introduction of
++       * __counted_by(x_name_len) caused a false positive
++       * detection of an out of bounds write.
++       */
++      __u8                    x_name[];
+ } __packed __aligned(8);
+
+ #endif /* _BCACHEFS_XATTR_FORMAT_H */
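
For context, a minimal userspace sketch (not part of the queued patch; the
struct and identifiers below are hypothetical stand-ins for struct bch_xattr)
of the pattern the commit message describes: the flexible array holds both the
name and the value, so a fill routine writes x_name_len + x_val_len bytes,
while __counted_by(x_name_len) tells compiler bounds checks to assume the
array holds exactly x_name_len elements.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for struct bch_xattr (names shortened for the demo). */
struct demo_xattr {
	uint8_t  type;
	uint8_t  name_len;
	uint16_t val_len;
	/*
	 * If this member were annotated __counted_by(name_len), compiler
	 * bounds checks would assume it holds exactly name_len bytes,
	 * even though the value is stored right after the name.
	 */
	uint8_t  data[];
};

int main(void)
{
	const char *name = "user.demo";
	const char *val  = "hello";
	uint8_t nlen = (uint8_t)strlen(name);
	uint16_t vlen = (uint16_t)strlen(val);
	struct demo_xattr *x = malloc(sizeof(*x) + nlen + vlen);

	if (!x)
		return 1;
	x->name_len = nlen;
	x->val_len  = vlen;
	memcpy(x->data, name, nlen);
	/*
	 * This write lands past name_len bytes of data[]; it is the access
	 * a __counted_by(name_len) checker flags as out of bounds, even
	 * though the allocation covers name + value.
	 */
	memcpy(x->data + nlen, val, vlen);
	printf("stored %u name bytes and %u value bytes\n",
	       (unsigned)nlen, (unsigned)vlen);
	free(x);
	return 0;
}
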
diff --git a/queue-6.12/mm-slab-clean-up-slab-obj_exts-always.patch b/queue-6.12/mm-slab-clean-up-slab-obj_exts-always.patch
new file mode 100644 (file)
index 0000000..9803000
--- /dev/null
@@ -0,0 +1,95 @@
+From be8250786ca94952a19ce87f98ad9906448bc9ef Mon Sep 17 00:00:00 2001
+From: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+Date: Mon, 21 Apr 2025 15:52:32 +0800
+Subject: mm, slab: clean up slab->obj_exts always
+
+From: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+
+commit be8250786ca94952a19ce87f98ad9906448bc9ef upstream.
+
+When memory allocation profiling is disabled at runtime or due to an
+error, shutdown_mem_profiling() is called: the slab->obj_exts that was
+previously allocated remains. It won't be cleared by unaccount_slab()
+because mem_alloc_profiling_enabled() is no longer true. This is
+incorrect: slab->obj_exts should always be cleaned up in
+unaccount_slab() to avoid the following error:
+
+[...]BUG: Bad page state in process...
+..
+[...]page dumped because: page still charged to cgroup
+
+[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
+Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Acked-by: Harry Yoo <harry.yoo@oracle.com>
+Tested-by: Harry Yoo <harry.yoo@oracle.com>
+Acked-by: Suren Baghdasaryan <surenb@google.com>
+Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+[surenb: fixed trivial merge conflict in alloc_tagging_slab_alloc_hook(),
+skipped inlining free_slab_obj_exts() as it's already inline in 6.12]
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   27 +++++++--------------------
+ 1 file changed, 7 insertions(+), 20 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2035,18 +2035,6 @@ static inline void free_slab_obj_exts(st
+       slab->obj_exts = 0;
+ }
+
+-static inline bool need_slab_obj_ext(void)
+-{
+-      if (mem_alloc_profiling_enabled())
+-              return true;
+-
+-      /*
+-       * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
+-       * inside memcg_slab_post_alloc_hook. No other users for now.
+-       */
+-      return false;
+-}
+-
+ #else /* CONFIG_SLAB_OBJ_EXT */
+
+ static inline void init_slab_obj_exts(struct slab *slab)
+@@ -2063,11 +2051,6 @@ static inline void free_slab_obj_exts(st
+ {
+ }
+
+-static inline bool need_slab_obj_ext(void)
+-{
+-      return false;
+-}
+-
+ #endif /* CONFIG_SLAB_OBJ_EXT */
+
+ #ifdef CONFIG_MEM_ALLOC_PROFILING
+@@ -2099,7 +2082,7 @@ prepare_slab_obj_exts_hook(struct kmem_c
+ static inline void
+ alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
+ {
+-      if (need_slab_obj_ext()) {
++      if (mem_alloc_profiling_enabled()) {
+               struct slabobj_ext *obj_exts;
+               obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+@@ -2577,8 +2560,12 @@ static __always_inline void account_slab
+ static __always_inline void unaccount_slab(struct slab *slab, int order,
+                                          struct kmem_cache *s)
+ {
+-      if (memcg_kmem_online() || need_slab_obj_ext())
+-              free_slab_obj_exts(slab);
++      /*
++       * The slab object extensions should now be freed regardless of
++       * whether mem_alloc_profiling_enabled() or not because profiling
++       * might have been disabled after slab->obj_exts got allocated.
++       */
++      free_slab_obj_exts(slab);
+       mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
+                           -(PAGE_SIZE << order));
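
As a side note, a minimal userspace sketch (not kernel code; all names below
are hypothetical) of the lifecycle problem this patch addresses: obj_exts is
allocated while profiling is enabled, profiling is later shut down at runtime,
and a teardown path gated on the profiling flag never frees the allocation, so
the cleanup in unaccount_slab() has to be unconditional.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the slab->obj_exts lifecycle, not the kernel implementation. */
static bool profiling_enabled = true;

struct toy_slab {
	void *obj_exts;		/* allocated only while profiling is on */
};

static void toy_account(struct toy_slab *s)
{
	if (profiling_enabled)
		s->obj_exts = malloc(64);
}

/* Buggy variant: mirrors gating the cleanup on the profiling flag. */
static void toy_unaccount_buggy(struct toy_slab *s)
{
	if (profiling_enabled) {	/* false by teardown time, so obj_exts leaks */
		free(s->obj_exts);
		s->obj_exts = NULL;
	}
}

/* Fixed variant: always clean up, as unaccount_slab() does after this patch. */
static void toy_unaccount_fixed(struct toy_slab *s)
{
	free(s->obj_exts);		/* free(NULL) is a no-op */
	s->obj_exts = NULL;
}

int main(void)
{
	struct toy_slab s = { 0 };

	toy_account(&s);		/* profiling on: obj_exts gets allocated */
	profiling_enabled = false;	/* profiling shut down before teardown   */

	toy_unaccount_buggy(&s);
	printf("buggy teardown: obj_exts %s\n", s.obj_exts ? "still set (leak)" : "freed");

	toy_unaccount_fixed(&s);
	printf("fixed teardown: obj_exts %s\n", s.obj_exts ? "still set (leak)" : "freed");
	return 0;
}
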
index 3341bd492851de45358af029b9bc66c76d8d8a1d..4c8b4a17cd7e302685ba3a5dd65ae988f1476aae 100644 (file)
@@ -134,3 +134,5 @@ net-vertexcom-mse102x-fix-len_mask.patch
 net-vertexcom-mse102x-add-range-check-for-cmd_rts.patch
 net-vertexcom-mse102x-fix-rx-error-handling.patch
 blk-mq-create-correct-map-for-fallback-case.patch
+mm-slab-clean-up-slab-obj_exts-always.patch
+bcachefs-remove-incorrect-__counted_by-annotation.patch