From: Greg Kroah-Hartman
Date: Thu, 29 Aug 2024 14:24:24 +0000 (+0200)
Subject: 6.6-stable patches
X-Git-Tag: v6.1.107~1
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=92b0e4739e0989af1ce49be243f1ed6326b23e3a;p=thirdparty%2Fkernel%2Fstable-queue.git

6.6-stable patches

added patches:
	mm-remove-kmem_valid_obj.patch
	tools-move-alignment-related-macros-to-new-linux-align.h.patch
---

diff --git a/queue-6.6/mm-remove-kmem_valid_obj.patch b/queue-6.6/mm-remove-kmem_valid_obj.patch
new file mode 100644
index 00000000000..122e5f26838
--- /dev/null
+++ b/queue-6.6/mm-remove-kmem_valid_obj.patch
@@ -0,0 +1,132 @@
+From 6e284c55fc0bef7d25fd34d29db11f483da60ea4 Mon Sep 17 00:00:00 2001
+From: Zhen Lei
+Date: Sat, 5 Aug 2023 11:17:25 +0800
+Subject: mm: Remove kmem_valid_obj()
+
+From: Zhen Lei
+
+commit 6e284c55fc0bef7d25fd34d29db11f483da60ea4 upstream.
+
+Function kmem_dump_obj() will splat if passed a pointer to a non-slab
+object. So nothing calls it directly, instead calling kmem_valid_obj()
+first to determine whether the passed pointer points to a valid slab
+object. This means that merging kmem_valid_obj() into kmem_dump_obj()
+will make the code more concise. Therefore, convert kmem_dump_obj() to
+work the same way as vmalloc_dump_obj(), removing the need for the
+kmem_dump_obj() caller to check kmem_valid_obj(). After this, there are
+no remaining calls to kmem_valid_obj(), and it can be safely removed.
+
+Suggested-by: Matthew Wilcox
+Signed-off-by: Zhen Lei
+Reviewed-by: Matthew Wilcox (Oracle)
+Acked-by: Vlastimil Babka
+Signed-off-by: Paul E. McKenney
+Signed-off-by: Frederic Weisbecker
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/slab.h |    5 +++--
+ mm/slab_common.c     |   41 +++++++++++------------------------------
+ mm/util.c            |    4 +---
+ 3 files changed, 15 insertions(+), 35 deletions(-)
+
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -245,8 +245,9 @@ DEFINE_FREE(kfree, void *, if (!IS_ERR_O
+ size_t ksize(const void *objp);
+ 
+ #ifdef CONFIG_PRINTK
+-bool kmem_valid_obj(void *object);
+-void kmem_dump_obj(void *object);
++bool kmem_dump_obj(void *object);
++#else
++static inline bool kmem_dump_obj(void *object) { return false; }
+ #endif
+ 
+ /*
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -528,26 +528,6 @@ bool slab_is_available(void)
+ }
+ 
+ #ifdef CONFIG_PRINTK
+-/**
+- * kmem_valid_obj - does the pointer reference a valid slab object?
+- * @object: pointer to query.
+- *
+- * Return: %true if the pointer is to a not-yet-freed object from
+- * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+- * is to an already-freed object, and %false otherwise.
+- */
+-bool kmem_valid_obj(void *object)
+-{
+-	struct folio *folio;
+-
+-	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+-	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+-		return false;
+-	folio = virt_to_folio(object);
+-	return folio_test_slab(folio);
+-}
+-EXPORT_SYMBOL_GPL(kmem_valid_obj);
+-
+ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ 	if (__kfence_obj_info(kpp, object, slab))
+@@ -566,11 +546,11 @@ static void kmem_obj_info(struct kmem_ob
+  * and, if available, the slab name, return address, and stack trace from
+  * the allocation and last free path of that object.
+  *
+- * This function will splat if passed a pointer to a non-slab object.
+- * If you are not sure what type of object you have, you should instead
+- * use mem_dump_obj().
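
After this conversion, mem_dump_obj() simply tries each dumper in turn and stops at the first one that claims the pointer, instead of pairing a *_valid_obj() pre-check with a dump call. A minimal userspace sketch of that idiom follows; fake_slab, slab_dump_obj(), and this vmalloc_dump_obj() are illustrative stand-ins under that assumption, not the kernel implementations.

/*
 * Sketch of the vmalloc_dump_obj()-style calling convention: each
 * dumper validates the pointer itself and reports success through its
 * return value, so the dispatcher is a plain cascade.
 *
 * build: cc -o dump_demo dump_demo.c
 */
#include <stdbool.h>
#include <stdio.h>

/* Pretend this buffer is the slab region; everything else is foreign. */
static char fake_slab[64];

/* Returns true only if it recognized *and* dumped the object. */
static bool slab_dump_obj(const void *object)
{
	const char *p = object;

	if (p < fake_slab || p >= fake_slab + sizeof(fake_slab))
		return false;	/* decline quietly instead of splatting */
	printf("slab-like object at %p, offset %td\n", object, p - fake_slab);
	return true;
}

static bool vmalloc_dump_obj(const void *object)
{
	(void)object;		/* stub: nothing is "vmalloc" in this demo */
	return false;
}

static void mem_dump_obj(const void *object)
{
	if (slab_dump_obj(object))
		return;
	if (vmalloc_dump_obj(object))
		return;
	printf("%p is not a recognized object\n", object);
}

int main(void)
{
	int stack_var;

	mem_dump_obj(fake_slab + 8);	/* handled by the slab dumper */
	mem_dump_obj(&stack_var);	/* falls through to the default */
	return 0;
}
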
++ * Return: %true if the pointer is to a not-yet-freed object from
++ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
++ * is to an already-freed object, and %false otherwise.
+  */
+-void kmem_dump_obj(void *object)
++bool kmem_dump_obj(void *object)
+ {
+ 	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
+ 	int i;
+@@ -578,13 +558,13 @@ void kmem_dump_obj(void *object)
+ 	unsigned long ptroffset;
+ 	struct kmem_obj_info kp = { };
+ 
+-	if (WARN_ON_ONCE(!virt_addr_valid(object)))
+-		return;
++	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
++	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
++		return false;
+ 	slab = virt_to_slab(object);
+-	if (WARN_ON_ONCE(!slab)) {
+-		pr_cont(" non-slab memory.\n");
+-		return;
+-	}
++	if (!slab)
++		return false;
++
+ 	kmem_obj_info(&kp, object, slab);
+ 	if (kp.kp_slab_cache)
+ 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+@@ -621,6 +601,7 @@ void kmem_dump_obj(void *object)
+ 		pr_info("    %pS\n", kp.kp_free_stack[i]);
+ 	}
+ 
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(kmem_dump_obj);
+ #endif
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -1070,10 +1070,8 @@ void mem_dump_obj(void *object)
+ {
+ 	const char *type;
+ 
+-	if (kmem_valid_obj(object)) {
+-		kmem_dump_obj(object);
++	if (kmem_dump_obj(object))
+ 		return;
+-	}
+ 
+ 	if (vmalloc_dump_obj(object))
+ 		return;
diff --git a/queue-6.6/series b/queue-6.6/series
index 0b01b961b68..027ebfa5d0f 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -89,6 +89,7 @@ wifi-mac80211-lock-wiphy-in-ip-address-notifier.patch
 wifi-cfg80211-check-wiphy-mutex-is-held-for-wdev-mut.patch
 wifi-mac80211-fix-ba-session-teardown-race.patch
 wifi-iwlwifi-mvm-fix-recovery-flow-in-csa.patch
+mm-remove-kmem_valid_obj.patch
 rcu-dump-memory-object-info-if-callback-function-is-.patch
 rcu-eliminate-rcu_gp_slow_unregister-false-positive.patch
 net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch
@@ -339,3 +340,4 @@ ksmbd-fix-race-condition-between-destroy_previous_session-and-smb2-operations.pa
 net-ngbe-fix-phy-mode-set-to-external-phy.patch
 revert-s390-dasd-establish-dma-alignment.patch
 input-mt-limit-max-slots.patch
+tools-move-alignment-related-macros-to-new-linux-align.h.patch
diff --git a/queue-6.6/tools-move-alignment-related-macros-to-new-linux-align.h.patch b/queue-6.6/tools-move-alignment-related-macros-to-new-linux-align.h.patch
new file mode 100644
index 00000000000..224eb7949c8
--- /dev/null
+++ b/queue-6.6/tools-move-alignment-related-macros-to-new-linux-align.h.patch
@@ -0,0 +1,82 @@
+From 10a04ff09bcc39e0044190ffe9f00f998f13647c Mon Sep 17 00:00:00 2001
+From: Alexander Lobakin
+Date: Wed, 27 Mar 2024 16:23:48 +0100
+Subject: tools: move alignment-related macros to new <linux/align.h>
+
+From: Alexander Lobakin
+
+commit 10a04ff09bcc39e0044190ffe9f00f998f13647c upstream.
+
+Currently, tools have *ALIGN*() macros scattered across unrelated
+headers, as there are only 3 of them and they were added separately
+each time on an as-needed basis.
+Anyway, let's make it more consistent with the kernel headers and allow
+using those macros outside of the mentioned headers. Create
+<linux/align.h> inside the tools/ folder and include it where needed.
+
+Signed-off-by: Yury Norov
+Signed-off-by: Alexander Lobakin
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ tools/include/linux/align.h  |   12 ++++++++++++
+ tools/include/linux/bitmap.h |    2 +-
+ tools/include/linux/mm.h     |    5 +----
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+ create mode 100644 tools/include/linux/align.h
+
+--- /dev/null
++++ b/tools/include/linux/align.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef _TOOLS_LINUX_ALIGN_H
++#define _TOOLS_LINUX_ALIGN_H
++
++#include <linux/const.h>
++
++#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
++#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
++#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
++
++#endif /* _TOOLS_LINUX_ALIGN_H */
+--- a/tools/include/linux/bitmap.h
++++ b/tools/include/linux/bitmap.h
+@@ -3,6 +3,7 @@
+ #define _TOOLS_LINUX_BITMAP_H
+ 
+ #include <string.h>
++#include <linux/align.h>
+ #include <linux/bitops.h>
+ #include <stdlib.h>
+ #include <linux/kernel.h>
+@@ -127,7 +128,6 @@ static inline bool bitmap_and(unsigned l
+ #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+ #endif
+ #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+ 
+ static inline bool bitmap_equal(const unsigned long *src1,
+ 				const unsigned long *src2, unsigned int nbits)
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -2,8 +2,8 @@
+ #ifndef _TOOLS_LINUX_MM_H
+ #define _TOOLS_LINUX_MM_H
+ 
++#include <linux/align.h>
+ #include <linux/mmzone.h>
+-#include <uapi/linux/const.h>
+ 
+ #define PAGE_SHIFT		12
+ #define PAGE_SIZE		(_AC(1, UL) << PAGE_SHIFT)
+@@ -11,9 +11,6 @@
+ 
+ #define PHYS_ADDR_MAX	(~(phys_addr_t)0)
+ 
+-#define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))
+-#define ALIGN_DOWN(x, a)		__ALIGN_KERNEL((x) - ((a) - 1), (a))
+-
+ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+ 
+ #define __va(x) ((void *)((unsigned long)(x)))
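
The three consolidated macros all assume a power-of-two alignment. Below is a standalone userspace sanity check of their semantics; __ALIGN_KERNEL() is written out here following its definition in include/uapi/linux/const.h, and typeof is the GNU C extension (gcc/clang) the originals already rely on.

/*
 * Check of ALIGN()/ALIGN_DOWN()/IS_ALIGNED() semantics for a
 * power-of-two alignment.
 *
 * build: cc -o align_demo align_demo.c
 */
#include <stdio.h>

/* __ALIGN_KERNEL() per include/uapi/linux/const.h. */
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)

/* The three macros gathered into tools/include/linux/align.h: */
#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)

int main(void)
{
	unsigned long x = 5000;

	/* Round up/down to a 4 KiB boundary, and test alignment. */
	printf("ALIGN(%lu, 4096)      = %lu\n", x, ALIGN(x, 4096UL));		/* 8192 */
	printf("ALIGN_DOWN(%lu, 4096) = %lu\n", x, ALIGN_DOWN(x, 4096UL));	/* 4096 */
	printf("IS_ALIGNED(%lu, 4096) = %d\n", x, (int)IS_ALIGNED(x, 4096UL));	/* 0 */
	printf("IS_ALIGNED(%lu, 4096) = %d\n", 8192UL,
	       (int)IS_ALIGNED(8192UL, 4096UL));				/* 1 */
	return 0;
}

ALIGN() rounds up by adding the mask before clearing the low bits, while ALIGN_DOWN() reuses it after first subtracting (a - 1), which is why both reduce to the same masking helper.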