--- /dev/null
+From 6e284c55fc0bef7d25fd34d29db11f483da60ea4 Mon Sep 17 00:00:00 2001
+From: Zhen Lei <thunder.leizhen@huawei.com>
+Date: Sat, 5 Aug 2023 11:17:25 +0800
+Subject: mm: Remove kmem_valid_obj()
+
+From: Zhen Lei <thunder.leizhen@huawei.com>
+
+commit 6e284c55fc0bef7d25fd34d29db11f483da60ea4 upstream.
+
+Function kmem_dump_obj() will splat if passed a pointer to a non-slab
+object, so nothing calls it directly; callers invoke kmem_valid_obj()
+first to determine whether the passed pointer points to a valid slab
+object. This means that merging kmem_valid_obj() into kmem_dump_obj()
+will make the code more concise. Therefore, convert kmem_dump_obj() to
+work the same way as vmalloc_dump_obj(), removing the need for the
+kmem_dump_obj() caller to check kmem_valid_obj(). After this, there
+are no remaining calls to kmem_valid_obj(), and it can be safely
+removed.
+
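For context, the calling convention this patch establishes can be sketched
in a few lines of standalone C. dump_slab_obj() and dump_vmalloc_obj() below
are hypothetical stand-ins for kmem_dump_obj() and vmalloc_dump_obj(), not
kernel APIs; the control flow mirrors the simplified mem_dump_obj() in the
mm/util.c hunk at the end of this patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for kmem_dump_obj(): after this patch the real
 * function validates the pointer itself and returns false for memory it
 * does not recognize instead of splatting, so callers no longer need a
 * separate kmem_valid_obj() check. This toy version simply treats any
 * non-NULL pointer as "recognized".
 */
static bool dump_slab_obj(void *object)
{
	if (!object)
		return false;
	printf("slab-like object at %p\n", object);
	return true;
}

/*
 * Hypothetical stand-in for vmalloc_dump_obj(), which already reported
 * success via its return value; this one never claims the pointer.
 */
static bool dump_vmalloc_obj(void *object)
{
	(void)object;
	return false;
}

/*
 * Mirrors the simplified mem_dump_obj() in the mm/util.c hunk below:
 * try each dumper in turn and stop at the first one that recognizes
 * the pointer.
 */
static void dump_mem_obj(void *object)
{
	if (dump_slab_obj(object))
		return;
	if (dump_vmalloc_obj(object))
		return;
	printf("%p is not a recognized object\n", object);
}

int main(void)
{
	int local = 0;

	dump_mem_obj(&local);	/* claimed by the slab stand-in */
	dump_mem_obj(NULL);	/* falls through to the default report */
	return 0;
}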
+Suggested-by: Matthew Wilcox <willy@infradead.org>
+Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/slab.h | 5 +++--
+ mm/slab_common.c | 41 +++++++++++------------------------------
+ mm/util.c | 4 +---
+ 3 files changed, 15 insertions(+), 35 deletions(-)
+
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -215,8 +215,9 @@ DEFINE_FREE(kfree, void *, if (!IS_ERR_O
+ size_t ksize(const void *objp);
+
+ #ifdef CONFIG_PRINTK
+-bool kmem_valid_obj(void *object);
+-void kmem_dump_obj(void *object);
++bool kmem_dump_obj(void *object);
++#else
++static inline bool kmem_dump_obj(void *object) { return false; }
+ #endif
+
+ /*
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -523,26 +523,6 @@ bool slab_is_available(void)
+ }
+
+ #ifdef CONFIG_PRINTK
+-/**
+- * kmem_valid_obj - does the pointer reference a valid slab object?
+- * @object: pointer to query.
+- *
+- * Return: %true if the pointer is to a not-yet-freed object from
+- * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+- * is to an already-freed object, and %false otherwise.
+- */
+-bool kmem_valid_obj(void *object)
+-{
+- struct folio *folio;
+-
+- /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+- if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+- return false;
+- folio = virt_to_folio(object);
+- return folio_test_slab(folio);
+-}
+-EXPORT_SYMBOL_GPL(kmem_valid_obj);
+-
+ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ if (__kfence_obj_info(kpp, object, slab))
+@@ -561,11 +541,11 @@ static void kmem_obj_info(struct kmem_ob
+ * and, if available, the slab name, return address, and stack trace from
+ * the allocation and last free path of that object.
+ *
+- * This function will splat if passed a pointer to a non-slab object.
+- * If you are not sure what type of object you have, you should instead
+- * use mem_dump_obj().
++ * Return: %true if the pointer is to a not-yet-freed object from
++ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
++ * is to an already-freed object, and %false otherwise.
+ */
+-void kmem_dump_obj(void *object)
++bool kmem_dump_obj(void *object)
+ {
+ char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
+ int i;
+@@ -573,13 +553,13 @@ void kmem_dump_obj(void *object)
+ unsigned long ptroffset;
+ struct kmem_obj_info kp = { };
+
+- if (WARN_ON_ONCE(!virt_addr_valid(object)))
+- return;
++ /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
++ if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
++ return false;
+ slab = virt_to_slab(object);
+- if (WARN_ON_ONCE(!slab)) {
+- pr_cont(" non-slab memory.\n");
+- return;
+- }
++ if (!slab)
++ return false;
++
+ kmem_obj_info(&kp, object, slab);
+ if (kp.kp_slab_cache)
+ pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+@@ -616,6 +596,7 @@ void kmem_dump_obj(void *object)
+ pr_info(" %pS\n", kp.kp_free_stack[i]);
+ }
+
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(kmem_dump_obj);
+ #endif
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -1119,10 +1119,8 @@ void mem_dump_obj(void *object)
+ {
+ const char *type;
+
+- if (kmem_valid_obj(object)) {
+- kmem_dump_obj(object);
++ if (kmem_dump_obj(object))
+ return;
+- }
+
+ if (vmalloc_dump_obj(object))
+ return;
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
- kernel/rcu/rcu.h | 7 +++++++
- kernel/rcu/srcutiny.c | 1 +
- kernel/rcu/srcutree.c | 1 +
- kernel/rcu/tasks.h | 1 +
- kernel/rcu/tiny.c | 1 +
- kernel/rcu/tree.c | 1 +
+ kernel/rcu/rcu.h | 7 +++++++
+ kernel/rcu/srcutiny.c | 1 +
+ kernel/rcu/srcutree.c | 1 +
+ kernel/rcu/tasks.h | 1 +
+ kernel/rcu/tiny.c | 1 +
+ kernel/rcu/tree.c | 1 +
6 files changed, 12 insertions(+)
-diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
-index 48d8f754b730e..49ff955ed2034 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -10,6 +10,7 @@
+#include <linux/slab.h>
#include <trace/events/rcu.h>
/*
-@@ -211,6 +212,12 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+@@ -211,6 +212,12 @@ static inline void debug_rcu_head_unqueu
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void debug_rcu_head_callback(struct rcu_head *rhp)
+{
+	if (unlikely(!rhp->func))
+		kmem_dump_obj(rhp);
+}
extern int rcu_cpu_stall_suppress_at_boot;
static inline bool rcu_stall_is_suppressed_at_boot(void)
-diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
-index 33adafdad2613..5e7f336baa06a 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
-@@ -138,6 +138,7 @@ void srcu_drive_gp(struct work_struct *wp)
+@@ -138,6 +138,7 @@ void srcu_drive_gp(struct work_struct *w
while (lh) {
rhp = lh;
lh = lh->next;
+	debug_rcu_head_callback(rhp);
local_bh_disable();
rhp->func(rhp);
local_bh_enable();
-diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
-index 929dcbc04d29c..f7825900bdfd7 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
-@@ -1591,6 +1591,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+@@ -1591,6 +1591,7 @@ static void srcu_invoke_callbacks(struct
rhp = rcu_cblist_dequeue(&ready_cbs);
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
debug_rcu_head_unqueue(rhp);
+	debug_rcu_head_callback(rhp);
local_bh_disable();
rhp->func(rhp);
local_bh_enable();
-diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
-index 456c956f481ef..bb6b037ef30fa 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
-@@ -487,6 +487,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
+@@ -487,6 +487,7 @@ static void rcu_tasks_invoke_cbs(struct
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
len = rcl.len;
for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
+	debug_rcu_head_callback(rhp);
local_bh_disable();
rhp->func(rhp);
local_bh_enable();
-diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
-index a33a8d4942c37..21c040cba4bd0 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
-@@ -97,6 +97,7 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head)
+@@ -97,6 +97,7 @@ static inline bool rcu_reclaim_tiny(stru
trace_rcu_invoke_callback("", head);
f = head->func;
WRITE_ONCE(head->func, (rcu_callback_t)0L);
+	debug_rcu_head_callback(head);
f(head);
rcu_lock_release(&rcu_callback_map);
-diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
-index cd6144cea5a1a..86923a8914007 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -2292,6 +2292,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
+@@ -2292,6 +2292,7 @@ static void rcu_do_batch(struct rcu_data
trace_rcu_invoke_callback(rcu_state.name, rhp);
f = rhp->func;
WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
+	debug_rcu_head_callback(rhp);
f(rhp);
---
-2.43.0
-
ssb-fix-division-by-zero-issue-in-ssb_calc_clock_rat.patch
wifi-cfg80211-check-wiphy-mutex-is-held-for-wdev-mut.patch
wifi-mac80211-fix-ba-session-teardown-race.patch
+mm-remove-kmem_valid_obj.patch
rcu-dump-memory-object-info-if-callback-function-is-.patch
rcu-eliminate-rcu_gp_slow_unregister-false-positive.patch
wifi-cw1200-avoid-processing-an-invalid-tim-ie.patch
net-change-maximum-number-of-udp-segments-to-128.patch
selftests-net-more-strict-check-in-net_helper.patch
input-mt-limit-max-slots.patch
+tools-move-alignment-related-macros-to-new-linux-align.h.patch
--- /dev/null
+From 10a04ff09bcc39e0044190ffe9f00f998f13647c Mon Sep 17 00:00:00 2001
+From: Alexander Lobakin <aleksander.lobakin@intel.com>
+Date: Wed, 27 Mar 2024 16:23:48 +0100
+Subject: tools: move alignment-related macros to new <linux/align.h>
+
+From: Alexander Lobakin <aleksander.lobakin@intel.com>
+
+commit 10a04ff09bcc39e0044190ffe9f00f998f13647c upstream.
+
+Currently, tools have *ALIGN*() macros scattered across unrelated
+headers, as there are only 3 of them and they were added separately
+each time on an as-needed basis.
+Let's make this more consistent with the kernel headers and allow
+using those macros outside of the headers that currently define them.
+Create <linux/align.h> inside the tools/ folder and include it where
+needed.
+
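As a quick illustration of what the consolidated header provides, here is a
standalone sketch: the __ALIGN_KERNEL() helpers are restated from
<uapi/linux/const.h> so the snippet compiles on its own, and the concrete
values are only examples.

#include <stdio.h>

/* Restated from <uapi/linux/const.h> so this example is self-contained. */
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)

/*
 * The three macros gathered into tools/include/linux/align.h (IS_ALIGNED()
 * uses typeof() there; __typeof__ is used here for portability).
 */
#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
#define IS_ALIGNED(x, a)	(((x) & ((__typeof__(x))(a) - 1)) == 0)

int main(void)
{
	unsigned long x = 0x1005;

	/* The alignment argument must be a power of two. */
	printf("ALIGN(0x%lx, 8)      = 0x%lx\n", x, ALIGN(x, 8UL));      /* 0x1008 */
	printf("ALIGN_DOWN(0x%lx, 8) = 0x%lx\n", x, ALIGN_DOWN(x, 8UL)); /* 0x1000 */
	printf("IS_ALIGNED(0x%lx, 8) = %d\n", x, IS_ALIGNED(x, 8UL));    /* 0 */
	return 0;
}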
+Signed-off-by: Yury Norov <yury.norov@gmail.com>
+Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/include/linux/align.h | 12 ++++++++++++
+ tools/include/linux/bitmap.h | 2 +-
+ tools/include/linux/mm.h | 5 +----
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+ create mode 100644 tools/include/linux/align.h
+
+--- /dev/null
++++ b/tools/include/linux/align.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef _TOOLS_LINUX_ALIGN_H
++#define _TOOLS_LINUX_ALIGN_H
++
++#include <uapi/linux/const.h>
++
++#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
++#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
++#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
++
++#endif /* _TOOLS_LINUX_ALIGN_H */
+--- a/tools/include/linux/bitmap.h
++++ b/tools/include/linux/bitmap.h
+@@ -3,6 +3,7 @@
+ #define _TOOLS_LINUX_BITMAP_H
+
+ #include <string.h>
++#include <linux/align.h>
+ #include <linux/bitops.h>
+ #include <linux/find.h>
+ #include <stdlib.h>
+@@ -161,7 +162,6 @@ static inline bool bitmap_and(unsigned l
+ #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+ #endif
+ #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+ static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -2,8 +2,8 @@
+ #ifndef _TOOLS_LINUX_MM_H
+ #define _TOOLS_LINUX_MM_H
+
++#include <linux/align.h>
+ #include <linux/mmzone.h>
+-#include <uapi/linux/const.h>
+
+ #define PAGE_SHIFT 12
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+@@ -11,9 +11,6 @@
+
+ #define PHYS_ADDR_MAX (~(phys_addr_t)0)
+
+-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+-
+ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+ #define __va(x) ((void *)((unsigned long)(x)))