--- /dev/null
+From 6e284c55fc0bef7d25fd34d29db11f483da60ea4 Mon Sep 17 00:00:00 2001
+From: Zhen Lei <thunder.leizhen@huawei.com>
+Date: Sat, 5 Aug 2023 11:17:25 +0800
+Subject: mm: Remove kmem_valid_obj()
+
+From: Zhen Lei <thunder.leizhen@huawei.com>
+
+commit 6e284c55fc0bef7d25fd34d29db11f483da60ea4 upstream.
+
+Function kmem_dump_obj() will splat if passed a pointer to a non-slab
+object. So nothing calls it directly, instead calling kmem_valid_obj()
+first to determine whether the passed pointer points to a valid slab object. This
+means that merging kmem_valid_obj() into kmem_dump_obj() will make the
+code more concise. Therefore, convert kmem_dump_obj() to work the same
+way as vmalloc_dump_obj(), removing the need for the kmem_dump_obj()
+caller to check kmem_valid_obj(). After this, there are no remaining
+calls to kmem_valid_obj() anymore, and it can be safely removed.
+
+Suggested-by: Matthew Wilcox <willy@infradead.org>
+Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/slab.h | 5 +++--
+ mm/slab_common.c | 41 +++++++++++------------------------------
+ mm/util.c | 4 +---
+ 3 files changed, 15 insertions(+), 35 deletions(-)
+
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -245,8 +245,9 @@ DEFINE_FREE(kfree, void *, if (!IS_ERR_O
+ size_t ksize(const void *objp);
+
+ #ifdef CONFIG_PRINTK
+-bool kmem_valid_obj(void *object);
+-void kmem_dump_obj(void *object);
++bool kmem_dump_obj(void *object);
++#else
++static inline bool kmem_dump_obj(void *object) { return false; }
+ #endif
+
+ /*
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -528,26 +528,6 @@ bool slab_is_available(void)
+ }
+
+ #ifdef CONFIG_PRINTK
+-/**
+- * kmem_valid_obj - does the pointer reference a valid slab object?
+- * @object: pointer to query.
+- *
+- * Return: %true if the pointer is to a not-yet-freed object from
+- * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+- * is to an already-freed object, and %false otherwise.
+- */
+-bool kmem_valid_obj(void *object)
+-{
+- struct folio *folio;
+-
+- /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+- if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+- return false;
+- folio = virt_to_folio(object);
+- return folio_test_slab(folio);
+-}
+-EXPORT_SYMBOL_GPL(kmem_valid_obj);
+-
+ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ if (__kfence_obj_info(kpp, object, slab))
+@@ -566,11 +546,11 @@ static void kmem_obj_info(struct kmem_ob
+ * and, if available, the slab name, return address, and stack trace from
+ * the allocation and last free path of that object.
+ *
+- * This function will splat if passed a pointer to a non-slab object.
+- * If you are not sure what type of object you have, you should instead
+- * use mem_dump_obj().
++ * Return: %true if the pointer is to a not-yet-freed object from
++ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
++ * is to an already-freed object, and %false otherwise.
+ */
+-void kmem_dump_obj(void *object)
++bool kmem_dump_obj(void *object)
+ {
+ char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
+ int i;
+@@ -578,13 +558,13 @@ void kmem_dump_obj(void *object)
+ unsigned long ptroffset;
+ struct kmem_obj_info kp = { };
+
+- if (WARN_ON_ONCE(!virt_addr_valid(object)))
+- return;
++ /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
++ if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
++ return false;
+ slab = virt_to_slab(object);
+- if (WARN_ON_ONCE(!slab)) {
+- pr_cont(" non-slab memory.\n");
+- return;
+- }
++ if (!slab)
++ return false;
++
+ kmem_obj_info(&kp, object, slab);
+ if (kp.kp_slab_cache)
+ pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+@@ -621,6 +601,7 @@ void kmem_dump_obj(void *object)
+ pr_info(" %pS\n", kp.kp_free_stack[i]);
+ }
+
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(kmem_dump_obj);
+ #endif
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -1070,10 +1070,8 @@ void mem_dump_obj(void *object)
+ {
+ const char *type;
+
+- if (kmem_valid_obj(object)) {
+- kmem_dump_obj(object);
++ if (kmem_dump_obj(object))
+ return;
+- }
+
+ if (vmalloc_dump_obj(object))
+ return;
--- /dev/null
+From 10a04ff09bcc39e0044190ffe9f00f998f13647c Mon Sep 17 00:00:00 2001
+From: Alexander Lobakin <aleksander.lobakin@intel.com>
+Date: Wed, 27 Mar 2024 16:23:48 +0100
+Subject: tools: move alignment-related macros to new <linux/align.h>
+
+From: Alexander Lobakin <aleksander.lobakin@intel.com>
+
+commit 10a04ff09bcc39e0044190ffe9f00f998f13647c upstream.
+
+Currently, tools have *ALIGN*() macros scattered across the unrelated
+headers, as there are only 3 of them and they were added separately
+each time on an as-needed basis.
+Anyway, let's make it more consistent with the kernel headers and allow
+using those macros outside of the mentioned headers. Create
+<linux/align.h> inside the tools/ folder and include it where needed.
+
+Signed-off-by: Yury Norov <yury.norov@gmail.com>
+Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/include/linux/align.h | 12 ++++++++++++
+ tools/include/linux/bitmap.h | 2 +-
+ tools/include/linux/mm.h | 5 +----
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+ create mode 100644 tools/include/linux/align.h
+
+--- /dev/null
++++ b/tools/include/linux/align.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef _TOOLS_LINUX_ALIGN_H
++#define _TOOLS_LINUX_ALIGN_H
++
++#include <uapi/linux/const.h>
++
++#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
++#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
++#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
++
++#endif /* _TOOLS_LINUX_ALIGN_H */
+--- a/tools/include/linux/bitmap.h
++++ b/tools/include/linux/bitmap.h
+@@ -3,6 +3,7 @@
+ #define _TOOLS_LINUX_BITMAP_H
+
+ #include <string.h>
++#include <linux/align.h>
+ #include <linux/bitops.h>
+ #include <linux/find.h>
+ #include <stdlib.h>
+@@ -127,7 +128,6 @@ static inline bool bitmap_and(unsigned l
+ #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+ #endif
+ #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+ static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -2,8 +2,8 @@
+ #ifndef _TOOLS_LINUX_MM_H
+ #define _TOOLS_LINUX_MM_H
+
++#include <linux/align.h>
+ #include <linux/mmzone.h>
+-#include <uapi/linux/const.h>
+
+ #define PAGE_SHIFT 12
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+@@ -11,9 +11,6 @@
+
+ #define PHYS_ADDR_MAX (~(phys_addr_t)0)
+
+-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+-
+ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+ #define __va(x) ((void *)((unsigned long)(x)))