--- /dev/null
+From 8669dbab2ae56085c128894b181c2aa50f97e368 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 15 Jun 2021 18:23:19 -0700
+Subject: mm/slub: clarify verification reporting
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 8669dbab2ae56085c128894b181c2aa50f97e368 upstream.
+
+Patch series "Actually fix freelist pointer vs redzoning", v4.
+
+This fixes redzoning vs the freelist pointer (both for middle-position
+and very small caches). Both are "theoretical" fixes, in that I see no
+evidence of such small-sized caches actually being used in the kernel, but
+that's no reason to let the bugs continue to exist, especially since
+people doing local development keep tripping over it. :)
+
+This patch (of 3):
+
+Instead of repeating "Redzone" and "Poison", clarify which sides of
+those zones got tripped. Additionally fix column alignment in the
+trailer.
+
+Before:
+
+ BUG test (Tainted: G B ): Redzone overwritten
+ ...
+ Redzone (____ptrval____): bb bb bb bb bb bb bb bb ........
+ Object (____ptrval____): f6 f4 a5 40 1d e8 ...@..
+ Redzone (____ptrval____): 1a aa ..
+ Padding (____ptrval____): 00 00 00 00 00 00 00 00 ........
+
+After:
+
+ BUG test (Tainted: G B ): Right Redzone overwritten
+ ...
+ Redzone (____ptrval____): bb bb bb bb bb bb bb bb ........
+ Object (____ptrval____): f6 f4 a5 40 1d e8 ...@..
+ Redzone (____ptrval____): 1a aa ..
+ Padding (____ptrval____): 00 00 00 00 00 00 00 00 ........
+
+The earlier commits that slowly resulted in the "Before" reporting were:
+
+ d86bd1bece6f ("mm/slub: support left redzone")
+ ffc79d288000 ("slub: use print_hex_dump")
+ 2492268472e7 ("SLUB: change error reporting format to follow lockdep loosely")
+
+Link: https://lkml.kernel.org/r/20210608183955.280836-1-keescook@chromium.org
+Link: https://lkml.kernel.org/r/20210608183955.280836-2-keescook@chromium.org
+Link: https://lore.kernel.org/lkml/cfdb11d7-fb8e-e578-c939-f7f5fb69a6bd@suse.cz/
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Marco Elver <elver@google.com>
+Cc: "Lin, Zhenpeng" <zplin@psu.edu>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/vm/slub.rst | 10 +++++-----
+ mm/slub.c | 14 +++++++-------
+ 2 files changed, 12 insertions(+), 12 deletions(-)
+
+--- a/Documentation/vm/slub.rst
++++ b/Documentation/vm/slub.rst
+@@ -160,7 +160,7 @@ SLUB Debug output
+ Here is a sample of slub debug output::
+
+ ====================================================================
+- BUG kmalloc-8: Redzone overwritten
++ BUG kmalloc-8: Right Redzone overwritten
+ --------------------------------------------------------------------
+
+ INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+@@ -168,10 +168,10 @@ Here is a sample of slub debug output::
+ INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+ INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+
+- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
+- Redzone 0xc90f6d28: 00 cc cc cc .
+- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
++ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
++ Object (0xc90f6d20): 31 30 31 39 2e 30 30 35 1019.005
++ Redzone (0xc90f6d28): 00 cc cc cc .
++ Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+
+ [<c010523d>] dump_trace+0x63/0x1eb
+ [<c01053df>] show_trace_log_lvl+0x1a/0x2f
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -688,15 +688,15 @@ static void print_trailer(struct kmem_ca
+ p, p - addr, get_freepointer(s, p));
+
+ if (s->flags & SLAB_RED_ZONE)
+- print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
++ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+ s->red_left_pad);
+ else if (p > addr + 16)
+ print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
+
+- print_section(KERN_ERR, "Object ", p,
++ print_section(KERN_ERR, "Object ", p,
+ min_t(unsigned int, s->object_size, PAGE_SIZE));
+ if (s->flags & SLAB_RED_ZONE)
+- print_section(KERN_ERR, "Redzone ", p + s->object_size,
++ print_section(KERN_ERR, "Redzone ", p + s->object_size,
+ s->inuse - s->object_size);
+
+ off = get_info_end(s);
+@@ -708,7 +708,7 @@ static void print_trailer(struct kmem_ca
+
+ if (off != size_from_object(s))
+ /* Beginning of the filler is the free pointer */
+- print_section(KERN_ERR, "Padding ", p + off,
++ print_section(KERN_ERR, "Padding ", p + off,
+ size_from_object(s) - off);
+
+ dump_stack();
+@@ -882,11 +882,11 @@ static int check_object(struct kmem_cach
+ u8 *endobject = object + s->object_size;
+
+ if (s->flags & SLAB_RED_ZONE) {
+- if (!check_bytes_and_report(s, page, object, "Redzone",
++ if (!check_bytes_and_report(s, page, object, "Left Redzone",
+ object - s->red_left_pad, val, s->red_left_pad))
+ return 0;
+
+- if (!check_bytes_and_report(s, page, object, "Redzone",
++ if (!check_bytes_and_report(s, page, object, "Right Redzone",
+ endobject, val, s->inuse - s->object_size))
+ return 0;
+ } else {
+@@ -901,7 +901,7 @@ static int check_object(struct kmem_cach
+ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
+ (!check_bytes_and_report(s, page, p, "Poison", p,
+ POISON_FREE, s->object_size - 1) ||
+- !check_bytes_and_report(s, page, p, "Poison",
++ !check_bytes_and_report(s, page, p, "End Poison",
+ p + s->object_size - 1, POISON_END, 1)))
+ return 0;
+ /*
--- /dev/null
+From 74c1d3e081533825f2611e46edea1fcdc0701985 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 15 Jun 2021 18:23:22 -0700
+Subject: mm/slub: fix redzoning for small allocations
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 74c1d3e081533825f2611e46edea1fcdc0701985 upstream.
+
+The redzone area for SLUB exists between s->object_size and s->inuse
+(which is at least the word-aligned object_size). If a cache were
+created with an object_size smaller than sizeof(void *), the in-object
+stored freelist pointer would overwrite the redzone (e.g. with boot
+param "slub_debug=ZF"):
+
+ BUG test (Tainted: G B ): Right Redzone overwritten
+ -----------------------------------------------------------------------------
+
+ INFO: 0xffff957ead1c05de-0xffff957ead1c05df @offset=1502. First byte 0x1a instead of 0xbb
+ INFO: Slab 0xffffef3950b47000 objects=170 used=170 fp=0x0000000000000000 flags=0x8000000000000200
+ INFO: Object 0xffff957ead1c05d8 @offset=1496 fp=0xffff957ead1c0620
+
+ Redzone (____ptrval____): bb bb bb bb bb bb bb bb ........
+ Object (____ptrval____): f6 f4 a5 40 1d e8 ...@..
+ Redzone (____ptrval____): 1a aa ..
+ Padding (____ptrval____): 00 00 00 00 00 00 00 00 ........
+
+Store the freelist pointer out of line when object_size is smaller than
+sizeof(void *) and redzoning is enabled.
+
+Additionally remove the "smaller than sizeof(void *)" check under
+CONFIG_DEBUG_VM in kmem_cache_sanity_check() as it is now redundant:
+SLAB and SLOB both handle small sizes.
+
+(Note that no caches within this size range are known to exist in the
+kernel currently.)
+
+Link: https://lkml.kernel.org/r/20210608183955.280836-3-keescook@chromium.org
+Fixes: 81819f0fc828 ("SLUB core")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: "Lin, Zhenpeng" <zplin@psu.edu>
+Cc: Marco Elver <elver@google.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slab_common.c | 3 +--
+ mm/slub.c | 8 +++++---
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -85,8 +85,7 @@ EXPORT_SYMBOL(kmem_cache_size);
+ #ifdef CONFIG_DEBUG_VM
+ static int kmem_cache_sanity_check(const char *name, unsigned int size)
+ {
+- if (!name || in_interrupt() || size < sizeof(void *) ||
+- size > KMALLOC_MAX_SIZE) {
++ if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
+ pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+ return -EINVAL;
+ }
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3586,15 +3586,17 @@ static int calculate_sizes(struct kmem_c
+ */
+ s->inuse = size;
+
+- if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+- s->ctor)) {
++ if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
++ ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
++ s->ctor) {
+ /*
+ * Relocate free pointer after the object if it is not
+ * permitted to overwrite the first word of the object on
+ * kmem_cache_free.
+ *
+ * This is the case if we do RCU, have a constructor or
+- * destructor or are poisoning the objects.
++ * destructor, are poisoning the objects, or are
++ * redzoning an object smaller than sizeof(void *).
+ *
+ * The assumption that s->offset >= s->inuse means free
+ * pointer is outside of the object is used in the