From d59a3838c75557266b489e56880eb300194bcc7c Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Mon, 14 Oct 2019 14:11:57 -0700
Subject: mm/slub.c: init_on_free=1 should wipe freelist ptr for bulk
 allocations

From: Alexander Potapenko <glider@google.com>

[ Upstream commit 0f181f9fbea8bc7ea2f7e13ae7f8c256b39e254c ]
slab_alloc_node() already zeroes out the freelist pointer if
init_on_free is on. Thibaut Sautereau noticed that the same needs to
be done for kmem_cache_alloc_bulk(), which performs the allocations
separately.
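
For illustration, this is the fastpath check in question, quoted from
the hunk below that moves it into the new helper. SLUB stores the
freelist pointer inside each free object at offset s->offset, so
without this memset a wiped object would be handed back with that
pointer still written into it:

	/* Clear the freelist pointer kept at s->offset inside the object. */
	if (unlikely(slab_want_init_on_free(s)) && object)
		memset(object + s->offset, 0, sizeof(void *));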

kmem_cache_alloc_bulk() is currently used in two places in the kernel,
so this change is unlikely to have a major performance impact.
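
For reference, a minimal sketch of a bulk-API caller (cachep and the
array size are illustrative placeholders, not taken from either
in-kernel user):

	void *objs[8];

	/* Returns the number of objects allocated, 0 on failure. */
	if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;
	/* With this fix, init_on_free=1 leaves no stale freelist
	 * pointer in any of the returned objects. */
	kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);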

SLAB doesn't require a similar change, as auto-initialization makes the
allocator store the freelist pointers off-slab, rather than inside the
free objects as SLUB does.

Link: http://lkml.kernel.org/r/20191007091605.30530-1-glider@google.com
Fixes: 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options")
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Thibaut Sautereau <thibaut@sautereau.fr>
Reported-by: Kees Cook <keescook@chromium.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Laura Abbott <labbott@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/slub.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d2445dd1c7eda..f24ea152cdbb3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2648,6 +2648,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return p;
 }
 
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+						   void *obj)
+{
+	if (unlikely(slab_want_init_on_free(s)) && obj)
+		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
@@ -2736,12 +2747,8 @@ redo:
 		prefetch_freepointer(s, next_object);
 		stat(s, ALLOC_FASTPATH);
 	}
-	/*
-	 * If the object has been wiped upon free, make sure it's fully
-	 * initialized by zeroing out freelist pointer.
-	 */
-	if (unlikely(slab_want_init_on_free(s)) && object)
-		memset(object + s->offset, 0, sizeof(void *));
+
+	maybe_wipe_obj_freeptr(s, object);
 
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(object, 0, s->object_size);
@@ -3155,10 +3162,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 				goto error;
 
 			c = this_cpu_ptr(s->cpu_slab);
+			maybe_wipe_obj_freeptr(s, p[i]);
+
 			continue; /* goto for-loop */
 		}
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
+		maybe_wipe_obj_freeptr(s, p[i]);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
-- 
2.20.1
