git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.14
author Sasha Levin <sashal@kernel.org>
Sun, 26 Apr 2020 23:28:01 +0000 (19:28 -0400)
committer Sasha Levin <sashal@kernel.org>
Sun, 26 Apr 2020 23:28:01 +0000 (19:28 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.14/mm-slub-restore-the-original-intention-of-prefetch_f.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/mm-slub-restore-the-original-intention-of-prefetch_f.patch b/queue-4.14/mm-slub-restore-the-original-intention-of-prefetch_f.patch
new file mode 100644
index 0000000..1e6f5e3
--- /dev/null
+++ b/queue-4.14/mm-slub-restore-the-original-intention-of-prefetch_f.patch
@@ -0,0 +1,56 @@
+From 0c0f73796f5527c5d65c76d1f2ac58dc6bb1ceb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Apr 2020 09:06:17 +0200
+Subject: mm, slub: restore the original intention of prefetch_freepointer()
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 0882ff9190e3bc51e2d78c3aadd7c690eeaa91d5 upstream.
+
+In SLUB, prefetch_freepointer() is used when allocating an object from
+cache's freelist, to make sure the next object in the list is cache-hot,
+since it's probable it will be allocated soon.
+
+Commit 2482ddec670f ("mm: add SLUB free list pointer obfuscation") has
+unintentionally changed the prefetch in a way where the prefetch is
+turned to a real fetch, and only the next->next pointer is prefetched.
+In case there is not a stream of allocations that would benefit from
+prefetching, the extra real fetch might add a useless cache miss to the
+allocation.  Restore the previous behavior.
+
+Link: http://lkml.kernel.org/r/20180809085245.22448-1-vbabka@suse.cz
+Fixes: 2482ddec670f ("mm: add SLUB free list pointer obfuscation")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Kees Cook <keescook@chromium.org>
+Cc: Daniel Micay <danielmicay@gmail.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Matthias Schiffer <mschiffer@universe-factory.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/slub.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 3c1a16f03b2bd..481518c3f61a9 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -269,8 +269,7 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
+ 
+ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+ {
+-      if (object)
+-              prefetch(freelist_dereference(s, object + s->offset));
++      prefetch(object + s->offset);
+ }
+ 
+ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+-- 
+2.20.1
+
diff --git a/queue-4.14/series b/queue-4.14/series
index 83acaae5b55ae14361716a8b484ab75984d3f5b7..5e8d8f96c6cfb5995ee3b209b227194098c8d337 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -20,3 +20,4 @@ pwm-renesas-tpu-fix-late-runtime-pm-enablement.patch
 pwm-bcm2835-dynamically-allocate-base.patch
 perf-core-disable-page-faults-when-getting-phys-addr.patch
 pci-aspm-allow-re-enabling-clock-pm.patch
+mm-slub-restore-the-original-intention-of-prefetch_f.patch
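The commit message above can be illustrated with a small standalone sketch. The code below is not the kernel source; it uses simplified stand-ins for freelist_dereference() and prefetch() (the real definitions live in mm/slub.c and the arch headers) to show why prefetch(freelist_dereference(...)) performs a real, potentially cache-missing load of the next object's free pointer and only prefetches next->next, while the restored prefetch(object + s->offset) is a pure prefetch hint on the next object itself.

/*
 * Illustrative sketch only -- simplified stand-ins, not the kernel's
 * definitions.  With CONFIG_SLAB_FREELIST_HARDENED, each freelist
 * pointer is stored XORed with a per-cache key and its own address,
 * so recovering the next pointer requires a real memory load.
 */
#include <stddef.h>

struct kmem_cache {
	size_t offset;		/* offset of the free pointer inside an object */
	unsigned long random;	/* per-cache obfuscation key */
};

static inline void prefetch(const void *addr)
{
	__builtin_prefetch(addr);	/* a hint: never faults, never stalls */
}

/* Simplified de-obfuscation: the load of *ptr_addr is a real memory access. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	unsigned long stored = *(unsigned long *)ptr_addr;	/* real fetch */

	return (void *)(stored ^ s->random ^ (unsigned long)ptr_addr);
}

/* After 2482ddec670f: loads next's free pointer (a possible cache miss),
 * then merely prefetches next->next. */
static void prefetch_freepointer_old(const struct kmem_cache *s, void *object)
{
	if (object)
		prefetch(freelist_dereference(s, (char *)object + s->offset));
}

/* Restored behaviour: a pure prefetch of the next object's cache line;
 * a prefetch cannot fault, so no NULL check is needed. */
static void prefetch_freepointer_new(const struct kmem_cache *s, void *object)
{
	prefetch((char *)object + s->offset);
}

In the allocation fast path the argument is the next object on the freelist, so the restored version warms exactly the object that is likely to be handed out next, without adding a dependent load of its own.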