5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Mon, 21 Jun 2021 13:09:07 +0000 (15:09 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Mon, 21 Jun 2021 13:09:07 +0000 (15:09 +0200)
added patches:
mm-slub-actually-fix-freelist-pointer-vs-redzoning.patch
mm-slub-clarify-verification-reporting.patch
mm-slub-fix-redzoning-for-small-allocations.patch
mm-slub.c-include-swab.h.patch
mm-swap-fix-pte_same_as_swp-not-removing-uffd-wp-bit-when-compare.patch

queue-5.10/mm-slub-actually-fix-freelist-pointer-vs-redzoning.patch [new file with mode: 0644]
queue-5.10/mm-slub-clarify-verification-reporting.patch [new file with mode: 0644]
queue-5.10/mm-slub-fix-redzoning-for-small-allocations.patch [new file with mode: 0644]
queue-5.10/mm-slub.c-include-swab.h.patch [new file with mode: 0644]
queue-5.10/mm-swap-fix-pte_same_as_swp-not-removing-uffd-wp-bit-when-compare.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/mm-slub-actually-fix-freelist-pointer-vs-redzoning.patch b/queue-5.10/mm-slub-actually-fix-freelist-pointer-vs-redzoning.patch
new file mode 100644
index 0000000..7fa3578
--- /dev/null
@@ -0,0 +1,104 @@
+From e41a49fadbc80b60b48d3c095d9e2ee7ef7c9a8e Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 15 Jun 2021 18:23:26 -0700
+Subject: mm/slub: actually fix freelist pointer vs redzoning
+
+From: Kees Cook <keescook@chromium.org>
+
+commit e41a49fadbc80b60b48d3c095d9e2ee7ef7c9a8e upstream.
+
+It turns out that SLUB redzoning ("slub_debug=Z") checks from
+s->object_size rather than from s->inuse (which is normally bumped to
+make room for the freelist pointer), so a cache created with an object
+size less than 24 would have the freelist pointer written beyond
+s->object_size, causing the redzone to be corrupted by the freelist
+pointer.  This was very visible with "slub_debug=ZF":
+
+  BUG test (Tainted: G    B            ): Right Redzone overwritten
+  -----------------------------------------------------------------------------
+
+  INFO: 0xffff957ead1c05de-0xffff957ead1c05df @offset=1502. First byte 0x1a instead of 0xbb
+  INFO: Slab 0xffffef3950b47000 objects=170 used=170 fp=0x0000000000000000 flags=0x8000000000000200
+  INFO: Object 0xffff957ead1c05d8 @offset=1496 fp=0xffff957ead1c0620
+
+  Redzone  (____ptrval____): bb bb bb bb bb bb bb bb               ........
+  Object   (____ptrval____): 00 00 00 00 00 f6 f4 a5               ........
+  Redzone  (____ptrval____): 40 1d e8 1a aa                        @....
+  Padding  (____ptrval____): 00 00 00 00 00 00 00 00               ........
+
+Adjust the offset to stay within s->object_size.
+
+(Note that no caches in this size range are known to exist in the
+kernel currently.)
+
+Link: https://lkml.kernel.org/r/20210608183955.280836-4-keescook@chromium.org
+Link: https://lore.kernel.org/linux-mm/20200807160627.GA1420741@elver.google.com/
+Link: https://lore.kernel.org/lkml/0f7dd7b2-7496-5e2d-9488-2ec9f8e90441@suse.cz/
+Fixes: 89b83f282d8b ("slub: avoid redzone when choosing freepointer location")
+Link: https://lore.kernel.org/lkml/CANpmjNOwZ5VpKQn+SYWovTkFB4VsT-RPwyENBmaK0dLcpqStkA@mail.gmail.com
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reported-by: Marco Elver <elver@google.com>
+Reported-by: "Lin, Zhenpeng" <zplin@psu.edu>
+Tested-by: Marco Elver <elver@google.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   14 +++-----------
+ 1 file changed, 3 insertions(+), 11 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3639,7 +3639,6 @@ static int calculate_sizes(struct kmem_c
+ {
+       slab_flags_t flags = s->flags;
+       unsigned int size = s->object_size;
+-      unsigned int freepointer_area;
+       unsigned int order;
+       /*
+@@ -3648,13 +3647,6 @@ static int calculate_sizes(struct kmem_c
+        * the possible location of the free pointer.
+        */
+       size = ALIGN(size, sizeof(void *));
+-      /*
+-       * This is the area of the object where a freepointer can be
+-       * safely written. If redzoning adds more to the inuse size, we
+-       * can't use that portion for writing the freepointer, so
+-       * s->offset must be limited within this for the general case.
+-       */
+-      freepointer_area = size;
+ #ifdef CONFIG_SLUB_DEBUG
+       /*
+@@ -3680,7 +3672,7 @@ static int calculate_sizes(struct kmem_c
+       /*
+        * With that we have determined the number of bytes in actual use
+-       * by the object. This is the potential offset to the free pointer.
++       * by the object and redzoning.
+        */
+       s->inuse = size;
+@@ -3703,13 +3695,13 @@ static int calculate_sizes(struct kmem_c
+                */
+               s->offset = size;
+               size += sizeof(void *);
+-      } else if (freepointer_area > sizeof(void *)) {
++      } else {
+               /*
+                * Store freelist pointer near middle of object to keep
+                * it away from the edges of the object to avoid small
+                * sized over/underflows from neighboring allocations.
+                */
+-              s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
++              s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
+       }
+ #ifdef CONFIG_SLUB_DEBUG
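The offset arithmetic above is easy to check outside the kernel. Below is a
minimal user-space sketch (illustrative only, not part of the queued patch;
the ALIGN macros mirror the kernel's for power-of-two alignment) of why the
old ALIGN(size / 2) placement lands past s->object_size for a tiny cache,
while ALIGN_DOWN(s->object_size / 2) stays inside the object:

  #include <stdio.h>

  #define ALIGN(x, a)      (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
  #define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))

  int main(void)
  {
          unsigned long object_size = 8;  /* a hypothetical 8-byte cache */
          unsigned long size = ALIGN(object_size, sizeof(void *));
          unsigned long old_off = ALIGN(size / 2, sizeof(void *));
          unsigned long new_off = ALIGN_DOWN(object_size / 2, sizeof(void *));

          /* old: offset 8, so the 8-byte pointer spans bytes 8..15 -- redzone */
          printf("old offset=%lu, pointer ends at %lu\n",
                 old_off, old_off + sizeof(void *));
          /* new: offset 0, so the pointer spans bytes 0..7 -- inside object */
          printf("new offset=%lu, pointer ends at %lu\n",
                 new_off, new_off + sizeof(void *));
          return 0;
  }

With object_size = 8 this prints old offset=8 (the freelist pointer written
entirely beyond the object, as in the log above) versus new offset=0.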
diff --git a/queue-5.10/mm-slub-clarify-verification-reporting.patch b/queue-5.10/mm-slub-clarify-verification-reporting.patch
new file mode 100644
index 0000000..767e628
--- /dev/null
@@ -0,0 +1,147 @@
+From 8669dbab2ae56085c128894b181c2aa50f97e368 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 15 Jun 2021 18:23:19 -0700
+Subject: mm/slub: clarify verification reporting
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 8669dbab2ae56085c128894b181c2aa50f97e368 upstream.
+
+Patch series "Actually fix freelist pointer vs redzoning", v4.
+
+This fixes redzoning vs the freelist pointer (both for middle-position
+and very small caches).  Both are "theoretical" fixes, in that I see no
+evidence of such small-sized caches actually being used in the kernel, but
+that's no reason to let the bugs continue to exist, especially since
+people doing local development keep tripping over them.  :)
+
+This patch (of 3):
+
+Instead of repeating "Redzone" and "Poison", clarify which sides of
+those zones got tripped.  Additionally fix column alignment in the
+trailer.
+
+Before:
+
+  BUG test (Tainted: G    B            ): Redzone overwritten
+  ...
+  Redzone (____ptrval____): bb bb bb bb bb bb bb bb      ........
+  Object (____ptrval____): f6 f4 a5 40 1d e8            ...@..
+  Redzone (____ptrval____): 1a aa                        ..
+  Padding (____ptrval____): 00 00 00 00 00 00 00 00      ........
+
+After:
+
+  BUG test (Tainted: G    B            ): Right Redzone overwritten
+  ...
+  Redzone  (____ptrval____): bb bb bb bb bb bb bb bb      ........
+  Object   (____ptrval____): f6 f4 a5 40 1d e8            ...@..
+  Redzone  (____ptrval____): 1a aa                        ..
+  Padding  (____ptrval____): 00 00 00 00 00 00 00 00      ........
+
+The earlier commits that slowly resulted in the "Before" reporting were:
+
+  d86bd1bece6f ("mm/slub: support left redzone")
+  ffc79d288000 ("slub: use print_hex_dump")
+  2492268472e7 ("SLUB: change error reporting format to follow lockdep loosely")
+
+Link: https://lkml.kernel.org/r/20210608183955.280836-1-keescook@chromium.org
+Link: https://lkml.kernel.org/r/20210608183955.280836-2-keescook@chromium.org
+Link: https://lore.kernel.org/lkml/cfdb11d7-fb8e-e578-c939-f7f5fb69a6bd@suse.cz/
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Marco Elver <elver@google.com>
+Cc: "Lin, Zhenpeng" <zplin@psu.edu>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/vm/slub.rst |   10 +++++-----
+ mm/slub.c                 |   14 +++++++-------
+ 2 files changed, 12 insertions(+), 12 deletions(-)
+
+--- a/Documentation/vm/slub.rst
++++ b/Documentation/vm/slub.rst
+@@ -181,7 +181,7 @@ SLUB Debug output
+ Here is a sample of slub debug output::
+  ====================================================================
+- BUG kmalloc-8: Redzone overwritten
++ BUG kmalloc-8: Right Redzone overwritten
+  --------------------------------------------------------------------
+  INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
+  INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+  INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+- Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+-   Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
+-  Redzone 0xc90f6d28:  00 cc cc cc                                     .
+-  Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
++ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
++ Object   (0xc90f6d20): 31 30 31 39 2e 30 30 35                         1019.005
++ Redzone  (0xc90f6d28): 00 cc cc cc                                     .
++ Padding  (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+    [<c010523d>] dump_trace+0x63/0x1eb
+    [<c01053df>] show_trace_log_lvl+0x1a/0x2f
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -698,15 +698,15 @@ static void print_trailer(struct kmem_ca
+              p, p - addr, get_freepointer(s, p));
+       if (s->flags & SLAB_RED_ZONE)
+-              print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
++              print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
+                             s->red_left_pad);
+       else if (p > addr + 16)
+               print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
+-      print_section(KERN_ERR, "Object ", p,
++      print_section(KERN_ERR,         "Object   ", p,
+                     min_t(unsigned int, s->object_size, PAGE_SIZE));
+       if (s->flags & SLAB_RED_ZONE)
+-              print_section(KERN_ERR, "Redzone ", p + s->object_size,
++              print_section(KERN_ERR, "Redzone  ", p + s->object_size,
+                       s->inuse - s->object_size);
+       off = get_info_end(s);
+@@ -718,7 +718,7 @@ static void print_trailer(struct kmem_ca
+       if (off != size_from_object(s))
+               /* Beginning of the filler is the free pointer */
+-              print_section(KERN_ERR, "Padding ", p + off,
++              print_section(KERN_ERR, "Padding  ", p + off,
+                             size_from_object(s) - off);
+       dump_stack();
+@@ -895,11 +895,11 @@ static int check_object(struct kmem_cach
+       u8 *endobject = object + s->object_size;
+       if (s->flags & SLAB_RED_ZONE) {
+-              if (!check_bytes_and_report(s, page, object, "Redzone",
++              if (!check_bytes_and_report(s, page, object, "Left Redzone",
+                       object - s->red_left_pad, val, s->red_left_pad))
+                       return 0;
+-              if (!check_bytes_and_report(s, page, object, "Redzone",
++              if (!check_bytes_and_report(s, page, object, "Right Redzone",
+                       endobject, val, s->inuse - s->object_size))
+                       return 0;
+       } else {
+@@ -914,7 +914,7 @@ static int check_object(struct kmem_cach
+               if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
+                       (!check_bytes_and_report(s, page, p, "Poison", p,
+                                       POISON_FREE, s->object_size - 1) ||
+-                       !check_bytes_and_report(s, page, p, "Poison",
++                       !check_bytes_and_report(s, page, p, "End Poison",
+                               p + s->object_size - 1, POISON_END, 1)))
+                       return 0;
+               /*
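As a reading aid for the new labels: with redzoning enabled an allocation is
laid out as left redzone | object | right redzone | padding, and check_object()
scans each redzone stripe for the expected fill byte. A toy user-space version
of the right-hand check (hypothetical helper names; 0xbb matches the kernel's
SLUB_RED_INACTIVE poison value):

  #include <stdio.h>
  #include <string.h>

  #define RED_INACTIVE 0xbb  /* fill byte, as in include/linux/poison.h */

  /* Index of the first corrupted redzone byte, or -1 if the zone is clean. */
  static int first_bad_byte(const unsigned char *zone, size_t len)
  {
          for (size_t i = 0; i < len; i++)
                  if (zone[i] != RED_INACTIVE)
                          return (int)i;
          return -1;
  }

  int main(void)
  {
          unsigned char slot[8 + 8];          /* 8-byte object + right redzone */
          memset(slot, RED_INACTIVE, sizeof(slot));
          memset(slot, 0, 8);                 /* object handed out and zeroed */
          slot[10] = 0x1a;                    /* stray write into the redzone */

          int bad = first_bad_byte(slot + 8, 8);
          if (bad >= 0)
                  printf("Right Redzone overwritten @offset=%d, 0x%02x instead of 0x%02x\n",
                         8 + bad, slot[8 + bad], RED_INACTIVE);
          return 0;
  }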
diff --git a/queue-5.10/mm-slub-fix-redzoning-for-small-allocations.patch b/queue-5.10/mm-slub-fix-redzoning-for-small-allocations.patch
new file mode 100644
index 0000000..cc97b35
--- /dev/null
@@ -0,0 +1,92 @@
+From 74c1d3e081533825f2611e46edea1fcdc0701985 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 15 Jun 2021 18:23:22 -0700
+Subject: mm/slub: fix redzoning for small allocations
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 74c1d3e081533825f2611e46edea1fcdc0701985 upstream.
+
+The redzone area for SLUB exists between s->object_size and s->inuse
+(which is at least the word-aligned object_size).  If a cache were
+created with an object_size smaller than sizeof(void *), the in-object
+stored freelist pointer would overwrite the redzone (e.g.  with boot
+param "slub_debug=ZF"):
+
+  BUG test (Tainted: G    B            ): Right Redzone overwritten
+  -----------------------------------------------------------------------------
+
+  INFO: 0xffff957ead1c05de-0xffff957ead1c05df @offset=1502. First byte 0x1a instead of 0xbb
+  INFO: Slab 0xffffef3950b47000 objects=170 used=170 fp=0x0000000000000000 flags=0x8000000000000200
+  INFO: Object 0xffff957ead1c05d8 @offset=1496 fp=0xffff957ead1c0620
+
+  Redzone  (____ptrval____): bb bb bb bb bb bb bb bb    ........
+  Object   (____ptrval____): f6 f4 a5 40 1d e8          ...@..
+  Redzone  (____ptrval____): 1a aa                      ..
+  Padding  (____ptrval____): 00 00 00 00 00 00 00 00    ........
+
+Store the freelist pointer out of line when object_size is smaller than
+sizeof(void *) and redzoning is enabled.
+
+Additionally remove the "smaller than sizeof(void *)" check under
+CONFIG_DEBUG_VM in kmem_cache_sanity_check() as it is now redundant:
+SLAB and SLOB both handle small sizes.
+
+(Note that no caches within this size range are known to exist in the
+kernel currently.)
+
+Link: https://lkml.kernel.org/r/20210608183955.280836-3-keescook@chromium.org
+Fixes: 81819f0fc828 ("SLUB core")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: "Lin, Zhenpeng" <zplin@psu.edu>
+Cc: Marco Elver <elver@google.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slab_common.c |    3 +--
+ mm/slub.c        |    8 +++++---
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -87,8 +87,7 @@ EXPORT_SYMBOL(kmem_cache_size);
+ #ifdef CONFIG_DEBUG_VM
+ static int kmem_cache_sanity_check(const char *name, unsigned int size)
+ {
+-      if (!name || in_interrupt() || size < sizeof(void *) ||
+-              size > KMALLOC_MAX_SIZE) {
++      if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
+               pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+               return -EINVAL;
+       }
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3684,15 +3684,17 @@ static int calculate_sizes(struct kmem_c
+        */
+       s->inuse = size;
+-      if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+-              s->ctor)) {
++      if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
++          ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
++          s->ctor) {
+               /*
+                * Relocate free pointer after the object if it is not
+                * permitted to overwrite the first word of the object on
+                * kmem_cache_free.
+                *
+                * This is the case if we do RCU, have a constructor or
+-               * destructor or are poisoning the objects.
++               * destructor, are poisoning the objects, or are
++               * redzoning an object smaller than sizeof(void *).
+                *
+                * The assumption that s->offset >= s->inuse means free
+                * pointer is outside of the object is used in the
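Put differently, the hunk above adds one more case to the "free pointer must
live after the object" rule. A hedged sketch of the resulting predicate
(hypothetical standalone helper, condensed from calculate_sizes() above):

  /* True when the free pointer may not overwrite the object on free:
   * RCU typesafety and poisoning need freed objects left intact, a
   * constructor's state must survive free, and a red-zoned object
   * smaller than sizeof(void *) has no word to spare for in-object
   * storage at all. */
  static bool needs_offset_freepointer(slab_flags_t flags, bool has_ctor,
                                       unsigned int object_size)
  {
          return (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
                 ((flags & SLAB_RED_ZONE) && object_size < sizeof(void *)) ||
                 has_ctor;
  }

When this holds, calculate_sizes() sets s->offset = size and grows the slot by
sizeof(void *), so the pointer sits after the right redzone instead of inside it.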
diff --git a/queue-5.10/mm-slub.c-include-swab.h.patch b/queue-5.10/mm-slub.c-include-swab.h.patch
new file mode 100644
index 0000000..b74f615
--- /dev/null
@@ -0,0 +1,35 @@
+From 1b3865d016815cbd69a1879ca1c8a8901fda1072 Mon Sep 17 00:00:00 2001
+From: Andrew Morton <akpm@linux-foundation.org>
+Date: Tue, 15 Jun 2021 18:23:39 -0700
+Subject: mm/slub.c: include swab.h
+
+From: Andrew Morton <akpm@linux-foundation.org>
+
+commit 1b3865d016815cbd69a1879ca1c8a8901fda1072 upstream.
+
+Fixes build with CONFIG_SLAB_FREELIST_HARDENED=y.
+
+Hopefully.  But it's the right thing to do anyway.
+
+Fixes: 1ad53d9fa3f61 ("slub: improve bit diffusion for freelist ptr obfuscation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=213417
+Reported-by: <vannguye@cisco.com>
+Acked-by: Kees Cook <keescook@chromium.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -15,6 +15,7 @@
+ #include <linux/module.h>
+ #include <linux/bit_spinlock.h>
+ #include <linux/interrupt.h>
++#include <linux/swab.h>
+ #include <linux/bitops.h>
+ #include <linux/slab.h>
+ #include "slab.h"
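For context: the build breakage comes from freelist pointer hardening, which
since 1ad53d9fa3f61 mixes swab() into the pointer obfuscation so that an
address's predictable low bits land on the stored value's high bits. Roughly
(simplified from mm/slub.c; the 5.10 code also folds a KASAN tag reset into
ptr_addr):

  static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
                                   unsigned long ptr_addr)
  {
  #ifdef CONFIG_SLAB_FREELIST_HARDENED
          /* byte-swapping ptr_addr diffuses the address bits across the
           * XOR mask instead of leaving the low bits mostly untouched */
          return (void *)((unsigned long)ptr ^ s->random ^ swab(ptr_addr));
  #else
          return ptr;
  #endif
  }

Nothing guarantees another header pulls in the swab() declaration, hence the
explicit #include <linux/swab.h>.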
diff --git a/queue-5.10/mm-swap-fix-pte_same_as_swp-not-removing-uffd-wp-bit-when-compare.patch b/queue-5.10/mm-swap-fix-pte_same_as_swp-not-removing-uffd-wp-bit-when-compare.patch
new file mode 100644
index 0000000..5c7c81b
--- /dev/null
@@ -0,0 +1,70 @@
+From 099dd6878b9b12d6bbfa6bf29ce0c8ddd38f6901 Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Tue, 15 Jun 2021 18:23:16 -0700
+Subject: mm/swap: fix pte_same_as_swp() not removing uffd-wp bit when compare
+
+From: Peter Xu <peterx@redhat.com>
+
+commit 099dd6878b9b12d6bbfa6bf29ce0c8ddd38f6901 upstream.
+
+By pure code review, I found that pte_same_as_swp() of unuse_vma()
+didn't take the uffd-wp bit into account when comparing ptes.
+pte_same_as_swp() returning a false negative could cause a failure to
+swapoff swap ptes that were wr-protected by userfaultfd.
+
+Link: https://lkml.kernel.org/r/20210603180546.9083-1-peterx@redhat.com
+Fixes: f45ec5ff16a7 ("userfaultfd: wp: support swap and page migration")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: <stable@vger.kernel.org>   [5.7+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/swapops.h |   15 +++++++++++----
+ mm/swapfile.c           |    2 +-
+ 2 files changed, 12 insertions(+), 5 deletions(-)
+
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -23,6 +23,16 @@
+ #define SWP_TYPE_SHIFT        (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
+ #define SWP_OFFSET_MASK       ((1UL << SWP_TYPE_SHIFT) - 1)
++/* Clear all flags but only keep swp_entry_t related information */
++static inline pte_t pte_swp_clear_flags(pte_t pte)
++{
++      if (pte_swp_soft_dirty(pte))
++              pte = pte_swp_clear_soft_dirty(pte);
++      if (pte_swp_uffd_wp(pte))
++              pte = pte_swp_clear_uffd_wp(pte);
++      return pte;
++}
++
+ /*
+  * Store a type+offset into a swp_entry_t in an arch-independent format
+  */
+@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_ent
+ {
+       swp_entry_t arch_entry;
+-      if (pte_swp_soft_dirty(pte))
+-              pte = pte_swp_clear_soft_dirty(pte);
+-      if (pte_swp_uffd_wp(pte))
+-              pte = pte_swp_clear_uffd_wp(pte);
++      pte = pte_swp_clear_flags(pte);
+       arch_entry = __pte_to_swp_entry(pte);
+       return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+ }
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1903,7 +1903,7 @@ unsigned int count_swap_pages(int type,
+ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
+ {
+-      return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
++      return pte_same(pte_swp_clear_flags(pte), swp_pte);
+ }
+ /*
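The failure mode is easy to model with toy bit arithmetic (hypothetical flag
positions, not real pte encodings): a swap pte that userfaultfd wr-protected
carries an extra status bit, so the old compare, which stripped only
soft-dirty, never matched it:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define SWP_SOFT_DIRTY (1u << 0)  /* hypothetical flag bits */
  #define SWP_UFFD_WP    (1u << 1)

  static uint32_t swp_clear_flags(uint32_t pte)
  {
          return pte & ~(SWP_SOFT_DIRTY | SWP_UFFD_WP);
  }

  static bool same_old(uint32_t pte, uint32_t swp_pte)
  {
          return (pte & ~SWP_SOFT_DIRTY) == swp_pte;  /* misses uffd-wp */
  }

  static bool same_new(uint32_t pte, uint32_t swp_pte)
  {
          return swp_clear_flags(pte) == swp_clear_flags(swp_pte);
  }

  int main(void)
  {
          uint32_t swp_pte = 0x1000;             /* entry swapoff rebuilds */
          uint32_t pte = swp_pte | SWP_UFFD_WP;  /* same entry, wr-protected */

          printf("old compare: %d, new compare: %d\n",
                 same_old(pte, swp_pte), same_new(pte, swp_pte));
          return 0;
  }

The old compare prints 0 for the wr-protected pte -- exactly the false
negative that made swapoff skip it.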
diff --git a/queue-5.10/series b/queue-5.10/series
index 100d53997d1cd1ff8129d04ed2ce997b816ff474..6d82b371d1d2a9a7bd1121ee3a69d4068397cb94 100644
@@ -133,3 +133,8 @@ net-ll_temac-make-sure-to-free-skb-when-it-is-completely-used.patch
 net-ll_temac-fix-tx-bd-buffer-overwrite.patch
 net-bridge-fix-vlan-tunnel-dst-null-pointer-dereference.patch
 net-bridge-fix-vlan-tunnel-dst-refcnt-when-egressing.patch
+mm-swap-fix-pte_same_as_swp-not-removing-uffd-wp-bit-when-compare.patch
+mm-slub-clarify-verification-reporting.patch
+mm-slub-fix-redzoning-for-small-allocations.patch
+mm-slub-actually-fix-freelist-pointer-vs-redzoning.patch
+mm-slub.c-include-swab.h.patch