6.17-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Nov 2025 12:44:19 +0000 (13:44 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Nov 2025 12:44:19 +0000 (13:44 +0100)
added patches:
mm-truncate-unmap-large-folio-on-split-failure.patch

queue-6.17/mm-truncate-unmap-large-folio-on-split-failure.patch [new file with mode: 0644]
queue-6.17/series

diff --git a/queue-6.17/mm-truncate-unmap-large-folio-on-split-failure.patch b/queue-6.17/mm-truncate-unmap-large-folio-on-split-failure.patch
new file mode 100644 (file)
index 0000000..bfbc727
--- /dev/null
+++ b/queue-6.17/mm-truncate-unmap-large-folio-on-split-failure.patch
@@ -0,0 +1,115 @@
+From fa04f5b60fda62c98a53a60de3a1e763f11feb41 Mon Sep 17 00:00:00 2001
+From: Kiryl Shutsemau <kas@kernel.org>
+Date: Mon, 27 Oct 2025 11:56:36 +0000
+Subject: mm/truncate: unmap large folio on split failure
+
+From: Kiryl Shutsemau <kas@kernel.org>
+
+commit fa04f5b60fda62c98a53a60de3a1e763f11feb41 upstream.
+
+Accesses within a VMA, but beyond i_size rounded up to PAGE_SIZE, are
+supposed to generate SIGBUS.
+
+This behavior might not be respected on truncation.
+
+During truncation, the kernel splits a large folio in order to reclaim
+memory.  As a side effect, it unmaps the folio and destroys its PMD
+mappings.  The folio will then be refaulted with PTEs, so SIGBUS
+semantics are preserved.
+
+However, if the split fails, PMD mappings are preserved and the user will
+not receive SIGBUS on any accesses within the PMD.
+
+Unmap the folio on split failure.  This will lead to a refault with
+PTEs and preserve SIGBUS semantics.
+
+Make an exception for shmem/tmpfs, which has long been intentionally
+mapped with PMDs across i_size.
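+
+As a minimal sketch of the expected semantics (illustrative only, not
+part of the upstream commit; error handling omitted): map a four-page
+file, shrink it to one page, then touch a page beyond the new i_size --
+the access should raise SIGBUS.  Note that hitting the actual bug also
+requires the file to be backed by a large folio mapped with a PMD:
+
+	#include <fcntl.h>
+	#include <signal.h>
+	#include <stdio.h>
+	#include <sys/mman.h>
+	#include <unistd.h>
+
+	static void on_sigbus(int sig)
+	{
+		/* Expected: access beyond i_size raises SIGBUS. */
+		(void)sig;
+		write(1, "SIGBUS as expected\n", 19);
+		_exit(0);
+	}
+
+	int main(void)
+	{
+		long page = sysconf(_SC_PAGESIZE);
+		int fd = open("testfile", O_CREAT | O_RDWR | O_TRUNC, 0600);
+		volatile char *map;
+
+		signal(SIGBUS, on_sigbus);
+		ftruncate(fd, 4 * page);	/* file covers four pages */
+		map = mmap(NULL, 4 * page, PROT_READ, MAP_SHARED, fd, 0);
+		ftruncate(fd, 1 * page);	/* pages 1..3 now beyond i_size */
+		(void)map[2 * page];		/* should raise SIGBUS */
+		fprintf(stderr, "no SIGBUS: stale mapping survived truncate\n");
+		return 1;
+	}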
+
+Link: https://lkml.kernel.org/r/20251027115636.82382-3-kirill@shutemov.name
+Fixes: b9a8a4195c7d ("truncate,shmem: Handle truncates that split large folios")
+Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: "Darrick J. Wong" <djwong@kernel.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/truncate.c |   35 +++++++++++++++++++++++++++++------
+ 1 file changed, 29 insertions(+), 6 deletions(-)
+
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -177,6 +177,32 @@ int truncate_inode_folio(struct address_
+       return 0;
+ }
+
++static int try_folio_split_or_unmap(struct folio *folio, struct page *split_at,
++                                  unsigned long min_order)
++{
++      enum ttu_flags ttu_flags =
++              TTU_SYNC |
++              TTU_SPLIT_HUGE_PMD |
++              TTU_IGNORE_MLOCK;
++      int ret;
++
++      ret = try_folio_split_to_order(folio, split_at, min_order);
++
++      /*
++       * If the split fails, unmap the folio, so it will be refaulted
++       * with PTEs to respect SIGBUS semantics.
++       *
++       * Make an exception for shmem/tmpfs, which has long been
++       * intentionally mapped with PMDs across i_size.
++       */
++      if (ret && !shmem_mapping(folio->mapping)) {
++              try_to_unmap(folio, ttu_flags);
++              WARN_ON(folio_mapped(folio));
++      }
++
++      return ret;
++}
++
+ /*
+  * Handle partial folios.  The folio may be entirely within the
+  * range if a split has raced with us.  If not, we zero the part of the
+@@ -226,7 +252,7 @@ bool truncate_inode_partial_folio(struct
+
+       min_order = mapping_min_folio_order(folio->mapping);
+       split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
+-      if (!try_folio_split_to_order(folio, split_at, min_order)) {
++      if (!try_folio_split_or_unmap(folio, split_at, min_order)) {
+               /*
+                * try to split at offset + length to make sure folios within
+                * the range can be dropped, especially to avoid memory waste
+@@ -250,13 +276,10 @@ bool truncate_inode_partial_folio(struct
+               if (!folio_trylock(folio2))
+                       goto out;
+
+-              /*
+-               * make sure folio2 is large and does not change its mapping.
+-               * Its split result does not matter here.
+-               */
++              /* make sure folio2 is large and does not change its mapping */
+               if (folio_test_large(folio2) &&
+                   folio2->mapping == folio->mapping)
+-                      try_folio_split_to_order(folio2, split_at2, min_order);
++                      try_folio_split_or_unmap(folio2, split_at2, min_order);
+
+               folio_unlock(folio2);
+ out:
diff --git a/queue-6.17/series b/queue-6.17/series
index 2c5f9ea76b31084432660a70c8cfb0fd9cdb15db..8d2dee4bcc64ea00b1b473773751060aab35d4ce 100644 (file)
--- a/queue-6.17/series
+++ b/queue-6.17/series
@@ -74,3 +74,4 @@ drm-amd-display-increase-dpcd-read-retries.patch
 drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch
 drm-amd-display-fix-pbn-to-kbps-conversion.patch
 drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch
+mm-truncate-unmap-large-folio-on-split-failure.patch