--- /dev/null
+From 77008e1b2ef73249bceb078a321a3ff6bc087afb Mon Sep 17 00:00:00 2001
+From: Zi Yan <ziy@nvidia.com>
+Date: Thu, 16 Oct 2025 21:36:30 -0400
+Subject: mm/huge_memory: do not change split_huge_page*() target order silently
+
+From: Zi Yan <ziy@nvidia.com>
+
+commit 77008e1b2ef73249bceb078a321a3ff6bc087afb upstream.
+
+Page cache folios from a file system that supports large block sizes
+(LBS) can have a minimum folio order greater than 0, so a high-order
+folio might not be splittable down to order-0. Commit e220917fa507 ("mm:
+split a folio in minimum folio order chunks") bumps the target order of
+split_huge_page*() to the minimum allowed order when splitting an LBS
+folio. This confuses some split_huge_page*() callers, such as the memory
+failure handling code, which expect all after-split folios to be order-0
+when the split succeeds but instead get folios of min_order_for_split()
+order and emit warnings.
+
+Fix it by failing a split if the folio cannot be split to the target
+order. Rename try_folio_split() to try_folio_split_to_order() to reflect
+the added new_order parameter. Remove its unused list parameter.
+
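+After this change, a caller that can cope with folios larger than
+order-0 must either use the renamed try_folio_split_to_order() or ask
+for the minimum order explicitly. A minimal sketch of the latter (not
+part of this patch; the helper name is invented, the two callees are the
+ones visible in the diff below):
+
+  #include <linux/huge_mm.h>
+  #include <linux/mm.h>
+
+  /* Caller must hold the folio lock, as the split code requires. */
+  static int split_folio_to_min_order(struct folio *folio)
+  {
+          int min_order = min_order_for_split(folio);
+
+          if (min_order < 0)
+                  return min_order;
+
+          /* May legitimately produce non-order-0 folios for LBS files. */
+          return split_huge_page_to_list_to_order(&folio->page, NULL,
+                                                  min_order);
+  }
+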
+[The test poisons LBS folios, which cannot be split to order-0 folios,
+and also tries to poison all memory. The non-split LBS folios take more
+memory than the test anticipated, leading to OOM. The patch fixes the
+kernel warning; the test still needs changes to avoid the OOM.]
+
+Link: https://lkml.kernel.org/r/20251017013630.139907-1-ziy@nvidia.com
+Fixes: e220917fa507 ("mm: split a folio in minimum folio order chunks")
+Signed-off-by: Zi Yan <ziy@nvidia.com>
+Reported-by: syzbot+e6367ea2fdab6ed46056@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68d2c943.a70a0220.1b52b.02b3.GAE@google.com/
+Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
+Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Dev Jain <dev.jain@arm.com>
+Cc: Jane Chu <jane.chu@oracle.com>
+Cc: Lance Yang <lance.yang@linux.dev>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Mariano Pache <npache@redhat.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/huge_mm.h | 21 +++++++--------------
+ mm/huge_memory.c | 7 +------
+ 2 files changed, 8 insertions(+), 20 deletions(-)
+
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -353,20 +353,7 @@ int min_order_for_split(struct folio *fo
+ int split_folio_to_list(struct folio *folio, struct list_head *list);
+ static inline int split_huge_page(struct page *page)
+ {
+- struct folio *folio = page_folio(page);
+- int ret = min_order_for_split(folio);
+-
+- if (ret < 0)
+- return ret;
+-
+- /*
+- * split_huge_page() locks the page before splitting and
+- * expects the same page that has been split to be locked when
+- * returned. split_folio(page_folio(page)) cannot be used here
+- * because it converts the page to folio and passes the head
+- * page to be split.
+- */
+- return split_huge_page_to_list_to_order(page, NULL, ret);
++ return split_huge_page_to_list_to_order(page, NULL, 0);
+ }
+ void deferred_split_folio(struct folio *folio, bool partially_mapped);
+
+@@ -538,6 +525,12 @@ static inline int split_huge_page(struct
+ return 0;
+ }
+
++static inline int min_order_for_split(struct folio *folio)
++{
++ VM_WARN_ON_ONCE_FOLIO(1, folio);
++ return -EINVAL;
++}
++
+ static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+ {
+ return 0;
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3597,12 +3597,7 @@ int min_order_for_split(struct folio *fo
+
+ int split_folio_to_list(struct folio *folio, struct list_head *list)
+ {
+- int ret = min_order_for_split(folio);
+-
+- if (ret < 0)
+- return ret;
+-
+- return split_huge_page_to_list_to_order(&folio->page, list, ret);
++ return split_huge_page_to_list_to_order(&folio->page, list, 0);
+ }
+
+ /*
--- /dev/null
+From fa5a061700364bc28ee1cb1095372f8033645dcb Mon Sep 17 00:00:00 2001
+From: Zi Yan <ziy@nvidia.com>
+Date: Wed, 22 Oct 2025 23:05:21 -0400
+Subject: mm/huge_memory: preserve PG_has_hwpoisoned if a folio is split to >0 order
+
+From: Zi Yan <ziy@nvidia.com>
+
+commit fa5a061700364bc28ee1cb1095372f8033645dcb upstream.
+
+A folio split clears PG_has_hwpoisoned, but the flag should be preserved
+in any after-split folio that contains a page with the PG_hwpoisoned
+flag when the folio is split to >0 order folios. Scan all pages in a
+to-be-split folio to determine which after-split folios need the flag.
+
+An alternative is to change PG_has_hwpoisoned to PG_maybe_hwpoisoned to
+avoid the scan and set it on all after-split folios, but the resulting
+false positives have an undesirable negative impact. To remove the false
+positives, callers of folio_test_has_hwpoisoned() and
+folio_contain_hwpoisoned_page() would need to do the scan themselves,
+which would be a hassle for current and future callers and more costly
+than doing the scan in the split code. More details are discussed in [1].
+
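+For context, the sketch below (not part of this patch; the helper name
+is invented) shows roughly the per-page scan that every
+folio_test_has_hwpoisoned() caller would have to repeat if the flag
+could be a false positive; PageHWPoison() and folio_page() are the same
+interfaces the split code uses further down:
+
+  #include <linux/mm.h>
+  #include <linux/page-flags.h>
+
+  /* What callers would need if the folio flag were only a "maybe". */
+  static bool folio_really_has_hwpoisoned(struct folio *folio)
+  {
+          long i, nr = folio_nr_pages(folio);
+
+          for (i = 0; i < nr; i++)
+                  if (PageHWPoison(folio_page(folio, i)))
+                          return true;
+          return false;
+  }
+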
+This issue can be exposed via:
+1. splitting a has_hwpoisoned folio to >0 order from the debugfs
+   interface;
+2. truncating part of a has_hwpoisoned folio in
+   truncate_inode_partial_folio().
+
+Later accesses to a hwpoisoned page then become possible because of the
+missing has_hwpoisoned folio flag, which leads to MCE errors.
+
+Link: https://lore.kernel.org/all/CAHbLzkoOZm0PXxE9qwtF4gKR=cpRXrSrJ9V9Pm2DJexs985q4g@mail.gmail.com/ [1]
+Link: https://lkml.kernel.org/r/20251023030521.473097-1-ziy@nvidia.com
+Fixes: c010d47f107f ("mm: thp: split huge page to any lower order pages")
+Signed-off-by: Zi Yan <ziy@nvidia.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Yang Shi <yang@os.amperecomputing.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Cc: Pankaj Raghav <kernel@pankajraghav.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Dev Jain <dev.jain@arm.com>
+Cc: Jane Chu <jane.chu@oracle.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Nico Pache <npache@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3091,9 +3091,17 @@ static void lru_add_page_tail(struct fol
+ }
+ }
+
++static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
++{
++ for (; nr_pages; page++, nr_pages--)
++ if (PageHWPoison(page))
++ return true;
++ return false;
++}
++
+ static void __split_huge_page_tail(struct folio *folio, int tail,
+ struct lruvec *lruvec, struct list_head *list,
+- unsigned int new_order)
++ unsigned int new_order, const bool handle_hwpoison)
+ {
+ struct page *head = &folio->page;
+ struct page *page_tail = head + tail;
+@@ -3170,6 +3178,11 @@ static void __split_huge_page_tail(struc
+ folio_set_large_rmappable(new_folio);
+ }
+
++ /* Set has_hwpoisoned flag on new_folio if any of its pages is HWPoison */
++ if (handle_hwpoison &&
++ page_range_has_hwpoisoned(page_tail, 1 << new_order))
++ folio_set_has_hwpoisoned(new_folio);
++
+ /* Finally unfreeze refcount. Additional reference from page cache. */
+ page_ref_unfreeze(page_tail,
+ 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
+@@ -3194,6 +3207,8 @@ static void __split_huge_page(struct pag
+ pgoff_t end, unsigned int new_order)
+ {
+ struct folio *folio = page_folio(page);
++ /* Scan poisoned pages when split a poisoned folio to large folios */
++ const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
+ struct page *head = &folio->page;
+ struct lruvec *lruvec;
+ struct address_space *swap_cache = NULL;
+@@ -3217,8 +3232,14 @@ static void __split_huge_page(struct pag
+
+ ClearPageHasHWPoisoned(head);
+
++ /* Check first new_nr pages since the loop below skips them */
++ if (handle_hwpoison &&
++ page_range_has_hwpoisoned(folio_page(folio, 0), new_nr))
++ folio_set_has_hwpoisoned(folio);
++
+ for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
+- __split_huge_page_tail(folio, i, lruvec, list, new_order);
++ __split_huge_page_tail(folio, i, lruvec, list, new_order,
++ handle_hwpoison);
+ /* Some pages can be beyond EOF: drop them from page cache */
+ if (head[i].index >= end) {
+ struct folio *tail = page_folio(head + i);
--- /dev/null
+From 74207de2ba10c2973334906822dc94d2e859ffc5 Mon Sep 17 00:00:00 2001
+From: Kiryl Shutsemau <kas@kernel.org>
+Date: Mon, 27 Oct 2025 11:56:35 +0000
+Subject: mm/memory: do not populate page table entries beyond i_size
+
+From: Kiryl Shutsemau <kas@kernel.org>
+
+commit 74207de2ba10c2973334906822dc94d2e859ffc5 upstream.
+
+Patch series "Fix SIGBUS semantics with large folios", v3.
+
+Accessing memory within a VMA, but beyond i_size rounded up to the next
+page size, is supposed to generate SIGBUS.
+
+Darrick reported[1] an xfstests regression in v6.18-rc1. generic/749
+failed due to missing SIGBUS. This was caused by my recent changes that
+try to fault in the whole folio where possible:
+
+ 19773df031bc ("mm/fault: try to map the entire file folio in finish_fault()")
+ 357b92761d94 ("mm/filemap: map entire large folio faultaround")
+
+These changes did not consider i_size when setting up PTEs, leading to
+xfstest breakage.
+
+However, the problem has been present in the kernel for a long time -
+since huge tmpfs was introduced in 2016. The kernel happily maps
+PMD-sized folios as PMDs without checking i_size, and huge=always tmpfs
+allocates PMD-sized folios on any write.
+
+I considered this corner case when I implemented a large tmpfs, and my
+conclusion was that no one in their right mind should rely on receiving a
+SIGBUS signal when accessing beyond i_size. I cannot imagine how it could
+be useful for the workload.
+
+But apparently filesystem folks care a lot about preserving strict SIGBUS
+semantics.
+
+The generic/749 test was introduced last year with reference to POSIX,
+but no real workloads were mentioned. It also acknowledged that tmpfs
+deviates from the test case.
+
+POSIX indeed says[3]:
+
+ References within the address range starting at pa and
+ continuing for len bytes to whole pages following the end of an
+ object shall result in delivery of a SIGBUS signal.
+
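+The expected behaviour is easy to demonstrate from user space. The
+program below is illustrative only (not part of this series): it maps
+more of a file than i_size covers and touches the second page, which per
+the POSIX wording above must raise SIGBUS; if the file lives on
+huge=always tmpfs, the access has historically succeeded instead because
+the whole PMD-sized folio was mapped:
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0600);
+          char *p;
+
+          if (fd < 0 || write(fd, "x", 1) != 1)
+                  return 1;
+          /* i_size is one byte; only the first page is backed. */
+          p = mmap(NULL, 2 << 20, PROT_READ | PROT_WRITE,
+                   MAP_SHARED, fd, 0);
+          if (p == MAP_FAILED)
+                  return 1;
+          p[0] = 'y';                        /* within i_size: fine */
+          p[sysconf(_SC_PAGESIZE)] = 'z';    /* beyond i_size: SIGBUS */
+          printf("no SIGBUS delivered\n");
+          return 0;
+  }
+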
+The patchset fixes the regression introduced by recent changes as well as
+more subtle SIGBUS breakage due to split failure on truncation.
+
+
+This patch (of 2):
+
+Accesses within a VMA, but beyond i_size rounded up to PAGE_SIZE, are
+supposed to generate SIGBUS.
+
+Recent changes attempted to fault in the full folio where possible. They
+did not respect i_size, which led to populating PTEs beyond i_size and
+breaking SIGBUS semantics.
+
+Darrick reported generic/749 breakage because of this.
+
+However, the problem existed before the recent changes. With huge=always
+tmpfs, any write to a file leads to a PMD-sized allocation. The
+subsequent fault-in of the folio then installs a PMD mapping regardless
+of i_size.
+
+Fix filemap_map_pages() and finish_fault() to not install:
+ - PTEs beyond i_size;
+ - PMD mappings across i_size;
+
+Make an exception for shmem/tmpfs, which has for a long time been
+intentionally mapped with PMDs across i_size.
+
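+For illustration, the rule being added can be restated as a small
+predicate (sketch only; the helper name is invented, the callees are the
+ones used in the hunks below), with a worked example in the comment:
+
+  #include <linux/fs.h>
+  #include <linux/kernel.h>
+  #include <linux/pagemap.h>
+  #include <linux/shmem_fs.h>
+
+  /* Must the fault code fall back to PTEs for this folio? */
+  static bool folio_straddles_eof(struct address_space *mapping,
+                                  struct folio *folio)
+  {
+          pgoff_t file_end = DIV_ROUND_UP(i_size_read(mapping->host),
+                                          PAGE_SIZE);
+
+          /*
+           * Example: i_size = 3 * PAGE_SIZE + 1 gives file_end = 4,
+           * while a PMD-sized folio at index 0 has
+           * folio_next_index() == 512, so it straddles EOF and must
+           * not be mapped with a PMD; PTEs stop at index 3.
+           */
+          return !shmem_mapping(mapping) &&
+                 file_end < folio_next_index(folio);
+  }
+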
+Link: https://lkml.kernel.org/r/20251027115636.82382-1-kirill@shutemov.name
+Link: https://lkml.kernel.org/r/20251027115636.82382-2-kirill@shutemov.name
+Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
+Fixes: 6795801366da ("xfs: Support large folios")
+Reported-by: "Darrick J. Wong" <djwong@kernel.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/filemap.c | 20 +++++++++++++++-----
+ mm/memory.c | 23 +++++++++++++++++++++--
+ 2 files changed, 36 insertions(+), 7 deletions(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -3653,13 +3653,27 @@ vm_fault_t filemap_map_pages(struct vm_f
+ vm_fault_t ret = 0;
+ unsigned long rss = 0;
+ unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type;
++ bool can_map_large;
+
+ rcu_read_lock();
+ folio = next_uptodate_folio(&xas, mapping, end_pgoff);
+ if (!folio)
+ goto out;
+
+- if (filemap_map_pmd(vmf, folio, start_pgoff)) {
++ file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
++ end_pgoff = min(end_pgoff, file_end);
++
++ /*
++ * Do not allow to map with PTEs beyond i_size and with PMD
++ * across i_size to preserve SIGBUS semantics.
++ *
++ * Make an exception for shmem/tmpfs that for long time
++ * intentionally mapped with PMDs across i_size.
++ */
++ can_map_large = shmem_mapping(mapping) ||
++ file_end >= folio_next_index(folio);
++
++ if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+ ret = VM_FAULT_NOPAGE;
+ goto out;
+ }
+@@ -3672,10 +3686,6 @@ vm_fault_t filemap_map_pages(struct vm_f
+ goto out;
+ }
+
+- file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
+- if (end_pgoff > file_end)
+- end_pgoff = file_end;
+-
+ folio_type = mm_counter_file(folio);
+ do {
+ unsigned long end;
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -68,6 +68,7 @@
+ #include <linux/gfp.h>
+ #include <linux/migrate.h>
+ #include <linux/string.h>
++#include <linux/shmem_fs.h>
+ #include <linux/memory-tiers.h>
+ #include <linux/debugfs.h>
+ #include <linux/userfaultfd_k.h>
+@@ -5088,6 +5089,8 @@ fallback:
+ else
+ page = vmf->page;
+
++ folio = page_folio(page);
++
+ /*
+ * check even for read faults because we might have lost our CoWed
+ * page
+@@ -5098,8 +5101,25 @@ fallback:
+ return ret;
+ }
+
++ if (!needs_fallback && vma->vm_file) {
++ struct address_space *mapping = vma->vm_file->f_mapping;
++ pgoff_t file_end;
++
++ file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
++
++ /*
++ * Do not allow to map with PTEs beyond i_size and with PMD
++ * across i_size to preserve SIGBUS semantics.
++ *
++ * Make an exception for shmem/tmpfs that for long time
++ * intentionally mapped with PMDs across i_size.
++ */
++ needs_fallback = !shmem_mapping(mapping) &&
++ file_end < folio_next_index(folio);
++ }
++
+ if (pmd_none(*vmf->pmd)) {
+- if (PageTransCompound(page)) {
++ if (!needs_fallback && PageTransCompound(page)) {
+ ret = do_set_pmd(vmf, page);
+ if (ret != VM_FAULT_FALLBACK)
+ return ret;
+@@ -5111,7 +5131,6 @@ fallback:
+ return VM_FAULT_OOM;
+ }
+
+- folio = page_folio(page);
+ nr_pages = folio_nr_pages(folio);
+
+ /*
--- /dev/null
+From 6f86d0534fddfbd08687fa0f01479d4226bc3c3d Mon Sep 17 00:00:00 2001
+From: Lance Yang <lance.yang@linux.dev>
+Date: Fri, 31 Oct 2025 20:09:55 +0800
+Subject: mm/secretmem: fix use-after-free race in fault handler
+
+From: Lance Yang <lance.yang@linux.dev>
+
+commit 6f86d0534fddfbd08687fa0f01479d4226bc3c3d upstream.
+
+When a page fault occurs in a secret memory file created with
+`memfd_secret(2)`, the kernel will allocate a new folio for it, mark the
+underlying page as not-present in the direct map, and add it to the file
+mapping.
+
+If two tasks cause a fault in the same page concurrently, both could end
+up allocating a folio and removing the page from the direct map, but only
+one would succeed in adding the folio to the file mapping. The task that
+failed undoes the effects of its attempt by (a) freeing the folio again
+and (b) putting the page back into the direct map. However, by doing
+these two operations in this order, the page becomes available to the
+allocator again before it is placed back in the direct mapping.
+
+If another task attempts to allocate the page between (a) and (b), and the
+kernel tries to access it via the direct map, it would result in a
+supervisor not-present page fault.
+
+Fix the ordering to restore the direct map before the folio is freed.
+
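+For reference, the racy code is only reachable through the
+memfd_secret(2) fault path. A minimal user-space trigger might look like
+the sketch below (illustrative only; it assumes SYS_memfd_secret is
+defined by the installed headers and that secretmem is enabled); the
+race window itself needs several threads faulting the same page
+concurrently:
+
+  #define _GNU_SOURCE
+  #include <sys/mman.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int fd = syscall(SYS_memfd_secret, 0);
+          char *p;
+
+          if (fd < 0 || ftruncate(fd, 4096) < 0)
+                  return 1;
+          p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                   MAP_SHARED, fd, 0);
+          if (p == MAP_FAILED)
+                  return 1;
+          p[0] = 1;       /* first touch runs secretmem_fault() */
+          return 0;
+  }
+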
+Link: https://lkml.kernel.org/r/20251031120955.92116-1-lance.yang@linux.dev
+Fixes: 1507f51255c9 ("mm: introduce memfd_secret system call to create "secret" memory areas")
+Signed-off-by: Lance Yang <lance.yang@linux.dev>
+Reported-by: Google Big Sleep <big-sleep-vuln-reports@google.com>
+Closes: https://lore.kernel.org/linux-mm/CAEXGt5QeDpiHTu3K9tvjUTPqo+d-=wuCNYPa+6sWKrdQJ-ATdg@mail.gmail.com/
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/secretmem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -84,13 +84,13 @@ retry:
+ __folio_mark_uptodate(folio);
+ err = filemap_add_folio(mapping, folio, offset, gfp);
+ if (unlikely(err)) {
+- folio_put(folio);
+ /*
+ * If a split of large page was required, it
+ * already happened when we marked the page invalid
+ * which guarantees that this call won't fail
+ */
+ set_direct_map_default_noflush(page);
++ folio_put(folio);
+ if (err == -EEXIST)
+ goto retry;
+
--- /dev/null
+From fa04f5b60fda62c98a53a60de3a1e763f11feb41 Mon Sep 17 00:00:00 2001
+From: Kiryl Shutsemau <kas@kernel.org>
+Date: Mon, 27 Oct 2025 11:56:36 +0000
+Subject: mm/truncate: unmap large folio on split failure
+
+From: Kiryl Shutsemau <kas@kernel.org>
+
+commit fa04f5b60fda62c98a53a60de3a1e763f11feb41 upstream.
+
+Accesses within a VMA, but beyond i_size rounded up to PAGE_SIZE, are
+supposed to generate SIGBUS.
+
+This behavior might not be respected on truncation.
+
+During truncation, the kernel splits a large folio in order to reclaim
+memory. As a side effect, it unmaps the folio and destroys the folio's
+PMD mappings. The folio is then refaulted as PTEs and SIGBUS semantics
+are preserved.
+
+However, if the split fails, PMD mappings are preserved and the user will
+not receive SIGBUS on any accesses within the PMD.
+
+Unmap the folio on split failure. This leads to a refault as PTEs and
+preserves SIGBUS semantics.
+
+Make an exception for shmem/tmpfs, which has for a long time been
+intentionally mapped with PMDs across i_size.
+
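+The user-visible contract can again be written as a short program
+(illustrative only, not part of this series; whether a PMD mapping is
+actually used depends on the filesystem, mapping alignment and THP
+settings): after the file shrinks under an existing mapping, an access
+beyond the new i_size rounded up to PAGE_SIZE must raise SIGBUS, even if
+the kernel failed to split the large folio backing that range:
+
+  #include <fcntl.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          long page = sysconf(_SC_PAGESIZE);
+          int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0600);
+          char *p;
+
+          if (fd < 0 || ftruncate(fd, 4 << 20) < 0)
+                  return 1;
+          p = mmap(NULL, 4 << 20, PROT_READ | PROT_WRITE,
+                   MAP_SHARED, fd, 0);
+          if (p == MAP_FAILED)
+                  return 1;
+          p[2 << 20] = 'a';               /* fault in, possibly as PMD */
+          if (ftruncate(fd, (2 << 20) + page) < 0)
+                  return 1;               /* new i_size lands mid-folio */
+          p[(2 << 20) + 2 * page] = 'b';  /* must raise SIGBUS */
+          return 0;
+  }
+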
+Link: https://lkml.kernel.org/r/20251027115636.82382-3-kirill@shutemov.name
+Fixes: b9a8a4195c7d ("truncate,shmem: Handle truncates that split large folios")
+Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: "Darrick J. Wong" <djwong@kernel.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/truncate.c | 27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -179,6 +179,31 @@ int truncate_inode_folio(struct address_
+ return 0;
+ }
+
++static int try_folio_split_or_unmap(struct folio *folio)
++{
++ enum ttu_flags ttu_flags =
++ TTU_SYNC |
++ TTU_SPLIT_HUGE_PMD |
++ TTU_IGNORE_MLOCK;
++ int ret;
++
++ ret = split_folio(folio);
++
++ /*
++ * If the split fails, unmap the folio, so it will be refaulted
++ * with PTEs to respect SIGBUS semantics.
++ *
++ * Make an exception for shmem/tmpfs that for long time
++ * intentionally mapped with PMDs across i_size.
++ */
++ if (ret && !shmem_mapping(folio->mapping)) {
++ try_to_unmap(folio, ttu_flags);
++ WARN_ON(folio_mapped(folio));
++ }
++
++ return ret;
++}
++
+ /*
+ * Handle partial folios. The folio may be entirely within the
+ * range if a split has raced with us. If not, we zero the part of the
+@@ -223,7 +248,7 @@ bool truncate_inode_partial_folio(struct
+ folio_invalidate(folio, offset, length);
+ if (!folio_test_large(folio))
+ return true;
+- if (split_folio(folio) == 0)
++ if (try_folio_split_or_unmap(folio) == 0)
+ return true;
+ if (folio_test_dirty(folio))
+ return false;
io_uring-napi-fix-io_napi_entry-rcu-accesses.patch
rust-kbuild-treat-build_error-and-rustdoc-as-kernel-objects.patch
rust-kbuild-workaround-rustdoc-doctests-modifier-bug.patch
+uio_hv_generic-set-event-for-all-channels-on-the-device.patch
+mm-memory-do-not-populate-page-table-entries-beyond-i_size.patch
+mm-truncate-unmap-large-folio-on-split-failure.patch
+mm-secretmem-fix-use-after-free-race-in-fault-handler.patch
+mm-huge_memory-do-not-change-split_huge_page-target-order-silently.patch
+mm-huge_memory-preserve-pg_has_hwpoisoned-if-a-folio-is-split-to-0-order.patch
--- /dev/null
+From d062463edf1770427dc2d637df4088df4835aa47 Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Mon, 10 Mar 2025 15:12:01 -0700
+Subject: uio_hv_generic: Set event for all channels on the device
+
+From: Long Li <longli@microsoft.com>
+
+commit d062463edf1770427dc2d637df4088df4835aa47 upstream.
+
+Hyper-V may offer a non-latency-sensitive device with subchannels that
+do not have the monitor bit enabled. The decision is made entirely by
+the Hyper-V host and is not configurable within the guest.
+
+When a device has subchannels, also signal events for each subchannel
+whose monitor bit is disabled.
+
+This patch also removes the memory barrier when the monitor bit is
+enabled, as it is not necessary there. The memory barrier is only needed
+between setting up the interrupt mask and calling vmbus_set_event() when
+the monitor bit is disabled.
+
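+From user space this path is exercised through the normal UIO contract:
+writing a 32-bit value to the /dev/uioX node invokes the driver's
+irqcontrol callback (hv_uio_irqcontrol() here), and a blocking read
+returns the interrupt count once uio_event_notify() fires. A rough
+sketch of such a loop (illustrative only; the device node name is
+assumed):
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int fd = open("/dev/uio0", O_RDWR);
+          uint32_t count, unmask = 1;
+
+          if (fd < 0)
+                  return 1;
+          for (;;) {
+                  /* Unmask: clears interrupt_mask and, without a
+                   * monitor bit, signals the host via vmbus_set_event(). */
+                  if (write(fd, &unmask, sizeof(unmask)) != sizeof(unmask))
+                          return 1;
+                  /* Block until the channel callback notifies us. */
+                  if (read(fd, &count, sizeof(count)) != sizeof(count))
+                          return 1;
+                  /* ... drain the VMBus ring buffer here ... */
+          }
+  }
+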
+Signed-off-by: Long Li <longli@microsoft.com>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Reviewed-by: Saurabh Sengar <ssengar@linux.microsoft.com>
+Link: https://lore.kernel.org/r/1741644721-20389-1-git-send-email-longli@linuxonhyperv.com
+Fixes: 37bd91f22794 ("uio_hv_generic: Let userspace take care of interrupt mask")
+Cc: <stable@vger.kernel.org> # 6.12.x
+Signed-off-by: Naman Jain <namjain@linux.microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/uio/uio_hv_generic.c | 32 ++++++++++++++++++++++++++------
+ 1 file changed, 26 insertions(+), 6 deletions(-)
+
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -65,6 +65,16 @@ struct hv_uio_private_data {
+ char send_name[32];
+ };
+
++static void set_event(struct vmbus_channel *channel, s32 irq_state)
++{
++ channel->inbound.ring_buffer->interrupt_mask = !irq_state;
++ if (!channel->offermsg.monitor_allocated && irq_state) {
++ /* MB is needed for host to see the interrupt mask first */
++ virt_mb();
++ vmbus_set_event(channel);
++ }
++}
++
+ /*
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to disable/enable interrupt from user space processes.
+@@ -79,12 +89,15 @@ hv_uio_irqcontrol(struct uio_info *info,
+ {
+ struct hv_uio_private_data *pdata = info->priv;
+ struct hv_device *dev = pdata->device;
++ struct vmbus_channel *primary, *sc;
+
+- dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
+- virt_mb();
++ primary = dev->channel;
++ set_event(primary, irq_state);
+
+- if (!dev->channel->offermsg.monitor_allocated && irq_state)
+- vmbus_setevent(dev->channel);
++ mutex_lock(&vmbus_connection.channel_mutex);
++ list_for_each_entry(sc, &primary->sc_list, sc_list)
++ set_event(sc, irq_state);
++ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return 0;
+ }
+@@ -95,11 +108,18 @@ hv_uio_irqcontrol(struct uio_info *info,
+ static void hv_uio_channel_cb(void *context)
+ {
+ struct vmbus_channel *chan = context;
+- struct hv_device *hv_dev = chan->device_obj;
+- struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
++ struct hv_device *hv_dev;
++ struct hv_uio_private_data *pdata;
+
+ virt_mb();
+
++ /*
++ * The callback may come from a subchannel, in which case look
++ * for the hv device in the primary channel
++ */
++ hv_dev = chan->primary_channel ?
++ chan->primary_channel->device_obj : chan->device_obj;
++ pdata = hv_get_drvdata(hv_dev);
+ uio_event_notify(&pdata->info);
+ }
+