--- /dev/null
+From f5e5b894934c36018dc4c6fb47f210c109045280 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Mar 2025 20:53:08 +0100
+Subject: i2c: ali1535: Fix an error handling path in ali1535_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 9b5463f349d019a261f1e80803447efca3126151 ]
+
+If i2c_add_adapter() fails, the request_region() call in ali1535_setup()
+must be undone by a corresponding release_region() call, as done in the
+remove function.
+
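+As a general illustration of the pattern being restored (a minimal
+sketch with a hypothetical "foo" driver, not the actual ali1535 code;
+the real fix is in the hunk below):
+
+    static int foo_probe(struct pci_dev *dev, const struct pci_device_id *id)
+    {
+            int ret;
+
+            /* foo_setup() would call request_region(foo_base, FOO_IOSIZE, ...) */
+            if (foo_setup(dev))
+                    return -ENODEV;
+
+            ret = i2c_add_adapter(&foo_adapter);
+            if (ret)
+                    /* undo foo_setup()'s request_region() on failure */
+                    release_region(foo_base, FOO_IOSIZE);
+            return ret;
+    }
+
+    static void foo_remove(struct pci_dev *dev)
+    {
+            i2c_del_adapter(&foo_adapter);
+            release_region(foo_base, FOO_IOSIZE);
+    }
+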
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/0daf63d7a2ce74c02e2664ba805bbfadab7d25e5.1741031571.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-ali1535.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
+index 544c94e86b896..1eac358380405 100644
+--- a/drivers/i2c/busses/i2c-ali1535.c
++++ b/drivers/i2c/busses/i2c-ali1535.c
+@@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids);
+
+ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
++ int ret;
++
+ if (ali1535_setup(dev)) {
+ dev_warn(&dev->dev,
+ "ALI1535 not detected, module not inserted.\n");
+@@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
+
+ snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
+ "SMBus ALI1535 adapter at %04x", ali1535_offset);
+- return i2c_add_adapter(&ali1535_adapter);
++ ret = i2c_add_adapter(&ali1535_adapter);
++ if (ret)
++ goto release_region;
++
++ return 0;
++
++release_region:
++ release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
++ return ret;
+ }
+
+ static void ali1535_remove(struct pci_dev *dev)
+--
+2.39.5
+
--- /dev/null
+From a92dad3b3be79dc1436f44f5f65f21aa3169bedd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Mar 2025 20:58:06 +0100
+Subject: i2c: ali15x3: Fix an error handling path in ali15x3_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 6e55caaf30c88209d097e575a169b1dface1ab69 ]
+
+If i2c_add_adapter() fails, the request_region() call in ali15x3_setup()
+must be undone by a corresponding release_region() call, as done in the
+remove function.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/9b2090cbcc02659f425188ea05f2e02745c4e67b.1741031878.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-ali15x3.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
+index 4761c72081022..418d11266671e 100644
+--- a/drivers/i2c/busses/i2c-ali15x3.c
++++ b/drivers/i2c/busses/i2c-ali15x3.c
+@@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids);
+
+ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
++ int ret;
++
+ if (ali15x3_setup(dev)) {
+ dev_err(&dev->dev,
+ "ALI15X3 not detected, module not inserted.\n");
+@@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
+
+ snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name),
+ "SMBus ALI15X3 adapter at %04x", ali15x3_smba);
+- return i2c_add_adapter(&ali15x3_adapter);
++ ret = i2c_add_adapter(&ali15x3_adapter);
++ if (ret)
++ goto release_region;
++
++ return 0;
++
++release_region:
++ release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
++ return ret;
+ }
+
+ static void ali15x3_remove(struct pci_dev *dev)
+--
+2.39.5
+
--- /dev/null
+From ca871398c7b9d054773ac70c5012b7d967d2abb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Mar 2025 21:26:54 +0100
+Subject: i2c: sis630: Fix an error handling path in sis630_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 2b22459792fcb4def9f0936d64575ac11a95a58d ]
+
+If i2c_add_adapter() fails, the request_region() call in sis630_setup()
+must be undone by a corresponding release_region() call, as done in the
+remove function.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://lore.kernel.org/r/3d607601f2c38e896b10207963c6ab499ca5c307.1741033587.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-sis630.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
+index 3505cf29cedda..a19c3d251804d 100644
+--- a/drivers/i2c/busses/i2c-sis630.c
++++ b/drivers/i2c/busses/i2c-sis630.c
+@@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids);
+
+ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
++ int ret;
++
+ if (sis630_setup(dev)) {
+ dev_err(&dev->dev,
+ "SIS630 compatible bus not detected, "
+@@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
+ "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
+
+- return i2c_add_adapter(&sis630_adapter);
++ ret = i2c_add_adapter(&sis630_adapter);
++ if (ret)
++ goto release_region;
++
++ return 0;
++
++release_region:
++ release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
++ return ret;
+ }
+
+ static void sis630_remove(struct pci_dev *dev)
+--
+2.39.5
+
--- /dev/null
+From 484d667fde0605d2fc9a37de8b288ae93eb3dc85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Feb 2025 11:46:44 +0800
+Subject: mm/hugetlb: wait for hugetlb folios to be freed
+
+From: Ge Yang <yangge1116@126.com>
+
+[ Upstream commit 67bab13307c83fb742c2556b06cdc39dbad27f07 ]
+
+Since the introduction of commit c77c0a8ac4c52 ("mm/hugetlb: defer freeing
+of huge pages if in non-task context"), which supports deferring the
+freeing of hugetlb pages, the allocation of contiguous memory through
+cma_alloc() may fail probabilistically.
+
+In the CMA allocation process, if it is found that the CMA area is
+occupied by in-use hugetlb folios, these in-use hugetlb folios need to be
+migrated to another location. When there are no available hugetlb folios
+in the free hugetlb pool during the migration of in-use hugetlb folios,
+new folios are allocated from the buddy system. A temporary state is set
+on the newly allocated folio. Upon completion of the hugetlb folio
+migration, the temporary state is transferred from the new folios to the
+old folios. Normally, when the old folios with the temporary state are
+freed, they are directly released back to the buddy system. However, due to
+the deferred freeing of hugetlb pages, the PageBuddy() check fails,
+ultimately leading to the failure of cma_alloc().
+
+Here is a simplified call trace illustrating the process:
+cma_alloc()
+ ->__alloc_contig_migrate_range() // Migrate in-use hugetlb folios
+ ->unmap_and_move_huge_page()
+ ->folio_putback_hugetlb() // Free old folios
+ ->test_pages_isolated()
+ ->__test_page_isolated_in_pageblock()
+ ->PageBuddy(page) // Check if the page is in buddy
+
+To resolve this issue, we have implemented a function named
+wait_for_freed_hugetlb_folios(). This function ensures that the hugetlb
+folios are properly released back to the buddy system after their
+migration is completed. By invoking wait_for_freed_hugetlb_folios()
+before calling PageBuddy(), we ensure that PageBuddy() will succeed.
+
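+For reference, a simplified sketch of the deferred-freeing machinery in
+mm/hugetlb.c that the new helper flushes (details elided, not the exact
+upstream code):
+
+    static LLIST_HEAD(hpage_freelist);
+
+    static void free_hpage_workfn(struct work_struct *work)
+    {
+            struct llist_node *node = llist_del_all(&hpage_freelist);
+
+            /* hand each deferred hugetlb folio back to the allocator */
+            while (node) {
+                    ...
+                    node = node->next;
+            }
+    }
+    static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+    void wait_for_freed_hugetlb_folios(void)
+    {
+            if (llist_empty(&hpage_freelist))
+                    return;
+
+            /* run pending deferred frees before the caller checks PageBuddy() */
+            flush_work(&free_hpage_work);
+    }
+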
+Link: https://lkml.kernel.org/r/1739936804-18199-1-git-send-email-yangge1116@126.com
+Fixes: c77c0a8ac4c5 ("mm/hugetlb: defer freeing of huge pages if in non-task context")
+Signed-off-by: Ge Yang <yangge1116@126.com>
+Reviewed-by: Muchun Song <muchun.song@linux.dev>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <21cnbao@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/hugetlb.h | 5 +++++
+ mm/hugetlb.c | 8 ++++++++
+ mm/page_isolation.c | 10 ++++++++++
+ 3 files changed, 23 insertions(+)
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 25a7b13574c28..12f7a7b9c06e9 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -687,6 +687,7 @@ struct huge_bootmem_page {
+ };
+
+ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
++void wait_for_freed_hugetlb_folios(void);
+ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve);
+ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+@@ -1057,6 +1058,10 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
+ return -ENOMEM;
+ }
+
++static inline void wait_for_freed_hugetlb_folios(void)
++{
++}
++
+ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr,
+ int avoid_reserve)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 1e9aa6de4e21e..e28e820fdb775 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2955,6 +2955,14 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
+ return ret;
+ }
+
++void wait_for_freed_hugetlb_folios(void)
++{
++ if (llist_empty(&hpage_freelist))
++ return;
++
++ flush_work(&free_hpage_work);
++}
++
+ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+ {
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index 7e04047977cfe..6989c5ffd4741 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -611,6 +611,16 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+ struct zone *zone;
+ int ret;
+
++ /*
++ * Due to the deferred freeing of hugetlb folios, the hugetlb folios may
++ * not be released back to the buddy system immediately. This can cause
++ * PageBuddy() to fail in __test_page_isolated_in_pageblock(). To ensure
++ * that the hugetlb folios are properly released back to the buddy
++ * system, we invoke wait_for_freed_hugetlb_folios() to wait for the
++ * release to complete.
++ */
++ wait_for_freed_hugetlb_folios();
++
+ /*
+ * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free
+ * pages are not aligned to pageblock_nr_pages.
+--
+2.39.5
+
--- /dev/null
+From 7a5f08ddda907f5ff10288e9ccc3cf57fac584fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 17:52:55 +0800
+Subject: mm: shmem: fix potential data corruption during shmem swapin
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+[ Upstream commit 058313515d5aab10d0a01dd634f92ed4a4e71d4c ]
+
+Alex and Kairui reported some issues (system hang or data corruption) when
+swapping out or swapping in large shmem folios. This is especially easy
+to reproduce when the tmpfs is mounted with the 'huge=within_size'
+parameter. Thanks to Kairui's reproducer, the issue can be easily
+replicated.
+
+The root cause of the problem is that swap readahead may asynchronously
+swap in order 0 folios into the swap cache, while the shmem mapping can
+still store large swap entries. Then an order 0 folio is inserted into
+the shmem mapping without splitting the large swap entry, which overwrites
+the original large swap entry, leading to data corruption.
+
+When getting a folio from the swap cache, we should split the large swap
+entry stored in the shmem mapping if the orders do not match, to fix this
+issue.
+
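+To illustrate the entry recalculation in the fix (hypothetical numbers):
+if a large swap entry of order 4 covers indices 32..47 and the fault is
+at index 35, then after the split the order-0 entry for that index is
+the original entry advanced by the in-entry offset:
+
+    pgoff_t index = 35;
+    int split_order = 4;    /* the old entry spanned 1 << 4 = 16 pages */
+    pgoff_t offset = index - round_down(index, 1 << split_order);  /* 35 - 32 = 3 */
+
+    swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+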
+Link: https://lkml.kernel.org/r/2fe47c557e74e9df5fe2437ccdc6c9115fa1bf70.1740476943.git.baolin.wang@linux.alibaba.com
+Fixes: 809bc86517cc ("mm: shmem: support large folio swap out")
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reported-by: Alex Xu (Hello71) <alex_y_xu@yahoo.ca>
+Reported-by: Kairui Song <ryncsn@gmail.com>
+Closes: https://lore.kernel.org/all/1738717785.im3r5g2vxc.none@localhost/
+Tested-by: Kairui Song <kasong@tencent.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Lance Yang <ioworker0@gmail.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/shmem.c | 31 +++++++++++++++++++++++++++----
+ 1 file changed, 27 insertions(+), 4 deletions(-)
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 738893d7fe083..e572d86f8f67e 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2164,7 +2164,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ struct folio *folio = NULL;
+ bool skip_swapcache = false;
+ swp_entry_t swap;
+- int error, nr_pages;
++ int error, nr_pages, order, split_order;
+
+ VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
+ swap = radix_to_swp_entry(*foliop);
+@@ -2183,10 +2183,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+
+ /* Look it up and read it in.. */
+ folio = swap_cache_get_folio(swap, NULL, 0);
++ order = xa_get_order(&mapping->i_pages, index);
+ if (!folio) {
+- int order = xa_get_order(&mapping->i_pages, index);
+ bool fallback_order0 = false;
+- int split_order;
+
+ /* Or update major stats only when swapin succeeds?? */
+ if (fault_type) {
+@@ -2250,6 +2249,29 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ error = -ENOMEM;
+ goto failed;
+ }
++ } else if (order != folio_order(folio)) {
++ /*
++ * Swap readahead may swap in order 0 folios into swapcache
++ * asynchronously, while the shmem mapping can still store
++ * large swap entries. In such cases, we should split the
++ * large swap entry to prevent possible data corruption.
++ */
++ split_order = shmem_split_large_entry(inode, index, swap, gfp);
++ if (split_order < 0) {
++ error = split_order;
++ goto failed;
++ }
++
++ /*
++ * If the large swap entry has already been split, it is
++ * necessary to recalculate the new swap entry based on
++ * the old order alignment.
++ */
++ if (split_order > 0) {
++ pgoff_t offset = index - round_down(index, 1 << split_order);
++
++ swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
++ }
+ }
+
+ alloced:
+@@ -2257,7 +2279,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ folio_lock(folio);
+ if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
+ folio->swap.val != swap.val ||
+- !shmem_confirm_swap(mapping, index, swap)) {
++ !shmem_confirm_swap(mapping, index, swap) ||
++ xa_get_order(&mapping->i_pages, index) != folio_order(folio)) {
+ error = -EEXIST;
+ goto unlock;
+ }
+--
+2.39.5
+
--- /dev/null
+From df6cb03ef8e3bf53ebe143c79cf2c073bfa0584b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 10:16:49 +0800
+Subject: mm: shmem: skip swapcache for swapin of synchronous swap device
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+[ Upstream commit 1dd44c0af4fa1e80a4e82faa10cbf5d22da40362 ]
+
+With fast swap devices (such as zram), swapin latency is crucial to
+applications. For shmem swapin, similar to anonymous memory swapin, we
+can skip the swapcache operation to improve swapin latency. Testing 1G
+shmem sequential swapin without THP enabled, I observed approximately a 6%
+performance improvement (each test was repeated 5 times and the mean is
+reported):
+
+w/o patch w/ patch changes
+534.8ms 501ms +6.3%
+
+In addition, we currently always split the large swap entry stored in the
+shmem mapping during shmem large folio swapin, which is suboptimal,
+especially with a fast swap device. If the swap device is synchronous, we
+should instead swap in the whole large folio, similar to anonymous memory
+mTHP swapin, to preserve the precious large folios and improve swapin
+latency.
+Testing 1G shmem sequential swapin with 64K mTHP and 2M mTHP, I observed
+obvious performance improvement:
+
+mTHP=64K
+w/o patch w/ patch changes
+550.4ms 169.6ms +69%
+
+mTHP=2M
+w/o patch w/ patch changes
+542.8ms 126.8ms +77%
+
+Note that skipping swapcache requires attention to concurrent swapin
+scenarios. Fortunately, swapcache_prepare() and shmem_add_to_page_cache()
+can identify concurrent swapin and large swap entry splits, returning
+-EEXIST so the swapin can be retried.
+
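+Conceptually, swapcache_prepare() provides this guard by atomically
+setting SWAP_HAS_CACHE on the entry's swap count (a rough sketch of the
+mm/swapfile.c semantics, not the exact code):
+
+    /* fails if another thread already claimed this entry for swapin */
+    if (count & SWAP_HAS_CACHE)
+            return -EEXIST;
+    count |= SWAP_HAS_CACHE;
+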
+[akpm@linux-foundation.org: use IS_ENABLED(), tweak comment grammar]
+Link: https://lkml.kernel.org/r/3d9f3bd3bc6ec953054baff5134f66feeaae7c1e.1736301701.git.baolin.wang@linux.alibaba.com
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Kairui Song <kasong@tencent.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 058313515d5a ("mm: shmem: fix potential data corruption during shmem swapin")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/shmem.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 105 insertions(+), 5 deletions(-)
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 5960e5035f983..738893d7fe083 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1878,6 +1878,65 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
+ return ERR_PTR(error);
+ }
+
++static struct folio *shmem_swap_alloc_folio(struct inode *inode,
++ struct vm_area_struct *vma, pgoff_t index,
++ swp_entry_t entry, int order, gfp_t gfp)
++{
++ struct shmem_inode_info *info = SHMEM_I(inode);
++ struct folio *new;
++ void *shadow;
++ int nr_pages;
++
++ /*
++ * We have arrived here because our zones are constrained, so don't
++ * limit chance of success with further cpuset and node constraints.
++ */
++ gfp &= ~GFP_CONSTRAINT_MASK;
++ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) {
++ gfp_t huge_gfp = vma_thp_gfp_mask(vma);
++
++ gfp = limit_gfp_mask(huge_gfp, gfp);
++ }
++
++ new = shmem_alloc_folio(gfp, order, info, index);
++ if (!new)
++ return ERR_PTR(-ENOMEM);
++
++ nr_pages = folio_nr_pages(new);
++ if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
++ gfp, entry)) {
++ folio_put(new);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /*
++ * Prevent parallel swapin from proceeding with the swap cache flag.
++ *
++ * Of course there is another possible concurrent scenario as well,
++ * that is to say, the swap cache flag of a large folio has already
++ * been set by swapcache_prepare(), while another thread may have
++ * already split the large swap entry stored in the shmem mapping.
++ * In this case, shmem_add_to_page_cache() will help identify the
++ * concurrent swapin and return -EEXIST.
++ */
++ if (swapcache_prepare(entry, nr_pages)) {
++ folio_put(new);
++ return ERR_PTR(-EEXIST);
++ }
++
++ __folio_set_locked(new);
++ __folio_set_swapbacked(new);
++ new->swap = entry;
++
++ mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
++ shadow = get_shadow_from_swap_cache(entry);
++ if (shadow)
++ workingset_refault(new, shadow);
++ folio_add_lru(new);
++ swap_read_folio(new, NULL);
++ return new;
++}
++
+ /*
+ * When a page is moved from swapcache to shmem filecache (either by the
+ * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
+@@ -1981,7 +2040,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
+ }
+
+ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
+- struct folio *folio, swp_entry_t swap)
++ struct folio *folio, swp_entry_t swap,
++ bool skip_swapcache)
+ {
+ struct address_space *mapping = inode->i_mapping;
+ swp_entry_t swapin_error;
+@@ -1997,7 +2057,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
+
+ nr_pages = folio_nr_pages(folio);
+ folio_wait_writeback(folio);
+- delete_from_swap_cache(folio);
++ if (!skip_swapcache)
++ delete_from_swap_cache(folio);
+ /*
+ * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
+ * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
+@@ -2101,6 +2162,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ struct swap_info_struct *si;
+ struct folio *folio = NULL;
++ bool skip_swapcache = false;
+ swp_entry_t swap;
+ int error, nr_pages;
+
+@@ -2122,6 +2184,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ /* Look it up and read it in.. */
+ folio = swap_cache_get_folio(swap, NULL, 0);
+ if (!folio) {
++ int order = xa_get_order(&mapping->i_pages, index);
++ bool fallback_order0 = false;
+ int split_order;
+
+ /* Or update major stats only when swapin succeeds?? */
+@@ -2131,6 +2195,33 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ count_memcg_event_mm(fault_mm, PGMAJFAULT);
+ }
+
++ /*
++ * If uffd is active for the vma, we need per-page fault
++ * fidelity to maintain the uffd semantics, then fallback
++ * to swapin order-0 folio, as well as for zswap case.
++ */
++ if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
++ !zswap_never_enabled()))
++ fallback_order0 = true;
++
++ /* Skip swapcache for synchronous device. */
++ if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
++ folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
++ if (!IS_ERR(folio)) {
++ skip_swapcache = true;
++ goto alloced;
++ }
++
++ /*
++ * Fallback to swapin order-0 folio unless the swap entry
++ * already exists.
++ */
++ error = PTR_ERR(folio);
++ folio = NULL;
++ if (error == -EEXIST)
++ goto failed;
++ }
++
+ /*
+ * Now swap device can only swap in order 0 folio, then we
+ * should split the large swap entry stored in the pagecache
+@@ -2161,9 +2252,10 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ }
+ }
+
++alloced:
+ /* We have to do this with folio locked to prevent races */
+ folio_lock(folio);
+- if (!folio_test_swapcache(folio) ||
++ if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
+ folio->swap.val != swap.val ||
+ !shmem_confirm_swap(mapping, index, swap)) {
+ error = -EEXIST;
+@@ -2199,7 +2291,12 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ if (sgp == SGP_WRITE)
+ folio_mark_accessed(folio);
+
+- delete_from_swap_cache(folio);
++ if (skip_swapcache) {
++ folio->swap.val = 0;
++ swapcache_clear(si, swap, nr_pages);
++ } else {
++ delete_from_swap_cache(folio);
++ }
+ folio_mark_dirty(folio);
+ swap_free_nr(swap, nr_pages);
+ put_swap_device(si);
+@@ -2210,8 +2307,11 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+ if (!shmem_confirm_swap(mapping, index, swap))
+ error = -EEXIST;
+ if (error == -EIO)
+- shmem_set_folio_swapin_error(inode, index, folio, swap);
++ shmem_set_folio_swapin_error(inode, index, folio, swap,
++ skip_swapcache);
+ unlock:
++ if (skip_swapcache)
++ swapcache_clear(si, swap, folio_nr_pages(folio));
+ if (folio) {
+ folio_unlock(folio);
+ folio_put(folio);
+--
+2.39.5
+
cifs-fix-integer-overflow-while-processing-actimeo-m.patch
cifs-fix-integer-overflow-while-processing-closetime.patch
x86-vmware-parse-mp-tables-for-sev-snp-enabled-guest.patch
+i2c-ali1535-fix-an-error-handling-path-in-ali1535_pr.patch
+i2c-ali15x3-fix-an-error-handling-path-in-ali15x3_pr.patch
+i2c-sis630-fix-an-error-handling-path-in-sis630_prob.patch
+mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch
+mm-shmem-fix-potential-data-corruption-during-shmem-.patch
+mm-hugetlb-wait-for-hugetlb-folios-to-be-freed.patch
+smb3-add-support-for-iakerb.patch
+smb-client-fix-match_session-bug-preventing-session-.patch
--- /dev/null
+From 3d48d46299beebbab1f16007abab4f672087c3ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Mar 2025 15:23:59 -0300
+Subject: smb: client: Fix match_session bug preventing session reuse
+
+From: Henrique Carvalho <henrique.carvalho@suse.com>
+
+[ Upstream commit 605b249ea96770ac4fac4b8510a99e0f8442be5e ]
+
+Fix a bug in match_session() that can cause the session not to be
+reused in some cases.
+
+Reproduction steps:
+
+mount.cifs //server/share /mnt/a -o credentials=creds
+mount.cifs //server/share /mnt/b -o credentials=creds,sec=ntlmssp
+cat /proc/fs/cifs/DebugData | grep SessionId | wc -l
+
+mount.cifs //server/share /mnt/b -o credentials=creds,sec=ntlmssp
+mount.cifs //server/share /mnt/a -o credentials=creds
+cat /proc/fs/cifs/DebugData | grep SessionId | wc -l
+
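+Concretely (assuming the server negotiates NTLMSSP): the session created
+by the first mount keeps sectype == Unspecified, while the second mount
+passes sectype == RawNTLMSSP, so the old raw comparison rejects the
+match. The fix resolves both sides before comparing:
+
+    /* old: RawNTLMSSP != Unspecified => session wrongly not reused */
+    if (ctx->sectype != Unspecified && ctx->sectype != ses->sectype)
+            return 0;
+
+    /* new: both resolve to RawNTLMSSP via the server's select_sectype() */
+    if (server->ops->select_sectype(server, ctx->sectype) !=
+        server->ops->select_sectype(server, ses->sectype))
+            return 0;
+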
+Cc: stable@vger.kernel.org
+Reviewed-by: Enzo Matsumiya <ematsumiya@suse.de>
+Signed-off-by: Henrique Carvalho <henrique.carvalho@suse.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/connect.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index fb51cdf552061..d327f31b317db 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1873,9 +1873,8 @@ static int match_session(struct cifs_ses *ses,
+ struct smb3_fs_context *ctx,
+ bool match_super)
+ {
+- if (ctx->sectype != Unspecified &&
+- ctx->sectype != ses->sectype)
+- return 0;
++ struct TCP_Server_Info *server = ses->server;
++ enum securityEnum ctx_sec, ses_sec;
+
+ if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses)
+ return 0;
+@@ -1887,11 +1886,20 @@ static int match_session(struct cifs_ses *ses,
+ if (ses->chan_max < ctx->max_channels)
+ return 0;
+
+- switch (ses->sectype) {
++ ctx_sec = server->ops->select_sectype(server, ctx->sectype);
++ ses_sec = server->ops->select_sectype(server, ses->sectype);
++
++ if (ctx_sec != ses_sec)
++ return 0;
++
++ switch (ctx_sec) {
++ case IAKerb:
+ case Kerberos:
+ if (!uid_eq(ctx->cred_uid, ses->cred_uid))
+ return 0;
+ break;
++ case NTLMv2:
++ case RawNTLMSSP:
+ default:
+ /* NULL username means anonymous session */
+ if (ses->user_name == NULL) {
+--
+2.39.5
+
--- /dev/null
+From 4741c1f794350895a32c4a74e1883fc19910f85e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2025 01:04:23 -0600
+Subject: smb3: add support for IAKerb
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit eea5119fa5979c350af5783a8148eacdd4219715 ]
+
+There are now more servers which advertise support for IAKerb (passthrough
+Kerberos authentication via proxy). IAKerb is a public extension industry
+standard Kerberos protocol that allows a client without line-of-sight
+to a Domain Controller to authenticate. There can be cases where we
+would fail to mount if the server only advertises the OID for IAKerb
+in SPNEGO/GSSAPI. Add code to allow us to still upcall to userspace
+in these cases to obtain the Kerberos ticket.
+
+Signed-off-by: Steve French <stfrench@microsoft.com>
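+With this change, a server that only advertises the IAKerb OID still
+triggers the SPNEGO upcall, with the key description carrying
+";sec=iakerb" so cifs.upcall can fetch the ticket. An illustrative
+description (hypothetical host and ids; field layout per
+cifs_get_spnego_key()):
+
+    ver=0x2;host=srv.example.com;ip4=192.0.2.10;sec=iakerb;uid=0x0;creduid=0x3e8;pid=0x4d2
+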
+Stable-dep-of: 605b249ea967 ("smb: client: Fix match_session bug preventing session reuse")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/asn1.c | 2 ++
+ fs/smb/client/cifs_spnego.c | 4 +++-
+ fs/smb/client/cifsglob.h | 4 ++++
+ fs/smb/client/sess.c | 3 ++-
+ fs/smb/client/smb2pdu.c | 2 +-
+ 5 files changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/fs/smb/client/asn1.c b/fs/smb/client/asn1.c
+index b5724ef9f182f..214a44509e7b9 100644
+--- a/fs/smb/client/asn1.c
++++ b/fs/smb/client/asn1.c
+@@ -52,6 +52,8 @@ int cifs_neg_token_init_mech_type(void *context, size_t hdrlen,
+ server->sec_kerberos = true;
+ else if (oid == OID_ntlmssp)
+ server->sec_ntlmssp = true;
++ else if (oid == OID_IAKerb)
++ server->sec_iakerb = true;
+ else {
+ char buf[50];
+
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index af7849e5974ff..2ad067886ec3f 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -130,11 +130,13 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo,
+
+ dp = description + strlen(description);
+
+- /* for now, only sec=krb5 and sec=mskrb5 are valid */
++ /* for now, only sec=krb5, sec=mskrb5 and sec=iakerb are valid */
+ if (server->sec_kerberos)
+ sprintf(dp, ";sec=krb5");
+ else if (server->sec_mskerberos)
+ sprintf(dp, ";sec=mskrb5");
++ else if (server->sec_iakerb)
++ sprintf(dp, ";sec=iakerb");
+ else {
+ cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
+ sprintf(dp, ";sec=krb5");
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index b630beb757a44..a8484af7a2fbc 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -151,6 +151,7 @@ enum securityEnum {
+ NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
+ RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
+ Kerberos, /* Kerberos via SPNEGO */
++ IAKerb, /* Kerberos proxy */
+ };
+
+ enum cifs_reparse_type {
+@@ -743,6 +744,7 @@ struct TCP_Server_Info {
+ bool sec_kerberosu2u; /* supports U2U Kerberos */
+ bool sec_kerberos; /* supports plain Kerberos */
+ bool sec_mskerberos; /* supports legacy MS Kerberos */
++ bool sec_iakerb; /* supports pass-through auth for Kerberos (krb5 proxy) */
+ bool large_buf; /* is current buffer large? */
+ /* use SMBD connection instead of socket */
+ bool rdma;
+@@ -2115,6 +2117,8 @@ static inline char *get_security_type_str(enum securityEnum sectype)
+ return "Kerberos";
+ case NTLMv2:
+ return "NTLMv2";
++ case IAKerb:
++ return "IAKerb";
+ default:
+ return "Unknown";
+ }
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index c88e9657f47a8..95e14977baeab 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -1263,12 +1263,13 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
+ switch (requested) {
+ case Kerberos:
+ case RawNTLMSSP:
++ case IAKerb:
+ return requested;
+ case Unspecified:
+ if (server->sec_ntlmssp &&
+ (global_secflags & CIFSSEC_MAY_NTLMSSP))
+ return RawNTLMSSP;
+- if ((server->sec_kerberos || server->sec_mskerberos) &&
++ if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
+ (global_secflags & CIFSSEC_MAY_KRB5))
+ return Kerberos;
+ fallthrough;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 89a9b8ffe9d92..75b13175a2e78 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -1435,7 +1435,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
+ if (server->sec_ntlmssp &&
+ (global_secflags & CIFSSEC_MAY_NTLMSSP))
+ return RawNTLMSSP;
+- if ((server->sec_kerberos || server->sec_mskerberos) &&
++ if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
+ (global_secflags & CIFSSEC_MAY_KRB5))
+ return Kerberos;
+ fallthrough;
+--
+2.39.5
+