--- /dev/null
+From 97dfbbd135cb5e4426f37ca53a8fa87eaaa4e376 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Wed, 14 May 2025 18:06:02 +0100
+Subject: highmem: add folio_test_partial_kmap()
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit 97dfbbd135cb5e4426f37ca53a8fa87eaaa4e376 upstream.
+
+In commit c749d9b7ebbc ("iov_iter: fix copy_page_from_iter_atomic() if
+KMAP_LOCAL_FORCE_MAP"), Hugh correctly noted that if KMAP_LOCAL_FORCE_MAP
+is enabled, we must limit ourselves to PAGE_SIZE bytes per call to
+kmap_local(). The same problem exists in memcpy_from_folio(),
+memcpy_to_folio(), folio_zero_tail(), folio_fill_tail() and
+memcpy_from_file_folio(), so add folio_test_partial_kmap() to do this more
+succinctly.
+
+Link: https://lkml.kernel.org/r/20250514170607.3000994-2-willy@infradead.org
+Fixes: 00cdf76012ab ("mm: add memcpy_from_file_folio()")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/highmem.h | 6 +++---
+ include/linux/page-flags.h | 7 +++++++
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -448,7 +448,7 @@ static inline void memcpy_from_folio(cha
+ const char *from = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+- if (folio_test_highmem(folio) &&
++ if (folio_test_partial_kmap(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+@@ -469,7 +469,7 @@ static inline void memcpy_to_folio(struc
+ char *to = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+- if (folio_test_highmem(folio) &&
++ if (folio_test_partial_kmap(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+@@ -501,7 +501,7 @@ static inline size_t memcpy_from_file_fo
+ size_t offset = offset_in_folio(folio, pos);
+ char *from = kmap_local_folio(folio, offset);
+
+- if (folio_test_highmem(folio)) {
++ if (folio_test_partial_kmap(folio)) {
+ offset = offset_in_page(offset);
+ len = min_t(size_t, len, PAGE_SIZE - offset);
+ } else
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -551,6 +551,13 @@ PAGEFLAG(Readahead, readahead, PF_NO_COM
+ PAGEFLAG_FALSE(HighMem, highmem)
+ #endif
+
++/* Does kmap_local_folio() only allow access to one page of the folio? */
++#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
++#define folio_test_partial_kmap(f) true
++#else
++#define folio_test_partial_kmap(f) folio_test_highmem(f)
++#endif
++
+ #ifdef CONFIG_SWAP
+ static __always_inline bool folio_test_swapcache(struct folio *folio)
+ {
--- /dev/null
+From d9e2f070d8af60f2c8c02b2ddf0a9e90b4e9220c Mon Sep 17 00:00:00 2001
+From: Brett Creeley <brett.creeley@amd.com>
+Date: Mon, 21 Apr 2025 10:46:03 -0700
+Subject: pds_core: Prevent possible adminq overflow/stuck condition
+
+From: Brett Creeley <brett.creeley@amd.com>
+
+commit d9e2f070d8af60f2c8c02b2ddf0a9e90b4e9220c upstream.
+
+The pds_core's adminq is protected by the adminq_lock, which prevents
+more than 1 command from being posted onto it at any one time. This
+makes it so the client drivers cannot simultaneously post adminq commands.
+However, the completions happen in a different context, which means
+multiple adminq commands can be posted sequentially and all waiting
+on completion.
+
+On the FW side, the backing adminq request queue is only 16 entries
+long and the retry mechanism and/or overflow/stuck prevention is
+lacking. This can cause the adminq to get stuck, so commands are no
+longer processed and completions are no longer sent by the FW.
+
+As an initial fix, prevent more than 16 outstanding adminq commands so
+there's no way for the adminq to get stuck. This works
+because the backing adminq request queue will never have more than 16
+pending adminq commands, so it will never overflow. This is done by
+reducing the adminq depth to 16.
+
+Fixes: 45d76f492938 ("pds_core: set up device and adminq")
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Brett Creeley <brett.creeley@amd.com>
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250421174606.3892-2-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Alper Ak <alperyasinak1@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amd/pds_core/core.c | 5 +----
+ drivers/net/ethernet/amd/pds_core/core.h | 2 +-
+ 2 files changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/amd/pds_core/core.c
++++ b/drivers/net/ethernet/amd/pds_core/core.c
+@@ -413,10 +413,7 @@ int pdsc_setup(struct pdsc *pdsc, bool i
+ if (err)
+ return err;
+
+- /* Scale the descriptor ring length based on number of CPUs and VFs */
+- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+- numdescs = roundup_pow_of_two(numdescs);
++ numdescs = PDSC_ADMINQ_MAX_LENGTH;
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -16,7 +16,7 @@
+
+ #define PDSC_WATCHDOG_SECS 5
+ #define PDSC_QUEUE_NAME_MAX_SZ 16
+-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
++#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
+ #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+ #define PDSC_TEARDOWN_RECOVERY false
+ #define PDSC_TEARDOWN_REMOVING true