git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.19-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Mar 2026 12:12:17 +0000 (13:12 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Mar 2026 12:12:17 +0000 (13:12 +0100)
added patches:
arm64-contpte-fix-set_access_flags-no-op-check-for-smmu-ats-faults.patch
arm64-mm-add-pte_dirty-back-to-page_kernel-to-fix-kexec-hibernation.patch
ata-libata-core-disable-lpm-on-st1000dm010-2ep102.patch
batman-adv-avoid-double-rtnl_lock-elp-metric-worker.patch
crypto-ccp-allow-callers-to-use-hv-fixed-page-api-when-sev-is-disabled.patch
drm-amd-display-fallback-to-boot-snapshot-for-dispclk.patch
drm-ttm-fix-ttm_pool_beneficial_order-return-type.patch
drm-xe-xe2_hpg-correct-implementation-of-wa_16025250150.patch
fgraph-fix-thresh_return-clear-per-task-notrace.patch
fgraph-fix-thresh_return-nosleeptime-double-adjust.patch
hwmon-pmbus-q54sj108a2-fix-stack-overflow-in-debugfs-read.patch
ice-fix-retry-for-aq-command-0x06ee.patch
io_uring-net-reject-send_vectorized-when-unsupported.patch
io_uring-zcrx-use-read_once-with-user-shared-rqes.patch
ksmbd-don-t-log-keys-in-smb3-signing-and-encryption-key-generation.patch
ksmbd-fix-use-after-free-by-using-call_rcu-for-oplock_info.patch
ksmbd-fix-use-after-free-in-smb_lazy_parent_lease_break_close.patch
media-dvb-net-fix-oob-access-in-ule-extension-header-tables.patch
memcg-fix-slab-accounting-in-refill_obj_stock-trylock-path.patch
net-dsa-microchip-fix-error-path-in-ptp-irq-setup.patch
net-ethernet-arc-emac-quiesce-interrupts-before-requesting-irq.patch
net-fix-rcu_tasks-stall-in-threaded-busypoll.patch
net-macb-shuffle-the-tx-ring-before-enabling-tx.patch
net-mana-ring-doorbell-at-4-cq-wraparounds.patch
net-mctp-fix-device-leak-on-probe-failure.patch
net-ncsi-fix-skb-leak-in-error-paths.patch
net-nexthop-fix-percpu-use-after-free-in-remove_nh_grp_entry.patch
net-tcp-ao-fix-mac-comparison-to-be-constant-time.patch
net-tcp-md5-fix-mac-comparison-to-be-constant-time.patch
nouveau-dpcd-return-ebusy-for-aux-xfer-if-the-device-is-asleep.patch
parisc-check-kernel-mapping-earlier-at-bootup.patch
parisc-fix-initial-page-table-creation-for-boot.patch
parisc-increase-initial-mapping-to-64-mb-with-kallsyms.patch
pinctrl-cy8c95x0-don-t-miss-reading-the-last-bank-registers.patch
pmdomain-bcm-bcm2835-power-fix-broken-reset-status-read.patch
pmdomain-rockchip-fix-pd_vcodec-for-rk3588.patch
regulator-pf9453-respect-irq-trigger-settings-from-firmware.patch
s390-stackleak-fix-__stackleak_poison-inline-assembly-constraint.patch
s390-xor-fix-xor_xc_2-inline-assembly-constraints.patch
s390-xor-fix-xor_xc_5-inline-assembly.patch
selftests-fix-mntns-iteration-selftests.patch
slab-distinguish-lock-and-trylock-for-sheaf_flush_main.patch
smb-server-fix-use-after-free-in-smb2_open.patch
tracing-fix-syscall-events-activation-by-ensuring-refcount-hits-zero.patch

45 files changed:
queue-6.19/arm64-contpte-fix-set_access_flags-no-op-check-for-smmu-ats-faults.patch [new file with mode: 0644]
queue-6.19/arm64-mm-add-pte_dirty-back-to-page_kernel-to-fix-kexec-hibernation.patch [new file with mode: 0644]
queue-6.19/ata-libata-core-disable-lpm-on-st1000dm010-2ep102.patch [new file with mode: 0644]
queue-6.19/batman-adv-avoid-double-rtnl_lock-elp-metric-worker.patch [new file with mode: 0644]
queue-6.19/crypto-ccp-allow-callers-to-use-hv-fixed-page-api-when-sev-is-disabled.patch [new file with mode: 0644]
queue-6.19/drm-amd-display-fallback-to-boot-snapshot-for-dispclk.patch [new file with mode: 0644]
queue-6.19/drm-ttm-fix-ttm_pool_beneficial_order-return-type.patch [new file with mode: 0644]
queue-6.19/drm-xe-xe2_hpg-correct-implementation-of-wa_16025250150.patch [new file with mode: 0644]
queue-6.19/fgraph-fix-thresh_return-clear-per-task-notrace.patch [new file with mode: 0644]
queue-6.19/fgraph-fix-thresh_return-nosleeptime-double-adjust.patch [new file with mode: 0644]
queue-6.19/hwmon-pmbus-q54sj108a2-fix-stack-overflow-in-debugfs-read.patch [new file with mode: 0644]
queue-6.19/ice-fix-retry-for-aq-command-0x06ee.patch [new file with mode: 0644]
queue-6.19/io_uring-net-reject-send_vectorized-when-unsupported.patch [new file with mode: 0644]
queue-6.19/io_uring-zcrx-use-read_once-with-user-shared-rqes.patch [new file with mode: 0644]
queue-6.19/ksmbd-don-t-log-keys-in-smb3-signing-and-encryption-key-generation.patch [new file with mode: 0644]
queue-6.19/ksmbd-fix-use-after-free-by-using-call_rcu-for-oplock_info.patch [new file with mode: 0644]
queue-6.19/ksmbd-fix-use-after-free-in-smb_lazy_parent_lease_break_close.patch [new file with mode: 0644]
queue-6.19/media-dvb-net-fix-oob-access-in-ule-extension-header-tables.patch [new file with mode: 0644]
queue-6.19/memcg-fix-slab-accounting-in-refill_obj_stock-trylock-path.patch [new file with mode: 0644]
queue-6.19/net-dsa-microchip-fix-error-path-in-ptp-irq-setup.patch [new file with mode: 0644]
queue-6.19/net-ethernet-arc-emac-quiesce-interrupts-before-requesting-irq.patch [new file with mode: 0644]
queue-6.19/net-fix-rcu_tasks-stall-in-threaded-busypoll.patch [new file with mode: 0644]
queue-6.19/net-macb-shuffle-the-tx-ring-before-enabling-tx.patch [new file with mode: 0644]
queue-6.19/net-mana-ring-doorbell-at-4-cq-wraparounds.patch [new file with mode: 0644]
queue-6.19/net-mctp-fix-device-leak-on-probe-failure.patch [new file with mode: 0644]
queue-6.19/net-ncsi-fix-skb-leak-in-error-paths.patch [new file with mode: 0644]
queue-6.19/net-nexthop-fix-percpu-use-after-free-in-remove_nh_grp_entry.patch [new file with mode: 0644]
queue-6.19/net-tcp-ao-fix-mac-comparison-to-be-constant-time.patch [new file with mode: 0644]
queue-6.19/net-tcp-md5-fix-mac-comparison-to-be-constant-time.patch [new file with mode: 0644]
queue-6.19/nouveau-dpcd-return-ebusy-for-aux-xfer-if-the-device-is-asleep.patch [new file with mode: 0644]
queue-6.19/parisc-check-kernel-mapping-earlier-at-bootup.patch [new file with mode: 0644]
queue-6.19/parisc-fix-initial-page-table-creation-for-boot.patch [new file with mode: 0644]
queue-6.19/parisc-increase-initial-mapping-to-64-mb-with-kallsyms.patch [new file with mode: 0644]
queue-6.19/pinctrl-cy8c95x0-don-t-miss-reading-the-last-bank-registers.patch [new file with mode: 0644]
queue-6.19/pmdomain-bcm-bcm2835-power-fix-broken-reset-status-read.patch [new file with mode: 0644]
queue-6.19/pmdomain-rockchip-fix-pd_vcodec-for-rk3588.patch [new file with mode: 0644]
queue-6.19/regulator-pf9453-respect-irq-trigger-settings-from-firmware.patch [new file with mode: 0644]
queue-6.19/s390-stackleak-fix-__stackleak_poison-inline-assembly-constraint.patch [new file with mode: 0644]
queue-6.19/s390-xor-fix-xor_xc_2-inline-assembly-constraints.patch [new file with mode: 0644]
queue-6.19/s390-xor-fix-xor_xc_5-inline-assembly.patch [new file with mode: 0644]
queue-6.19/selftests-fix-mntns-iteration-selftests.patch [new file with mode: 0644]
queue-6.19/series
queue-6.19/slab-distinguish-lock-and-trylock-for-sheaf_flush_main.patch [new file with mode: 0644]
queue-6.19/smb-server-fix-use-after-free-in-smb2_open.patch [new file with mode: 0644]
queue-6.19/tracing-fix-syscall-events-activation-by-ensuring-refcount-hits-zero.patch [new file with mode: 0644]

diff --git a/queue-6.19/arm64-contpte-fix-set_access_flags-no-op-check-for-smmu-ats-faults.patch b/queue-6.19/arm64-contpte-fix-set_access_flags-no-op-check-for-smmu-ats-faults.patch
new file mode 100644 (file)
index 0000000..87bf283
--- /dev/null
@@ -0,0 +1,133 @@
+From 97c5550b763171dbef61e6239cab372b9f9cd4a2 Mon Sep 17 00:00:00 2001
+From: Piotr Jaroszynski <pjaroszynski@nvidia.com>
+Date: Thu, 5 Mar 2026 15:26:29 -0800
+Subject: arm64: contpte: fix set_access_flags() no-op check for SMMU/ATS faults
+
+From: Piotr Jaroszynski <pjaroszynski@nvidia.com>
+
+commit 97c5550b763171dbef61e6239cab372b9f9cd4a2 upstream.
+
+contpte_ptep_set_access_flags() compared the gathered ptep_get() value
+against the requested entry to detect no-ops. ptep_get() ORs AF/dirty
+from all sub-PTEs in the CONT block, so a dirty sibling can make the
+target appear already-dirty. When the gathered value matches entry, the
+function returns 0 even though the target sub-PTE still has PTE_RDONLY
+set in hardware.
+
+For a CPU with FEAT_HAFDBS this gathered view is fine, since hardware may
+set AF/dirty on any sub-PTE and CPU TLB behavior is effectively gathered
+across the CONT range. But page-table walkers that evaluate each
+descriptor individually (e.g. a CPU without DBM support, or an SMMU
+without HTTU, or with HA/HD disabled in CD.TCR) can keep faulting on the
+unchanged target sub-PTE, causing an infinite fault loop.
+
+Gathering can therefore cause false no-ops when only a sibling has been
+updated:
+ - write faults: target still has PTE_RDONLY (needs PTE_RDONLY cleared)
+ - read faults:  target still lacks PTE_AF
+
+Fix by checking each sub-PTE against the requested AF/dirty/write state
+(the same bits consumed by __ptep_set_access_flags()), using raw
+per-PTE values rather than the gathered ptep_get() view, before
+returning no-op. Keep using the raw target PTE for the write-bit unfold
+decision.
+
+Per Arm ARM (DDI 0487) D8.7.1 ("The Contiguous bit"), any sub-PTE in a CONT
+range may become the effective cached translation and software must
+maintain consistent attributes across the range.
+
+Fixes: 4602e5757bcc ("arm64/mm: wire up PTE_CONT for user mappings")
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: Breno Leitao <leitao@debian.org>
+Cc: stable@vger.kernel.org
+Reviewed-by: Alistair Popple <apopple@nvidia.com>
+Reviewed-by: James Houghton <jthoughton@google.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Breno Leitao <leitao@debian.org>
+Signed-off-by: Piotr Jaroszynski <pjaroszynski@nvidia.com>
+Acked-by: Balbir Singh <balbirs@nvidia.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/contpte.c |   53 ++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 49 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/mm/contpte.c
++++ b/arch/arm64/mm/contpte.c
+@@ -581,6 +581,27 @@ void contpte_clear_young_dirty_ptes(stru
+ }
+ EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
++static bool contpte_all_subptes_match_access_flags(pte_t *ptep, pte_t entry)
++{
++      pte_t *cont_ptep = contpte_align_down(ptep);
++      /*
++       * PFNs differ per sub-PTE. Match only bits consumed by
++       * __ptep_set_access_flags(): AF, DIRTY and write permission.
++       */
++      const pteval_t cmp_mask = PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;
++      pteval_t entry_cmp = pte_val(entry) & cmp_mask;
++      int i;
++
++      for (i = 0; i < CONT_PTES; i++) {
++              pteval_t pte_cmp = pte_val(__ptep_get(cont_ptep + i)) & cmp_mask;
++
++              if (pte_cmp != entry_cmp)
++                      return false;
++      }
++
++      return true;
++}
++
+ int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
+                                       unsigned long addr, pte_t *ptep,
+                                       pte_t entry, int dirty)
+@@ -590,14 +611,38 @@ int contpte_ptep_set_access_flags(struct
+       int i;
+       /*
+-       * Gather the access/dirty bits for the contiguous range. If nothing has
+-       * changed, its a noop.
++       * Check whether all sub-PTEs in the CONT block already match the
++       * requested access flags/write permission, using raw per-PTE values
++       * rather than the gathered ptep_get() view.
++       *
++       * __ptep_set_access_flags() can update AF, dirty and write
++       * permission, but only to make the mapping more permissive.
++       *
++       * ptep_get() gathers AF/dirty state across the whole CONT block,
++       * which is correct for a CPU with FEAT_HAFDBS. But page-table
++       * walkers that evaluate each descriptor individually (e.g. a CPU
++       * without DBM support, or an SMMU without HTTU, or with HA/HD
++       * disabled in CD.TCR) can keep faulting on the target sub-PTE if
++       * only a sibling has been updated. Gathering can therefore cause
++       * false no-ops when only a sibling has been updated:
++       *  - write faults: target still has PTE_RDONLY (needs PTE_RDONLY cleared)
++       *  - read faults:  target still lacks PTE_AF
++       *
++       * Per Arm ARM (DDI 0487) D8.7.1, any sub-PTE in a CONT range may
++       * become the effective cached translation, so all entries must have
++       * consistent attributes. Check the full CONT block before returning
++       * no-op, and when any sub-PTE mismatches, proceed to update the whole
++       * range.
+        */
+-      orig_pte = pte_mknoncont(ptep_get(ptep));
+-      if (pte_val(orig_pte) == pte_val(entry))
++      if (contpte_all_subptes_match_access_flags(ptep, entry))
+               return 0;
+       /*
++       * Use raw target pte (not gathered) for write-bit unfold decision.
++       */
++      orig_pte = pte_mknoncont(__ptep_get(ptep));
++
++      /*
+        * We can fix up access/dirty bits without having to unfold the contig
+        * range. But if the write bit is changing, we must unfold.
+        */
diff --git a/queue-6.19/arm64-mm-add-pte_dirty-back-to-page_kernel-to-fix-kexec-hibernation.patch b/queue-6.19/arm64-mm-add-pte_dirty-back-to-page_kernel-to-fix-kexec-hibernation.patch
new file mode 100644 (file)
index 0000000..1837241
--- /dev/null
@@ -0,0 +1,63 @@
+From c25c4aa3f79a488cc270507935a29c07dc6bddfc Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Fri, 27 Feb 2026 18:53:06 +0000
+Subject: arm64: mm: Add PTE_DIRTY back to PAGE_KERNEL* to fix kexec/hibernation
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit c25c4aa3f79a488cc270507935a29c07dc6bddfc upstream.
+
+Commit 143937ca51cc ("arm64, mm: avoid always making PTE dirty in
+pte_mkwrite()") changed pte_mkwrite_novma() to only clear PTE_RDONLY
+when PTE_DIRTY is set. This was to allow writable-clean PTEs for swap
+pages that haven't actually been written.
+
+However, this broke kexec and hibernation for some platforms. Both go
+through trans_pgd_create_copy() -> _copy_pte(), which calls
+pte_mkwrite_novma() to make the temporary linear-map copy fully
+writable. With the updated pte_mkwrite_novma(), read-only kernel pages
+(without PTE_DIRTY) remain read-only in the temporary mapping.
+While such behaviour is fine for user pages where hardware DBM or
+trapping will make them writeable, subsequent in-kernel writes by the
+kexec relocation code will fault.
+
+Add PTE_DIRTY back to all _PAGE_KERNEL* protection definitions. This was
+the case prior to 5.4, commit aa57157be69f ("arm64: Ensure
+VM_WRITE|VM_SHARED ptes are clean by default"). With the kernel
+linear-map PTEs always having PTE_DIRTY set, pte_mkwrite_novma()
+correctly clears PTE_RDONLY.
+
+Fixes: 143937ca51cc ("arm64, mm: avoid always making PTE dirty in pte_mkwrite()")
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: stable@vger.kernel.org
+Reported-by: Jianpeng Chang <jianpeng.chang.cn@windriver.com>
+Link: https://lore.kernel.org/r/20251204062722.3367201-1-jianpeng.chang.cn@windriver.com
+Cc: Will Deacon <will@kernel.org>
+Cc: Huang, Ying <ying.huang@linux.alibaba.com>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Huang Ying <ying.huang@linux.alibaba.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/pgtable-prot.h |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -50,11 +50,11 @@
+ #define _PAGE_DEFAULT         (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+-#define _PAGE_KERNEL          (PROT_NORMAL)
+-#define _PAGE_KERNEL_RO               ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
+-#define _PAGE_KERNEL_ROX      ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
+-#define _PAGE_KERNEL_EXEC     (PROT_NORMAL & ~PTE_PXN)
+-#define _PAGE_KERNEL_EXEC_CONT        ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
++#define _PAGE_KERNEL          (PROT_NORMAL | PTE_DIRTY)
++#define _PAGE_KERNEL_RO               ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY | PTE_DIRTY)
++#define _PAGE_KERNEL_ROX      ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY | PTE_DIRTY)
++#define _PAGE_KERNEL_EXEC     ((PROT_NORMAL & ~PTE_PXN) | PTE_DIRTY)
++#define _PAGE_KERNEL_EXEC_CONT        ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT | PTE_DIRTY)
+ #define _PAGE_SHARED          (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+ #define _PAGE_SHARED_EXEC     (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
diff --git a/queue-6.19/ata-libata-core-disable-lpm-on-st1000dm010-2ep102.patch b/queue-6.19/ata-libata-core-disable-lpm-on-st1000dm010-2ep102.patch
new file mode 100644 (file)
index 0000000..11cedbf
--- /dev/null
@@ -0,0 +1,35 @@
+From b3b1d3ae1d87bc9398fb715c945968bf4c75a09a Mon Sep 17 00:00:00 2001
+From: Maximilian Pezzullo <maximilianpezzullo@gmail.com>
+Date: Wed, 4 Mar 2026 08:22:59 +0100
+Subject: ata: libata-core: Disable LPM on ST1000DM010-2EP102
+
+From: Maximilian Pezzullo <maximilianpezzullo@gmail.com>
+
+commit b3b1d3ae1d87bc9398fb715c945968bf4c75a09a upstream.
+
+According to a user report, the ST1000DM010-2EP102 has problems with LPM,
+causing random system freezes. The drive belongs to the same BarraCuda
+family as the ST2000DM008-2FR102 which has the same issue.
+
+Cc: stable@vger.kernel.org
+Fixes: 7627a0edef54 ("ata: ahci: Drop low power policy board type")
+Reported-by: Filippo Baiamonte <filippo.ba03@bugzilla.kernel.org>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=221163
+Signed-off-by: Maximilian Pezzullo <maximilianpezzullo@gmail.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-core.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4186,6 +4186,7 @@ static const struct ata_dev_quirks_entry
+                                               ATA_QUIRK_FIRMWARE_WARN },
+       /* Seagate disks with LPM issues */
++      { "ST1000DM010-2EP102", NULL,           ATA_QUIRK_NOLPM },
+       { "ST2000DM008-2FR102", NULL,           ATA_QUIRK_NOLPM },
+       /* drives which fail FPDMA_AA activation (some may freeze afterwards)
diff --git a/queue-6.19/batman-adv-avoid-double-rtnl_lock-elp-metric-worker.patch b/queue-6.19/batman-adv-avoid-double-rtnl_lock-elp-metric-worker.patch
new file mode 100644 (file)
index 0000000..0f56b8a
--- /dev/null
@@ -0,0 +1,106 @@
+From cfc83a3c71517b59c1047db57da31e26a9dc2f33 Mon Sep 17 00:00:00 2001
+From: Sven Eckelmann <sven@narfation.org>
+Date: Mon, 16 Feb 2026 11:20:29 +0100
+Subject: batman-adv: Avoid double-rtnl_lock ELP metric worker
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sven Eckelmann <sven@narfation.org>
+
+commit cfc83a3c71517b59c1047db57da31e26a9dc2f33 upstream.
+
+batadv_v_elp_get_throughput() might be called when the RTNL lock is already
+held. This could be problematic when the work queue item is cancelled via
+cancel_delayed_work_sync() in batadv_v_elp_iface_disable(). In this case,
+an rtnl_lock() would cause a deadlock.
+
+To avoid this, rtnl_trylock() was used in this function to skip the
+retrieval of the ethtool information in case the RTNL lock was already
+held.
+
+But for cfg80211 interfaces, batadv_get_real_netdev() was called - which
+also uses rtnl_lock(). The approach for __ethtool_get_link_ksettings() must
+also be used instead and the lockless version __batadv_get_real_netdev()
+has to be called.
+
+Cc: stable@vger.kernel.org
+Fixes: 8c8ecc98f5c6 ("batman-adv: Drop unmanaged ELP metric worker")
+Reported-by: Christian Schmidbauer <github@grische.xyz>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Tested-by: Sören Skaarup <freifunk_nordm4nn@gmx.de>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/bat_v_elp.c      |   10 +++++++++-
+ net/batman-adv/hard-interface.c |    8 ++++----
+ net/batman-adv/hard-interface.h |    1 +
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -111,7 +111,15 @@ static bool batadv_v_elp_get_throughput(
+                       /* unsupported WiFi driver version */
+                       goto default_throughput;
+-              real_netdev = batadv_get_real_netdev(hard_iface->net_dev);
++              /* only use rtnl_trylock because the elp worker will be cancelled while
++               * the rntl_lock is held. the cancel_delayed_work_sync() would otherwise
++               * wait forever when the elp work_item was started and it is then also
++               * trying to rtnl_lock
++               */
++              if (!rtnl_trylock())
++                      return false;
++              real_netdev = __batadv_get_real_netdev(hard_iface->net_dev);
++              rtnl_unlock();
+               if (!real_netdev)
+                       goto default_throughput;
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -204,7 +204,7 @@ static bool batadv_is_valid_iface(const
+ }
+ /**
+- * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
++ * __batadv_get_real_netdev() - check if the given netdev struct is a virtual
+  *  interface on top of another 'real' interface
+  * @netdev: the device to check
+  *
+@@ -214,7 +214,7 @@ static bool batadv_is_valid_iface(const
+  * Return: the 'real' net device or the original net device and NULL in case
+  *  of an error.
+  */
+-static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
++struct net_device *__batadv_get_real_netdev(struct net_device *netdev)
+ {
+       struct batadv_hard_iface *hard_iface = NULL;
+       struct net_device *real_netdev = NULL;
+@@ -267,7 +267,7 @@ struct net_device *batadv_get_real_netde
+       struct net_device *real_netdev;
+       rtnl_lock();
+-      real_netdev = batadv_get_real_netdevice(net_device);
++      real_netdev = __batadv_get_real_netdev(net_device);
+       rtnl_unlock();
+       return real_netdev;
+@@ -336,7 +336,7 @@ static u32 batadv_wifi_flags_evaluate(st
+       if (batadv_is_cfg80211_netdev(net_device))
+               wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT;
+-      real_netdev = batadv_get_real_netdevice(net_device);
++      real_netdev = __batadv_get_real_netdev(net_device);
+       if (!real_netdev)
+               return wifi_flags;
+--- a/net/batman-adv/hard-interface.h
++++ b/net/batman-adv/hard-interface.h
+@@ -67,6 +67,7 @@ enum batadv_hard_if_bcast {
+ extern struct notifier_block batadv_hard_if_notifier;
++struct net_device *__batadv_get_real_netdev(struct net_device *net_device);
+ struct net_device *batadv_get_real_netdev(struct net_device *net_device);
+ bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface);
+ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface);
diff --git a/queue-6.19/crypto-ccp-allow-callers-to-use-hv-fixed-page-api-when-sev-is-disabled.patch b/queue-6.19/crypto-ccp-allow-callers-to-use-hv-fixed-page-api-when-sev-is-disabled.patch
new file mode 100644 (file)
index 0000000..50cd785
--- /dev/null
@@ -0,0 +1,63 @@
+From 8168a7b72bdee3790b126f63bd30306759206b15 Mon Sep 17 00:00:00 2001
+From: Ashish Kalra <ashish.kalra@amd.com>
+Date: Fri, 6 Feb 2026 21:26:45 +0000
+Subject: crypto: ccp - allow callers to use HV-Fixed page API when SEV is disabled
+
+From: Ashish Kalra <ashish.kalra@amd.com>
+
+commit 8168a7b72bdee3790b126f63bd30306759206b15 upstream.
+
+When SEV is disabled, the HV-Fixed page allocation call fails, which in
+turn causes SFS initialization to fail.
+
+Fix the HV-Fixed API so callers (for example, SFS) can use it even when
+SEV is disabled by performing normal page allocation and freeing.
+
+Fixes: e09701dcdd9c ("crypto: ccp - Add new HV-Fixed page allocation/free API")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/ccp/sev-dev.c |   10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -1105,15 +1105,12 @@ struct page *snp_alloc_hv_fixed_pages(un
+ {
+       struct psp_device *psp_master = psp_get_master_device();
+       struct snp_hv_fixed_pages_entry *entry;
+-      struct sev_device *sev;
+       unsigned int order;
+       struct page *page;
+-      if (!psp_master || !psp_master->sev_data)
++      if (!psp_master)
+               return NULL;
+-      sev = psp_master->sev_data;
+-
+       order = get_order(PMD_SIZE * num_2mb_pages);
+       /*
+@@ -1126,7 +1123,8 @@ struct page *snp_alloc_hv_fixed_pages(un
+        * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
+        * page state, fail if SNP is already initialized.
+        */
+-      if (sev->snp_initialized)
++      if (psp_master->sev_data &&
++          ((struct sev_device *)psp_master->sev_data)->snp_initialized)
+               return NULL;
+       /* Re-use freed pages that match the request */
+@@ -1162,7 +1160,7 @@ void snp_free_hv_fixed_pages(struct page
+       struct psp_device *psp_master = psp_get_master_device();
+       struct snp_hv_fixed_pages_entry *entry, *nentry;
+-      if (!psp_master || !psp_master->sev_data)
++      if (!psp_master)
+               return;
+       /*
diff --git a/queue-6.19/drm-amd-display-fallback-to-boot-snapshot-for-dispclk.patch b/queue-6.19/drm-amd-display-fallback-to-boot-snapshot-for-dispclk.patch
new file mode 100644 (file)
index 0000000..72484ea
--- /dev/null
@@ -0,0 +1,42 @@
+From 30d937f63bd19bbcaafa4b892eb251f8bbbf04ef Mon Sep 17 00:00:00 2001
+From: Dillon Varone <Dillon.Varone@amd.com>
+Date: Wed, 18 Feb 2026 14:34:28 -0500
+Subject: drm/amd/display: Fallback to boot snapshot for dispclk
+
+From: Dillon Varone <Dillon.Varone@amd.com>
+
+commit 30d937f63bd19bbcaafa4b892eb251f8bbbf04ef upstream.
+
+[WHY & HOW]
+If the dentist is unavailable, fallback to reading CLKIP via the boot
+snapshot to get the current dispclk.
+
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: Dillon Varone <Dillon.Varone@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 2ab77600d1e55a042c02437326d3c7563e853c6c)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+@@ -71,7 +71,11 @@ void dcn401_initialize_min_clocks(struct
+                * audio corruption. Read current DISPCLK from DENTIST and request the same
+                * freq to ensure that the timing is valid and unchanged.
+                */
+-              clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
++              if (dc->clk_mgr->funcs->get_dispclk_from_dentist) {
++                      clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
++              } else {
++                      clocks->dispclk_khz = dc->clk_mgr->boot_snapshot.dispclk * 1000;
++              }
+       }
+       clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+       clocks->fclk_p_state_change_support = true;
diff --git a/queue-6.19/drm-ttm-fix-ttm_pool_beneficial_order-return-type.patch b/queue-6.19/drm-ttm-fix-ttm_pool_beneficial_order-return-type.patch
new file mode 100644 (file)
index 0000000..d937e41
--- /dev/null
@@ -0,0 +1,47 @@
+From 6e3f4514e3b432871ac81717d24f56b441857f77 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Fri, 27 Feb 2026 12:49:01 +0000
+Subject: drm/ttm: Fix ttm_pool_beneficial_order() return type
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 6e3f4514e3b432871ac81717d24f56b441857f77 upstream.
+
+Fix a nasty copy and paste bug, where the incorrect boolean return type of
+the ttm_pool_beneficial_order() helper had a consequence of avoiding
+direct reclaim too eagerly for drivers which use this feature (currently
+amdgpu).
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: 7e9c548d3709 ("drm/ttm: Allow drivers to specify maximum beneficial TTM pool size")
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.19+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+Link: https://lore.kernel.org/r/20260227124901.3177-1-tvrtko.ursulin@igalia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ttm/ttm_pool_internal.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_pool_internal.h b/drivers/gpu/drm/ttm/ttm_pool_internal.h
+index 82c4b7e56a99..24c179fd69d1 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool_internal.h
++++ b/drivers/gpu/drm/ttm/ttm_pool_internal.h
+@@ -17,7 +17,7 @@ static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
+       return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32;
+ }
+-static inline bool ttm_pool_beneficial_order(struct ttm_pool *pool)
++static inline unsigned int ttm_pool_beneficial_order(struct ttm_pool *pool)
+ {
+       return pool->alloc_flags & 0xff;
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.19/drm-xe-xe2_hpg-correct-implementation-of-wa_16025250150.patch b/queue-6.19/drm-xe-xe2_hpg-correct-implementation-of-wa_16025250150.patch
new file mode 100644 (file)
index 0000000..f63cb23
--- /dev/null
@@ -0,0 +1,52 @@
+From 89865e6dc8487b627302bdced3f965cd0c406835 Mon Sep 17 00:00:00 2001
+From: Matt Roper <matthew.d.roper@intel.com>
+Date: Fri, 27 Feb 2026 08:43:41 -0800
+Subject: drm/xe/xe2_hpg: Correct implementation of Wa_16025250150
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+commit 89865e6dc8487b627302bdced3f965cd0c406835 upstream.
+
+Wa_16025250150 asks us to set five register fields of the register to
+0x1 each.  However we were just OR'ing this into the existing register
+value (which has a default of 0x4 for each nibble-sized field) resulting
+in final field values of 0x5 instead of the desired 0x1.  Correct the
+RTP programming (use FIELD_SET instead of SET) to ensure each field is
+assigned to exactly the value we want.
+
+Cc: Aradhya Bhatia <aradhya.bhatia@intel.com>
+Cc: Tejas Upadhyay <tejas.upadhyay@intel.com>
+Cc: stable@vger.kernel.org # v6.16+
+Fixes: 7654d51f1fd8 ("drm/xe/xe2hpg: Add Wa_16025250150")
+Reviewed-by: Ngai-Mint Kwan <ngai-mint.kwan@linux.intel.com>
+Link: https://patch.msgid.link/20260227164341.3600098-2-matthew.d.roper@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit d139209ef88e48af1f6731cd45440421c757b6b5)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_wa.c |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_wa.c
++++ b/drivers/gpu/drm/xe/xe_wa.c
+@@ -255,12 +255,13 @@ static const struct xe_rtp_entry_sr gt_w
+       { XE_RTP_NAME("16025250150"),
+         XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+-        XE_RTP_ACTIONS(SET(LSN_VC_REG2,
+-                           LSN_LNI_WGT(1) |
+-                           LSN_LNE_WGT(1) |
+-                           LSN_DIM_X_WGT(1) |
+-                           LSN_DIM_Y_WGT(1) |
+-                           LSN_DIM_Z_WGT(1)))
++        XE_RTP_ACTIONS(FIELD_SET(LSN_VC_REG2,
++                                 LSN_LNI_WGT_MASK | LSN_LNE_WGT_MASK |
++                                 LSN_DIM_X_WGT_MASK | LSN_DIM_Y_WGT_MASK |
++                                 LSN_DIM_Z_WGT_MASK,
++                                 LSN_LNI_WGT(1) | LSN_LNE_WGT(1) |
++                                 LSN_DIM_X_WGT(1) | LSN_DIM_Y_WGT(1) |
++                                 LSN_DIM_Z_WGT(1)))
+       },
+       /* Xe2_HPM */
diff --git a/queue-6.19/fgraph-fix-thresh_return-clear-per-task-notrace.patch b/queue-6.19/fgraph-fix-thresh_return-clear-per-task-notrace.patch
new file mode 100644 (file)
index 0000000..f367b8b
--- /dev/null
@@ -0,0 +1,50 @@
+From 6ca8379b5d36e22b04e6315c3e49a6083377c862 Mon Sep 17 00:00:00 2001
+From: Shengming Hu <hu.shengming@zte.com.cn>
+Date: Sat, 21 Feb 2026 11:30:07 +0800
+Subject: fgraph: Fix thresh_return clear per-task notrace
+
+From: Shengming Hu <hu.shengming@zte.com.cn>
+
+commit 6ca8379b5d36e22b04e6315c3e49a6083377c862 upstream.
+
+When tracing_thresh is enabled, function graph tracing uses
+trace_graph_thresh_return() as the return handler. Unlike
+trace_graph_return(), it did not clear the per-task TRACE_GRAPH_NOTRACE
+flag set by the entry handler for set_graph_notrace addresses. This could
+leave the task permanently in "notrace" state and effectively disable
+function graph tracing for that task.
+
+Mirror trace_graph_return()'s per-task notrace handling by clearing
+TRACE_GRAPH_NOTRACE and returning early when set.
+
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260221113007819YgrZsMGABff4Rc-O_fZxL@zte.com.cn
+Fixes: b84214890a9bc ("function_graph: Move graph notrace bit to shadow stack global var")
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Shengming Hu <hu.shengming@zte.com.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_functions_graph.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -400,14 +400,15 @@ static void trace_graph_thresh_return(st
+                                     struct fgraph_ops *gops,
+                                     struct ftrace_regs *fregs)
+ {
++      unsigned long *task_var = fgraph_get_task_var(gops);
+       struct fgraph_times *ftimes;
+       struct trace_array *tr;
+       int size;
+       ftrace_graph_addr_finish(gops, trace);
+-      if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+-              trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
++      if (*task_var & TRACE_GRAPH_NOTRACE) {
++              *task_var &= ~TRACE_GRAPH_NOTRACE;
+               return;
+       }
diff --git a/queue-6.19/fgraph-fix-thresh_return-nosleeptime-double-adjust.patch b/queue-6.19/fgraph-fix-thresh_return-nosleeptime-double-adjust.patch
new file mode 100644 (file)
index 0000000..2f43e0b
--- /dev/null
@@ -0,0 +1,63 @@
+From b96d0c59cdbb2a22b2545f6f3d5c6276b05761dd Mon Sep 17 00:00:00 2001
+From: Shengming Hu <hu.shengming@zte.com.cn>
+Date: Sat, 21 Feb 2026 11:33:14 +0800
+Subject: fgraph: Fix thresh_return nosleeptime double-adjust
+
+From: Shengming Hu <hu.shengming@zte.com.cn>
+
+commit b96d0c59cdbb2a22b2545f6f3d5c6276b05761dd upstream.
+
+trace_graph_thresh_return() called handle_nosleeptime() and then delegated
+to trace_graph_return(), which calls handle_nosleeptime() again. When
+sleep-time accounting is disabled this double-adjusts calltime and can
+produce bogus durations (including underflow).
+
+Fix this by computing rettime once, applying handle_nosleeptime() only
+once, using the adjusted calltime for threshold comparison, and writing
+the return event directly via __trace_graph_return() when the threshold is
+met.
+
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260221113314048jE4VRwIyZEALiYByGK0My@zte.com.cn
+Fixes: 3c9880f3ab52b ("ftrace: Use a running sleeptime instead of saving on shadow stack")
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Shengming Hu <hu.shengming@zte.com.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_functions_graph.c |   14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -403,8 +403,12 @@ static void trace_graph_thresh_return(st
+       unsigned long *task_var = fgraph_get_task_var(gops);
+       struct fgraph_times *ftimes;
+       struct trace_array *tr;
++      unsigned int trace_ctx;
++      u64 calltime, rettime;
+       int size;
++      rettime = trace_clock_local();
++
+       ftrace_graph_addr_finish(gops, trace);
+       if (*task_var & TRACE_GRAPH_NOTRACE) {
+@@ -419,11 +423,13 @@ static void trace_graph_thresh_return(st
+       tr = gops->private;
+       handle_nosleeptime(tr, trace, ftimes, size);
+-      if (tracing_thresh &&
+-          (trace_clock_local() - ftimes->calltime < tracing_thresh))
++      calltime = ftimes->calltime;
++
++      if (tracing_thresh && (rettime - calltime < tracing_thresh))
+               return;
+-      else
+-              trace_graph_return(trace, gops, fregs);
++
++      trace_ctx = tracing_gen_ctx();
++      __trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
+ }
+ static struct fgraph_ops funcgraph_ops = {
diff --git a/queue-6.19/hwmon-pmbus-q54sj108a2-fix-stack-overflow-in-debugfs-read.patch b/queue-6.19/hwmon-pmbus-q54sj108a2-fix-stack-overflow-in-debugfs-read.patch
new file mode 100644 (file)
index 0000000..abca6f7
--- /dev/null
@@ -0,0 +1,86 @@
+From 25dd70a03b1f5f3aa71e1a5091ecd9cd2a13ee43 Mon Sep 17 00:00:00 2001
+From: Sanman Pradhan <psanman@juniper.net>
+Date: Wed, 4 Mar 2026 15:51:17 -0800
+Subject: hwmon: (pmbus/q54sj108a2) fix stack overflow in debugfs read
+
+From: Sanman Pradhan <psanman@juniper.net>
+
+commit 25dd70a03b1f5f3aa71e1a5091ecd9cd2a13ee43 upstream.
+
+The q54sj108a2_debugfs_read function suffers from a stack buffer overflow
+due to incorrect arguments passed to bin2hex(). The function currently
+passes 'data' as the destination and 'data_char' as the source.
+
+Because bin2hex() converts each input byte into two hex characters, a
+32-byte block read results in 64 bytes of output. Since 'data' is only
+34 bytes (I2C_SMBUS_BLOCK_MAX + 2), this writes 30 bytes past the end
+of the buffer onto the stack.
+
+Additionally, the arguments were swapped: it was reading from the
+zero-initialized 'data_char' and writing to 'data', resulting in
+all-zero output regardless of the actual I2C read.
+
+Fix this by:
+1. Expanding 'data_char' to 66 bytes to safely hold the hex output.
+2. Correcting the bin2hex() argument order and using the actual read count.
+3. Using a pointer to select the correct output buffer for the final
+   simple_read_from_buffer call.
+
+Fixes: d014538aa385 ("hwmon: (pmbus) Driver for Delta power supplies Q54SJ108A2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sanman Pradhan <psanman@juniper.net>
+Link: https://lore.kernel.org/r/20260304235116.1045-1-sanman.p211993@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/pmbus/q54sj108a2.c |   19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/drivers/hwmon/pmbus/q54sj108a2.c
++++ b/drivers/hwmon/pmbus/q54sj108a2.c
+@@ -78,7 +78,8 @@ static ssize_t q54sj108a2_debugfs_read(s
+       int idx = *idxp;
+       struct q54sj108a2_data *psu = to_psu(idxp, idx);
+       char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
+-      char data_char[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
++      char data_char[I2C_SMBUS_BLOCK_MAX * 2 + 2] = { 0 };
++      char *out = data;
+       char *res;
+       switch (idx) {
+@@ -149,27 +150,27 @@ static ssize_t q54sj108a2_debugfs_read(s
+               if (rc < 0)
+                       return rc;
+-              res = bin2hex(data, data_char, 32);
+-              rc = res - data;
+-
++              res = bin2hex(data_char, data, rc);
++              rc = res - data_char;
++              out = data_char;
+               break;
+       case Q54SJ108A2_DEBUGFS_FLASH_KEY:
+               rc = i2c_smbus_read_block_data(psu->client, PMBUS_FLASH_KEY_WRITE, data);
+               if (rc < 0)
+                       return rc;
+-              res = bin2hex(data, data_char, 4);
+-              rc = res - data;
+-
++              res = bin2hex(data_char, data, rc);
++              rc = res - data_char;
++              out = data_char;
+               break;
+       default:
+               return -EINVAL;
+       }
+-      data[rc] = '\n';
++      out[rc] = '\n';
+       rc += 2;
+-      return simple_read_from_buffer(buf, count, ppos, data, rc);
++      return simple_read_from_buffer(buf, count, ppos, out, rc);
+ }
+ static ssize_t q54sj108a2_debugfs_write(struct file *file, const char __user *buf,
diff --git a/queue-6.19/ice-fix-retry-for-aq-command-0x06ee.patch b/queue-6.19/ice-fix-retry-for-aq-command-0x06ee.patch
new file mode 100644 (file)
index 0000000..3f728aa
--- /dev/null
@@ -0,0 +1,103 @@
+From fb4903b3354aed4a2301180cf991226f896c87ed Mon Sep 17 00:00:00 2001
+From: Jakub Staniszewski <jakub.staniszewski@linux.intel.com>
+Date: Tue, 13 Jan 2026 20:38:17 +0100
+Subject: ice: fix retry for AQ command 0x06EE
+
+From: Jakub Staniszewski <jakub.staniszewski@linux.intel.com>
+
+commit fb4903b3354aed4a2301180cf991226f896c87ed upstream.
+
+Executing ethtool -m can fail reporting a netlink I/O error while firmware
+link management holds the i2c bus used to communicate with the module.
+
+According to Intel(R) Ethernet Controller E810 Datasheet Rev 2.8 [1]
+Section 3.3.10.4 Read/Write SFF EEPROM (0x06EE)
+request should be retried upon receiving EBUSY from firmware.
+
+Commit e9c9692c8a81 ("ice: Reimplement module reads used by ethtool")
+implemented it only for part of ice_get_module_eeprom(), leaving all other
+calls to ice_aq_sff_eeprom() vulnerable to returning early on getting
+EBUSY without retrying.
+
+Remove the retry loop from ice_get_module_eeprom() and add Admin Queue
+(AQ) command with opcode 0x06EE to the list of commands that should be
+retried on receiving EBUSY from firmware.
+
+Cc: stable@vger.kernel.org
+Fixes: e9c9692c8a81 ("ice: Reimplement module reads used by ethtool")
+Signed-off-by: Jakub Staniszewski <jakub.staniszewski@linux.intel.com>
+Co-developed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Signed-off-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://www.intel.com/content/www/us/en/content-details/613875/intel-ethernet-controller-e810-datasheet.html [1]
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_common.c  |    1 
+ drivers/net/ethernet/intel/ice/ice_ethtool.c |   35 ++++++++++-----------------
+ 2 files changed, 15 insertions(+), 21 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -1854,6 +1854,7 @@ static bool ice_should_retry_sq_send_cmd
+       case ice_aqc_opc_lldp_stop:
+       case ice_aqc_opc_lldp_start:
+       case ice_aqc_opc_lldp_filter_ctrl:
++      case ice_aqc_opc_sff_eeprom:
+               return true;
+       }
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -4508,7 +4508,7 @@ ice_get_module_eeprom(struct net_device
+       u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
+       struct ice_hw *hw = &pf->hw;
+       bool is_sfp = false;
+-      unsigned int i, j;
++      unsigned int i;
+       u16 offset = 0;
+       u8 page = 0;
+       int status;
+@@ -4550,26 +4550,19 @@ ice_get_module_eeprom(struct net_device
+               if (page == 0 || !(data[0x2] & 0x4)) {
+                       u32 copy_len;
+-                      /* If i2c bus is busy due to slow page change or
+-                       * link management access, call can fail. This is normal.
+-                       * So we retry this a few times.
+-                       */
+-                      for (j = 0; j < 4; j++) {
+-                              status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
+-                                                         !is_sfp, value,
+-                                                         SFF_READ_BLOCK_SIZE,
+-                                                         0, NULL);
+-                              netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n",
+-                                         addr, offset, page, is_sfp,
+-                                         value[0], value[1], value[2], value[3],
+-                                         value[4], value[5], value[6], value[7],
+-                                         status);
+-                              if (status) {
+-                                      usleep_range(1500, 2500);
+-                                      memset(value, 0, SFF_READ_BLOCK_SIZE);
+-                                      continue;
+-                              }
+-                              break;
++                      status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
++                                                 !is_sfp, value,
++                                                 SFF_READ_BLOCK_SIZE,
++                                                 0, NULL);
++                      netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%pe)\n",
++                                 addr, offset, page, is_sfp,
++                                 value[0], value[1], value[2], value[3],
++                                 value[4], value[5], value[6], value[7],
++                                 ERR_PTR(status));
++                      if (status) {
++                              netdev_err(netdev, "%s: error reading module EEPROM: status %pe\n",
++                                         __func__, ERR_PTR(status));
++                              return status;
+                       }
+                       /* Make sure we have enough room for the new block */
diff --git a/queue-6.19/io_uring-net-reject-send_vectorized-when-unsupported.patch b/queue-6.19/io_uring-net-reject-send_vectorized-when-unsupported.patch
new file mode 100644 (file)
index 0000000..46bad54
--- /dev/null
@@ -0,0 +1,34 @@
+From c36e28becd0586ac98318fd335e5e91d19cd2623 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Mon, 2 Mar 2026 14:32:04 +0000
+Subject: io_uring/net: reject SEND_VECTORIZED when unsupported
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit c36e28becd0586ac98318fd335e5e91d19cd2623 upstream.
+
+IORING_SEND_VECTORIZED with registered buffers is not implemented but
+could be. Don't silently ignore the flag in this case but reject it with
+an error. It only affects sendzc as normal sends don't support
+registered buffers.
+
+Fixes: 6f02527729bd3 ("io_uring/net: Allow to do vectorized send")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -375,6 +375,8 @@ static int io_send_setup(struct io_kiocb
+               kmsg->msg.msg_namelen = addr_len;
+       }
+       if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
++              if (sr->flags & IORING_SEND_VECTORIZED)
++                      return -EINVAL;
+               req->flags |= REQ_F_IMPORT_BUFFER;
+               return 0;
+       }
diff --git a/queue-6.19/io_uring-zcrx-use-read_once-with-user-shared-rqes.patch b/queue-6.19/io_uring-zcrx-use-read_once-with-user-shared-rqes.patch
new file mode 100644 (file)
index 0000000..58a9618
--- /dev/null
@@ -0,0 +1,38 @@
+From 531bb98a030cc1073bd7ed9a502c0a3a781e92ee Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 4 Mar 2026 12:37:43 +0000
+Subject: io_uring/zcrx: use READ_ONCE with user shared RQEs
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 531bb98a030cc1073bd7ed9a502c0a3a781e92ee upstream.
+
+Refill queue entries are shared with the user space, use READ_ONCE when
+reading them.
+
+Fixes: 34a3e60821ab9 ("io_uring/zcrx: implement zerocopy receive pp memory provider")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/zcrx.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/io_uring/zcrx.c
++++ b/io_uring/zcrx.c
+@@ -898,11 +898,12 @@ static inline bool io_parse_rqe(struct i
+                               struct io_zcrx_ifq *ifq,
+                               struct net_iov **ret_niov)
+ {
++      __u64 off = READ_ONCE(rqe->off);
+       unsigned niov_idx, area_idx;
+       struct io_zcrx_area *area;
+-      area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
+-      niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
++      area_idx = off >> IORING_ZCRX_AREA_SHIFT;
++      niov_idx = (off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
+       if (unlikely(rqe->__pad || area_idx))
+               return false;
diff --git a/queue-6.19/ksmbd-don-t-log-keys-in-smb3-signing-and-encryption-key-generation.patch b/queue-6.19/ksmbd-don-t-log-keys-in-smb3-signing-and-encryption-key-generation.patch
new file mode 100644 (file)
index 0000000..4f47341
--- /dev/null
@@ -0,0 +1,64 @@
+From 441336115df26b966575de56daf7107ed474faed Mon Sep 17 00:00:00 2001
+From: Thorsten Blum <thorsten.blum@linux.dev>
+Date: Tue, 3 Mar 2026 14:25:53 +0100
+Subject: ksmbd: Don't log keys in SMB3 signing and encryption key generation
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+commit 441336115df26b966575de56daf7107ed474faed upstream.
+
+When KSMBD_DEBUG_AUTH logging is enabled, generate_smb3signingkey() and
+generate_smb3encryptionkey() log the session, signing, encryption, and
+decryption key bytes. Remove the logs to avoid exposing credentials.
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/auth.c |   22 ++--------------------
+ 1 file changed, 2 insertions(+), 20 deletions(-)
+
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -589,12 +589,8 @@ static int generate_smb3signingkey(struc
+       if (!(conn->dialect >= SMB30_PROT_ID && signing->binding))
+               memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
+-      ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
++      ksmbd_debug(AUTH, "generated SMB3 signing key\n");
+       ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
+-      ksmbd_debug(AUTH, "Session Key   %*ph\n",
+-                  SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+-      ksmbd_debug(AUTH, "Signing Key   %*ph\n",
+-                  SMB3_SIGN_KEY_SIZE, key);
+       return 0;
+ }
+@@ -652,23 +648,9 @@ static void generate_smb3encryptionkey(s
+                    ptwin->decryption.context,
+                    sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE);
+-      ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
++      ksmbd_debug(AUTH, "generated SMB3 encryption/decryption keys\n");
+       ksmbd_debug(AUTH, "Cipher type   %d\n", conn->cipher_type);
+       ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
+-      ksmbd_debug(AUTH, "Session Key   %*ph\n",
+-                  SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+-      if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+-          conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
+-              ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
+-                          SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+-              ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+-                          SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+-      } else {
+-              ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
+-                          SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+-              ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+-                          SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+-      }
+ }
+ void ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
diff --git a/queue-6.19/ksmbd-fix-use-after-free-by-using-call_rcu-for-oplock_info.patch b/queue-6.19/ksmbd-fix-use-after-free-by-using-call_rcu-for-oplock_info.patch
new file mode 100644 (file)
index 0000000..65a73ea
--- /dev/null
@@ -0,0 +1,114 @@
+From 1dfd062caa165ec9d7ee0823087930f3ab8a6294 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sat, 7 Mar 2026 11:32:31 +0900
+Subject: ksmbd: fix use-after-free by using call_rcu() for oplock_info
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 1dfd062caa165ec9d7ee0823087930f3ab8a6294 upstream.
+
+ksmbd currently frees oplock_info immediately using kfree(), even
+though it is accessed under RCU read-side critical sections in places
+like opinfo_get() and proc_show_files().
+
+Since there is no RCU grace period delay between nullifying the pointer
+and freeing the memory, a reader can still access oplock_info
+structure after it has been freed. This can lead to a use-after-free
+especially in opinfo_get() where atomic_inc_not_zero() is called on
+already freed memory.
+
+Fix this by switching to deferred freeing using call_rcu().
+
+Fixes: 18b4fac5ef17 ("ksmbd: fix use-after-free in smb_break_all_levII_oplock()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c |   29 +++++++++++++++++++++--------
+ fs/smb/server/oplock.h |    5 +++--
+ 2 files changed, 24 insertions(+), 10 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -120,7 +120,7 @@ static void free_lease(struct oplock_inf
+       kfree(lease);
+ }
+-static void free_opinfo(struct oplock_info *opinfo)
++static void __free_opinfo(struct oplock_info *opinfo)
+ {
+       if (opinfo->is_lease)
+               free_lease(opinfo);
+@@ -129,6 +129,18 @@ static void free_opinfo(struct oplock_in
+       kfree(opinfo);
+ }
++static void free_opinfo_rcu(struct rcu_head *rcu)
++{
++      struct oplock_info *opinfo = container_of(rcu, struct oplock_info, rcu);
++
++      __free_opinfo(opinfo);
++}
++
++static void free_opinfo(struct oplock_info *opinfo)
++{
++      call_rcu(&opinfo->rcu, free_opinfo_rcu);
++}
++
+ struct oplock_info *opinfo_get(struct ksmbd_file *fp)
+ {
+       struct oplock_info *opinfo;
+@@ -176,9 +188,9 @@ void opinfo_put(struct oplock_info *opin
+       free_opinfo(opinfo);
+ }
+-static void opinfo_add(struct oplock_info *opinfo)
++static void opinfo_add(struct oplock_info *opinfo, struct ksmbd_file *fp)
+ {
+-      struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
++      struct ksmbd_inode *ci = fp->f_ci;
+       down_write(&ci->m_lock);
+       list_add(&opinfo->op_entry, &ci->m_op_list);
+@@ -1279,20 +1291,21 @@ set_lev:
+       set_oplock_level(opinfo, req_op_level, lctx);
+ out:
+-      rcu_assign_pointer(fp->f_opinfo, opinfo);
+-      opinfo->o_fp = fp;
+-
+       opinfo_count_inc(fp);
+-      opinfo_add(opinfo);
++      opinfo_add(opinfo, fp);
++
+       if (opinfo->is_lease) {
+               err = add_lease_global_list(opinfo);
+               if (err)
+                       goto err_out;
+       }
++      rcu_assign_pointer(fp->f_opinfo, opinfo);
++      opinfo->o_fp = fp;
++
+       return 0;
+ err_out:
+-      free_opinfo(opinfo);
++      __free_opinfo(opinfo);
+       return err;
+ }
+--- a/fs/smb/server/oplock.h
++++ b/fs/smb/server/oplock.h
+@@ -69,8 +69,9 @@ struct oplock_info {
+       struct lease            *o_lease;
+       struct list_head        op_entry;
+       struct list_head        lease_entry;
+-      wait_queue_head_t oplock_q; /* Other server threads */
+-      wait_queue_head_t oplock_brk; /* oplock breaking wait */
++      wait_queue_head_t       oplock_q; /* Other server threads */
++      wait_queue_head_t       oplock_brk; /* oplock breaking wait */
++      struct rcu_head         rcu;
+ };
+ struct lease_break_info {
diff --git a/queue-6.19/ksmbd-fix-use-after-free-in-smb_lazy_parent_lease_break_close.patch b/queue-6.19/ksmbd-fix-use-after-free-in-smb_lazy_parent_lease_break_close.patch
new file mode 100644 (file)
index 0000000..a7bffde
--- /dev/null
@@ -0,0 +1,41 @@
+From eac3361e3d5dd8067b3258c69615888eb45e9f25 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Mon, 2 Mar 2026 12:55:02 +0900
+Subject: ksmbd: fix use-after-free in smb_lazy_parent_lease_break_close()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit eac3361e3d5dd8067b3258c69615888eb45e9f25 upstream.
+
+opinfo pointer obtained via rcu_dereference(fp->f_opinfo) is being
+accessed after rcu_read_unlock() has been called. This creates a
+race condition where the memory could be freed by a concurrent
+writer between the unlock and the subsequent pointer dereferences
+(opinfo->is_lease, etc.), leading to a use-after-free.
+
+Fixes: 5fb282ba4fef ("ksmbd: fix possible null-deref in smb_lazy_parent_lease_break_close")
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1123,10 +1123,12 @@ void smb_lazy_parent_lease_break_close(s
+       rcu_read_lock();
+       opinfo = rcu_dereference(fp->f_opinfo);
+-      rcu_read_unlock();
+-      if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2)
++      if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2) {
++              rcu_read_unlock();
+               return;
++      }
++      rcu_read_unlock();
+       p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
+       if (!p_ci)
diff --git a/queue-6.19/media-dvb-net-fix-oob-access-in-ule-extension-header-tables.patch b/queue-6.19/media-dvb-net-fix-oob-access-in-ule-extension-header-tables.patch
new file mode 100644 (file)
index 0000000..bcef47e
--- /dev/null
@@ -0,0 +1,41 @@
+From 24d87712727a5017ad142d63940589a36cd25647 Mon Sep 17 00:00:00 2001
+From: Ariel Silver <arielsilver77@gmail.com>
+Date: Sat, 21 Feb 2026 15:26:00 +0100
+Subject: media: dvb-net: fix OOB access in ULE extension header tables
+
+From: Ariel Silver <arielsilver77@gmail.com>
+
+commit 24d87712727a5017ad142d63940589a36cd25647 upstream.
+
+The ule_mandatory_ext_handlers[] and ule_optional_ext_handlers[] tables
+in handle_one_ule_extension() are declared with 255 elements (valid
+indices 0-254), but the index htype is derived from network-controlled
+data as (ule_sndu_type & 0x00FF), giving a range of 0-255. When
+htype equals 255, an out-of-bounds read occurs on the function pointer
+table, and the OOB value may be called as a function pointer.
+
+Add a bounds check on htype against the array size before either table
+is accessed. Out-of-range values now cause the SNDU to be discarded.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Ariel Silver <arielsilver77@gmail.com>
+Signed-off-by: Ariel Silver <arielsilver77@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/dvb-core/dvb_net.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/media/dvb-core/dvb_net.c
++++ b/drivers/media/dvb-core/dvb_net.c
+@@ -228,6 +228,9 @@ static int handle_one_ule_extension( str
+       unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
+       unsigned char htype = p->ule_sndu_type & 0x00FF;
++      if (htype >= ARRAY_SIZE(ule_mandatory_ext_handlers))
++              return -1;
++
+       /* Discriminate mandatory and optional extension headers. */
+       if (hlen == 0) {
+               /* Mandatory extension header */
diff --git a/queue-6.19/memcg-fix-slab-accounting-in-refill_obj_stock-trylock-path.patch b/queue-6.19/memcg-fix-slab-accounting-in-refill_obj_stock-trylock-path.patch
new file mode 100644 (file)
index 0000000..c10c091
--- /dev/null
@@ -0,0 +1,52 @@
+From dccd5ee2625d50239510bcd73ed78559005e00a3 Mon Sep 17 00:00:00 2001
+From: Hao Li <hao.li@linux.dev>
+Date: Thu, 26 Feb 2026 19:51:37 +0800
+Subject: memcg: fix slab accounting in refill_obj_stock() trylock path
+
+From: Hao Li <hao.li@linux.dev>
+
+commit dccd5ee2625d50239510bcd73ed78559005e00a3 upstream.
+
+In the trylock path of refill_obj_stock(), mod_objcg_mlstate() should use
+the real alloc/free bytes (i.e., nr_acct) for accounting, rather than
+nr_bytes.
+
+The user-visible impact is that the NR_SLAB_RECLAIMABLE_B and
+NR_SLAB_UNRECLAIMABLE_B stats can end up being incorrect.
+
+For example, if a user allocates a 6144-byte object, then before this
+fix refill_obj_stock() calls mod_objcg_mlstate(..., nr_bytes=2048), even
+though it should account for 6144 bytes (i.e., nr_acct).
+
+When the user later frees the same object with kfree(),
+refill_obj_stock() calls mod_objcg_mlstate(..., nr_bytes=6144).  This
+ends up adding 6144 to the stats, but it should be applying -6144
+(i.e., nr_acct) since the object is being freed.
+
+Link: https://lkml.kernel.org/r/20260226115145.62903-1-hao.li@linux.dev
+Fixes: 200577f69f29 ("memcg: objcg stock trylock without irq disabling")
+Signed-off-by: Hao Li <hao.li@linux.dev>
+Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Vlastimil Babka <vbabka@suse.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memcontrol.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3052,7 +3052,7 @@ static void refill_obj_stock(struct obj_
+       if (!local_trylock(&obj_stock.lock)) {
+               if (pgdat)
+-                      mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
++                      mod_objcg_mlstate(objcg, pgdat, idx, nr_acct);
+               nr_pages = nr_bytes >> PAGE_SHIFT;
+               nr_bytes = nr_bytes & (PAGE_SIZE - 1);
+               atomic_add(nr_bytes, &objcg->nr_charged_bytes);
diff --git a/queue-6.19/net-dsa-microchip-fix-error-path-in-ptp-irq-setup.patch b/queue-6.19/net-dsa-microchip-fix-error-path-in-ptp-irq-setup.patch
new file mode 100644 (file)
index 0000000..13c5039
--- /dev/null
@@ -0,0 +1,56 @@
+From 99c8c16a4aad0b37293cae213e15957c573cf79b Mon Sep 17 00:00:00 2001
+From: "Bastien Curutchet (Schneider Electric)" <bastien.curutchet@bootlin.com>
+Date: Mon, 9 Mar 2026 14:15:43 +0100
+Subject: net: dsa: microchip: Fix error path in PTP IRQ setup
+
+From: Bastien Curutchet (Schneider Electric) <bastien.curutchet@bootlin.com>
+
+commit 99c8c16a4aad0b37293cae213e15957c573cf79b upstream.
+
+If request_threaded_irq() fails during the PTP message IRQ setup, the
+newly created IRQ mapping is never disposed. Indeed, the
+ksz_ptp_irq_setup()'s error path only frees the mappings that were
+successfully set up.
+
+Dispose the newly created mapping if the associated
+request_threaded_irq() fails at setup.
+
+Cc: stable@vger.kernel.org
+Fixes: d0b8fec8ae505 ("net: dsa: microchip: Fix symetry in ksz_ptp_msg_irq_{setup/free}()")
+Signed-off-by: Bastien Curutchet (Schneider Electric) <bastien.curutchet@bootlin.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260309-ksz-ptp-irq-fix-v1-1-757b3b985955@bootlin.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/microchip/ksz_ptp.c |   11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/dsa/microchip/ksz_ptp.c
++++ b/drivers/net/dsa/microchip/ksz_ptp.c
+@@ -1095,6 +1095,7 @@ static int ksz_ptp_msg_irq_setup(struct
+       const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+       struct ksz_irq *ptpirq = &port->ptpirq;
+       struct ksz_ptp_irq *ptpmsg_irq;
++      int ret;
+       ptpmsg_irq = &port->ptpmsg_irq[n];
+       ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
+@@ -1106,9 +1107,13 @@ static int ksz_ptp_msg_irq_setup(struct
+       strscpy(ptpmsg_irq->name, name[n]);
+-      return request_threaded_irq(ptpmsg_irq->num, NULL,
+-                                  ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
+-                                  ptpmsg_irq->name, ptpmsg_irq);
++      ret = request_threaded_irq(ptpmsg_irq->num, NULL,
++                                 ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
++                                 ptpmsg_irq->name, ptpmsg_irq);
++      if (ret)
++              irq_dispose_mapping(ptpmsg_irq->num);
++
++      return ret;
+ }
+ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
diff --git a/queue-6.19/net-ethernet-arc-emac-quiesce-interrupts-before-requesting-irq.patch b/queue-6.19/net-ethernet-arc-emac-quiesce-interrupts-before-requesting-irq.patch
new file mode 100644 (file)
index 0000000..2db2ef3
--- /dev/null
@@ -0,0 +1,53 @@
+From 2503d08f8a2de618e5c3a8183b250ff4a2e2d52c Mon Sep 17 00:00:00 2001
+From: Fan Wu <fanwu01@zju.edu.cn>
+Date: Mon, 9 Mar 2026 13:24:09 +0000
+Subject: net: ethernet: arc: emac: quiesce interrupts before requesting IRQ
+
+From: Fan Wu <fanwu01@zju.edu.cn>
+
+commit 2503d08f8a2de618e5c3a8183b250ff4a2e2d52c upstream.
+
+Normal RX/TX interrupts are enabled later, in arc_emac_open(), so probe
+should not see interrupt delivery in the usual case. However, hardware may
+still present stale or latched interrupt status left by firmware or the
+bootloader.
+
+If probe later unwinds after devm_request_irq() has installed the handler,
+such a stale interrupt can still reach arc_emac_intr() during teardown and
+race with release of the associated net_device.
+
+Avoid that window by putting the device into a known quiescent state before
+requesting the IRQ: disable all EMAC interrupt sources and clear any
+pending EMAC interrupt status bits. This keeps the change hardware-focused
+and minimal, while preventing spurious IRQ delivery from leftover state.
+
+Fixes: e4f2379db6c6 ("ethernet/arc/arc_emac - Add new driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fan Wu <fanwu01@zju.edu.cn>
+Link: https://patch.msgid.link/20260309132409.584966-1-fanwu01@zju.edu.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/arc/emac_main.c |   11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/net/ethernet/arc/emac_main.c
++++ b/drivers/net/ethernet/arc/emac_main.c
+@@ -934,6 +934,17 @@ int arc_emac_probe(struct net_device *nd
+       /* Set poll rate so that it polls every 1 ms */
+       arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
++      /*
++       * Put the device into a known quiescent state before requesting
++       * the IRQ. Clear only EMAC interrupt status bits here; leave the
++       * MDIO completion bit alone and avoid writing TXPL_MASK, which is
++       * used to force TX polling rather than acknowledge interrupts.
++       */
++      arc_reg_set(priv, R_ENABLE, 0);
++      arc_reg_set(priv, R_STATUS, RXINT_MASK | TXINT_MASK | ERR_MASK |
++                  TXCH_MASK | MSER_MASK | RXCR_MASK |
++                  RXFR_MASK | RXFL_MASK);
++
+       ndev->irq = irq;
+       dev_info(dev, "IRQ is %d\n", ndev->irq);
diff --git a/queue-6.19/net-fix-rcu_tasks-stall-in-threaded-busypoll.patch b/queue-6.19/net-fix-rcu_tasks-stall-in-threaded-busypoll.patch
new file mode 100644 (file)
index 0000000..a5e2ffb
--- /dev/null
@@ -0,0 +1,123 @@
+From 1a86a1f7d88996085934139fa4c063b6299a2dd3 Mon Sep 17 00:00:00 2001
+From: YiFei Zhu <zhuyifei@google.com>
+Date: Fri, 27 Feb 2026 22:19:37 +0000
+Subject: net: Fix rcu_tasks stall in threaded busypoll
+
+From: YiFei Zhu <zhuyifei@google.com>
+
+commit 1a86a1f7d88996085934139fa4c063b6299a2dd3 upstream.
+
+I was debugging a NIC driver when I noticed that when I enable
+threaded busypoll, bpftrace hangs when starting up. dmesg showed:
+
+  rcu_tasks_wait_gp: rcu_tasks grace period number 85 (since boot) is 10658 jiffies old.
+  rcu_tasks_wait_gp: rcu_tasks grace period number 85 (since boot) is 40793 jiffies old.
+  rcu_tasks_wait_gp: rcu_tasks grace period number 85 (since boot) is 131273 jiffies old.
+  rcu_tasks_wait_gp: rcu_tasks grace period number 85 (since boot) is 402058 jiffies old.
+  INFO: rcu_tasks detected stalls on tasks:
+  00000000769f52cd: .N nvcsw: 2/2 holdout: 1 idle_cpu: -1/64
+  task:napi/eth2-8265  state:R  running task     stack:0     pid:48300 tgid:48300 ppid:2      task_flags:0x208040 flags:0x00004000
+  Call Trace:
+   <TASK>
+   ? napi_threaded_poll_loop+0x27c/0x2c0
+   ? __pfx_napi_threaded_poll+0x10/0x10
+   ? napi_threaded_poll+0x26/0x80
+   ? kthread+0xfa/0x240
+   ? __pfx_kthread+0x10/0x10
+   ? ret_from_fork+0x31/0x50
+   ? __pfx_kthread+0x10/0x10
+   ? ret_from_fork_asm+0x1a/0x30
+   </TASK>
+
+The cause is that in threaded busypoll, the main loop is in
+napi_threaded_poll rather than napi_threaded_poll_loop, where the
+latter rarely iterates more than once within its loop. For
+rcu_softirq_qs_periodic inside napi_threaded_poll_loop to report its
+qs state, the last_qs must be 100ms behind, and this can't happen
+because napi_threaded_poll_loop rarely iterates in threaded busypoll,
+and each time napi_threaded_poll_loop is called last_qs is reset to
+latest jiffies.
+
+This patch changes so that in threaded busypoll, last_qs is saved
+in the outer napi_threaded_poll, and whether busy_poll_last_qs
+is NULL indicates whether napi_threaded_poll_loop is called for
+busypoll. This way last_qs would not reset to latest jiffies on
+each invocation of napi_threaded_poll_loop.
+
+Fixes: c18d4b190a46 ("net: Extend NAPI threaded polling to allow kthread based busy polling")
+Cc: stable@vger.kernel.org
+Signed-off-by: YiFei Zhu <zhuyifei@google.com>
+Reviewed-by: Samiullah Khawaja <skhawaja@google.com>
+Link: https://patch.msgid.link/20260227221937.1060857-1-zhuyifei@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -7788,11 +7788,12 @@ static int napi_thread_wait(struct napi_
+       return -1;
+ }
+-static void napi_threaded_poll_loop(struct napi_struct *napi, bool busy_poll)
++static void napi_threaded_poll_loop(struct napi_struct *napi,
++                                  unsigned long *busy_poll_last_qs)
+ {
++      unsigned long last_qs = busy_poll_last_qs ? *busy_poll_last_qs : jiffies;
+       struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+       struct softnet_data *sd;
+-      unsigned long last_qs = jiffies;
+       for (;;) {
+               bool repoll = false;
+@@ -7821,12 +7822,12 @@ static void napi_threaded_poll_loop(stru
+               /* When busy poll is enabled, the old packets are not flushed in
+                * napi_complete_done. So flush them here.
+                */
+-              if (busy_poll)
++              if (busy_poll_last_qs)
+                       gro_flush_normal(&napi->gro, HZ >= 1000);
+               local_bh_enable();
+               /* Call cond_resched here to avoid watchdog warnings. */
+-              if (repoll || busy_poll) {
++              if (repoll || busy_poll_last_qs) {
+                       rcu_softirq_qs_periodic(last_qs);
+                       cond_resched();
+               }
+@@ -7834,11 +7835,15 @@ static void napi_threaded_poll_loop(stru
+               if (!repoll)
+                       break;
+       }
++
++      if (busy_poll_last_qs)
++              *busy_poll_last_qs = last_qs;
+ }
+ static int napi_threaded_poll(void *data)
+ {
+       struct napi_struct *napi = data;
++      unsigned long last_qs = jiffies;
+       bool want_busy_poll;
+       bool in_busy_poll;
+       unsigned long val;
+@@ -7856,7 +7861,7 @@ static int napi_threaded_poll(void *data
+                       assign_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state,
+                                  want_busy_poll);
+-              napi_threaded_poll_loop(napi, want_busy_poll);
++              napi_threaded_poll_loop(napi, want_busy_poll ? &last_qs : NULL);
+       }
+       return 0;
+@@ -13167,7 +13172,7 @@ static void run_backlog_napi(unsigned in
+ {
+       struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+-      napi_threaded_poll_loop(&sd->backlog, false);
++      napi_threaded_poll_loop(&sd->backlog, NULL);
+ }
+ static void backlog_napi_setup(unsigned int cpu)
diff --git a/queue-6.19/net-macb-shuffle-the-tx-ring-before-enabling-tx.patch b/queue-6.19/net-macb-shuffle-the-tx-ring-before-enabling-tx.patch
new file mode 100644 (file)
index 0000000..7108b6b
--- /dev/null
@@ -0,0 +1,184 @@
+From 881a0263d502e1a93ebc13a78254e9ad19520232 Mon Sep 17 00:00:00 2001
+From: Kevin Hao <haokexin@gmail.com>
+Date: Sat, 7 Mar 2026 15:08:54 +0800
+Subject: net: macb: Shuffle the tx ring before enabling tx
+
+From: Kevin Hao <haokexin@gmail.com>
+
+commit 881a0263d502e1a93ebc13a78254e9ad19520232 upstream.
+
+Quanyang observed that when using an NFS rootfs on an AMD ZynqMp board,
+the rootfs may take an extended time to recover after a suspend.
+Upon investigation, it was determined that the issue originates from a
+problem in the macb driver.
+
+According to the Zynq UltraScale TRM [1], when transmit is disabled,
+the transmit buffer queue pointer resets to point to the address
+specified by the transmit buffer queue base address register.
+
+In the current implementation, the code merely resets `queue->tx_head`
+and `queue->tx_tail` to '0'. This approach presents several issues:
+
+- Packets already queued in the tx ring are silently lost,
+  leading to memory leaks since the associated skbs cannot be released.
+
+- Concurrent write access to `queue->tx_head` and `queue->tx_tail` may
+  occur from `macb_tx_poll()` or `macb_start_xmit()` when these values
+  are reset to '0'.
+
+- The transmission may become stuck on a packet that has already been sent
+  out, with its 'TX_USED' bit set, but has not yet been processed. However,
+  due to the manipulation of 'queue->tx_head' and 'queue->tx_tail',
+  `macb_tx_poll()` incorrectly assumes there are no packets to handle
+  because `queue->tx_head == queue->tx_tail`. This issue is only resolved
+  when a new packet is placed at this position. This is the root cause of
+  the prolonged recovery time observed for the NFS root filesystem.
+
+To resolve this issue, shuffle the tx ring and tx skb array so that
+the first unsent packet is positioned at the start of the tx ring.
+Additionally, ensure that updates to `queue->tx_head` and
+`queue->tx_tail` are properly protected with the appropriate lock.
+
+[1] https://docs.amd.com/v/u/en-US/ug1085-zynq-ultrascale-trm
+
+Fixes: bf9cf80cab81 ("net: macb: Fix tx/rx malfunction after phy link down and up")
+Reported-by: Quanyang Wang <quanyang.wang@windriver.com>
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260307-zynqmp-v2-1-6ef98a70e1d0@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c |   98 ++++++++++++++++++++++++++++++-
+ 1 file changed, 95 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -36,6 +36,7 @@
+ #include <linux/tcp.h>
+ #include <linux/types.h>
+ #include <linux/udp.h>
++#include <linux/gcd.h>
+ #include <net/pkt_sched.h>
+ #include "macb.h"
+@@ -668,6 +669,97 @@ static void macb_mac_link_down(struct ph
+       netif_tx_stop_all_queues(ndev);
+ }
++/* Use juggling algorithm to left rotate tx ring and tx skb array */
++static void gem_shuffle_tx_one_ring(struct macb_queue *queue)
++{
++      unsigned int head, tail, count, ring_size, desc_size;
++      struct macb_tx_skb tx_skb, *skb_curr, *skb_next;
++      struct macb_dma_desc *desc_curr, *desc_next;
++      unsigned int i, cycles, shift, curr, next;
++      struct macb *bp = queue->bp;
++      unsigned char desc[24];
++      unsigned long flags;
++
++      desc_size = macb_dma_desc_get_size(bp);
++
++      if (WARN_ON_ONCE(desc_size > ARRAY_SIZE(desc)))
++              return;
++
++      spin_lock_irqsave(&queue->tx_ptr_lock, flags);
++      head = queue->tx_head;
++      tail = queue->tx_tail;
++      ring_size = bp->tx_ring_size;
++      count = CIRC_CNT(head, tail, ring_size);
++
++      if (!(tail % ring_size))
++              goto unlock;
++
++      if (!count) {
++              queue->tx_head = 0;
++              queue->tx_tail = 0;
++              goto unlock;
++      }
++
++      shift = tail % ring_size;
++      cycles = gcd(ring_size, shift);
++
++      for (i = 0; i < cycles; i++) {
++              memcpy(&desc, macb_tx_desc(queue, i), desc_size);
++              memcpy(&tx_skb, macb_tx_skb(queue, i),
++                     sizeof(struct macb_tx_skb));
++
++              curr = i;
++              next = (curr + shift) % ring_size;
++
++              while (next != i) {
++                      desc_curr = macb_tx_desc(queue, curr);
++                      desc_next = macb_tx_desc(queue, next);
++
++                      memcpy(desc_curr, desc_next, desc_size);
++
++                      if (next == ring_size - 1)
++                              desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
++                      if (curr == ring_size - 1)
++                              desc_curr->ctrl |= MACB_BIT(TX_WRAP);
++
++                      skb_curr = macb_tx_skb(queue, curr);
++                      skb_next = macb_tx_skb(queue, next);
++                      memcpy(skb_curr, skb_next, sizeof(struct macb_tx_skb));
++
++                      curr = next;
++                      next = (curr + shift) % ring_size;
++              }
++
++              desc_curr = macb_tx_desc(queue, curr);
++              memcpy(desc_curr, &desc, desc_size);
++              if (i == ring_size - 1)
++                      desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
++              if (curr == ring_size - 1)
++                      desc_curr->ctrl |= MACB_BIT(TX_WRAP);
++              memcpy(macb_tx_skb(queue, curr), &tx_skb,
++                     sizeof(struct macb_tx_skb));
++      }
++
++      queue->tx_head = count;
++      queue->tx_tail = 0;
++
++      /* Make descriptor updates visible to hardware */
++      wmb();
++
++unlock:
++      spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
++}
++
++/* Rotate the queue so that the tail is at index 0 */
++static void gem_shuffle_tx_rings(struct macb *bp)
++{
++      struct macb_queue *queue;
++      int q;
++
++      for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
++              gem_shuffle_tx_one_ring(queue);
++}
++
+ static void macb_mac_link_up(struct phylink_config *config,
+                            struct phy_device *phy,
+                            unsigned int mode, phy_interface_t interface,
+@@ -706,8 +798,6 @@ static void macb_mac_link_up(struct phyl
+                       ctrl |= MACB_BIT(PAE);
+               for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+-                      queue->tx_head = 0;
+-                      queue->tx_tail = 0;
+                       queue_writel(queue, IER,
+                                    bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+               }
+@@ -721,8 +811,10 @@ static void macb_mac_link_up(struct phyl
+       spin_unlock_irqrestore(&bp->lock, flags);
+-      if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
++      if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
+               macb_set_tx_clk(bp, speed);
++              gem_shuffle_tx_rings(bp);
++      }
+       /* Enable Rx and Tx; Enable PTP unicast */
+       ctrl = macb_readl(bp, NCR);
diff --git a/queue-6.19/net-mana-ring-doorbell-at-4-cq-wraparounds.patch b/queue-6.19/net-mana-ring-doorbell-at-4-cq-wraparounds.patch
new file mode 100644 (file)
index 0000000..91283a1
--- /dev/null
@@ -0,0 +1,88 @@
+From dabffd08545ffa1d7183bc45e387860984025291 Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Thu, 26 Feb 2026 11:28:33 -0800
+Subject: net: mana: Ring doorbell at 4 CQ wraparounds
+
+From: Long Li <longli@microsoft.com>
+
+commit dabffd08545ffa1d7183bc45e387860984025291 upstream.
+
+MANA hardware requires at least one doorbell ring every 8 wraparounds
+of the CQ. The driver rings the doorbell as a form of flow control to
+inform hardware that CQEs have been consumed.
+
+The NAPI poll functions mana_poll_tx_cq() and mana_poll_rx_cq() can
+poll up to CQE_POLLING_BUFFER (512) completions per call. If the CQ
+has fewer than 512 entries, a single poll call can process more than
+4 wraparounds without ringing the doorbell. The doorbell threshold
+check also uses ">" instead of ">=", delaying the ring by one extra
+CQE beyond 4 wraparounds. Combined, these issues can cause the driver
+to exceed the 8-wraparound hardware limit, leading to missed
+completions and stalled queues.
+
+Fix this by capping the number of CQEs polled per call to 4 wraparounds
+of the CQ in both TX and RX paths. Also change the doorbell threshold
+from ">" to ">=" so the doorbell is rung as soon as 4 wraparounds are
+reached.
+
+Cc: stable@vger.kernel.org
+Fixes: 58a63729c957 ("net: mana: Fix doorbell out of order violation and avoid unnecessary doorbell rings")
+Signed-off-by: Long Li <longli@microsoft.com>
+Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20260226192833.1050807-1-longli@microsoft.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microsoft/mana/mana_en.c |   23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -1725,8 +1725,14 @@ static void mana_poll_tx_cq(struct mana_
+       ndev = txq->ndev;
+       apc = netdev_priv(ndev);
++      /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
++       * doorbell can be rung in time for the hardware's requirement
++       * of at least one doorbell ring every 8 wraparounds.
++       */
+       comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
+-                                  CQE_POLLING_BUFFER);
++                                  min((cq->gdma_cq->queue_size /
++                                        COMP_ENTRY_SIZE) * 4,
++                                       CQE_POLLING_BUFFER));
+       if (comp_read < 1)
+               return;
+@@ -2111,7 +2117,14 @@ static void mana_poll_rx_cq(struct mana_
+       struct mana_rxq *rxq = cq->rxq;
+       int comp_read, i;
+-      comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
++      /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
++       * doorbell can be rung in time for the hardware's requirement
++       * of at least one doorbell ring every 8 wraparounds.
++       */
++      comp_read = mana_gd_poll_cq(cq->gdma_cq, comp,
++                                  min((cq->gdma_cq->queue_size /
++                                        COMP_ENTRY_SIZE) * 4,
++                                       CQE_POLLING_BUFFER));
+       WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+       rxq->xdp_flush = false;
+@@ -2156,11 +2169,11 @@ static int mana_cq_handler(void *context
+               mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
+               cq->work_done_since_doorbell = 0;
+               napi_complete_done(&cq->napi, w);
+-      } else if (cq->work_done_since_doorbell >
+-                 cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
++      } else if (cq->work_done_since_doorbell >=
++                 (cq->gdma_cq->queue_size / COMP_ENTRY_SIZE) * 4) {
+               /* MANA hardware requires at least one doorbell ring every 8
+                * wraparounds of CQ even if there is no need to arm the CQ.
+-               * This driver rings the doorbell as soon as we have exceeded
++               * This driver rings the doorbell as soon as it has processed
+                * 4 wraparounds.
+                */
+               mana_gd_ring_cq(gdma_queue, 0);
diff --git a/queue-6.19/net-mctp-fix-device-leak-on-probe-failure.patch b/queue-6.19/net-mctp-fix-device-leak-on-probe-failure.patch
new file mode 100644 (file)
index 0000000..7528be3
--- /dev/null
@@ -0,0 +1,51 @@
+From 224a0d284c3caf1951302d1744a714784febed71 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Thu, 5 Mar 2026 11:45:49 +0100
+Subject: net: mctp: fix device leak on probe failure
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 224a0d284c3caf1951302d1744a714784febed71 upstream.
+
+Driver core holds a reference to the USB interface and its parent USB
+device while the interface is bound to a driver and there is no need to
+take additional references unless the structures are needed after
+disconnect.
+
+This driver takes a reference to the USB device during probe but does
+not to release it on probe failures.
+
+Drop the redundant device reference to fix the leak, reduce cargo
+culting, make it easier to spot drivers where an extra reference is
+needed, and reduce the risk of further memory leaks.
+
+Fixes: 0791c0327a6e ("net: mctp: Add MCTP USB transport driver")
+Cc: stable@vger.kernel.org     # 6.15
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Acked-by: Jeremy Kerr <jk@codeconstruct.com.au>
+Link: https://patch.msgid.link/20260305104549.16110-1-johan@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/mctp/mctp-usb.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/net/mctp/mctp-usb.c
++++ b/drivers/net/mctp/mctp-usb.c
+@@ -329,7 +329,7 @@ static int mctp_usb_probe(struct usb_int
+       SET_NETDEV_DEV(netdev, &intf->dev);
+       dev = netdev_priv(netdev);
+       dev->netdev = netdev;
+-      dev->usbdev = usb_get_dev(interface_to_usbdev(intf));
++      dev->usbdev = interface_to_usbdev(intf);
+       dev->intf = intf;
+       usb_set_intfdata(intf, dev);
+@@ -365,7 +365,6 @@ static void mctp_usb_disconnect(struct u
+       mctp_unregister_netdev(dev->netdev);
+       usb_free_urb(dev->tx_urb);
+       usb_free_urb(dev->rx_urb);
+-      usb_put_dev(dev->usbdev);
+       free_netdev(dev->netdev);
+ }
diff --git a/queue-6.19/net-ncsi-fix-skb-leak-in-error-paths.patch b/queue-6.19/net-ncsi-fix-skb-leak-in-error-paths.patch
new file mode 100644 (file)
index 0000000..af37831
--- /dev/null
@@ -0,0 +1,85 @@
+From 5c3398a54266541610c8d0a7082e654e9ff3e259 Mon Sep 17 00:00:00 2001
+From: Jian Zhang <zhangjian.3032@bytedance.com>
+Date: Thu, 5 Mar 2026 14:06:55 +0800
+Subject: net: ncsi: fix skb leak in error paths
+
+From: Jian Zhang <zhangjian.3032@bytedance.com>
+
+commit 5c3398a54266541610c8d0a7082e654e9ff3e259 upstream.
+
+Early return paths in NCSI RX and AEN handlers fail to release
+the received skb, resulting in a memory leak.
+
+Specifically, ncsi_aen_handler() returns on invalid AEN packets
+without consuming the skb. Similarly, ncsi_rcv_rsp() exits early
+when failing to resolve the NCSI device, response handler, or
+request, leaving the skb unfreed.
+
+CC: stable@vger.kernel.org
+Fixes: 7a82ecf4cfb8 ("net/ncsi: NCSI AEN packet handler")
+Fixes: 138635cc27c9 ("net/ncsi: NCSI response packet handler")
+Signed-off-by: Jian Zhang <zhangjian.3032@bytedance.com>
+Link: https://patch.msgid.link/20260305060656.3357250-1-zhangjian.3032@bytedance.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ncsi/ncsi-aen.c |    3 ++-
+ net/ncsi/ncsi-rsp.c |   16 ++++++++++++----
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -224,7 +224,8 @@ int ncsi_aen_handler(struct ncsi_dev_pri
+       if (!nah) {
+               netdev_warn(ndp->ndev.dev, "Invalid AEN (0x%x) received\n",
+                           h->type);
+-              return -ENOENT;
++              ret = -ENOENT;
++              goto out;
+       }
+       ret = ncsi_validate_aen_pkt(h, nah->payload);
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -1176,8 +1176,10 @@ int ncsi_rcv_rsp(struct sk_buff *skb, st
+       /* Find the NCSI device */
+       nd = ncsi_find_dev(orig_dev);
+       ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
+-      if (!ndp)
+-              return -ENODEV;
++      if (!ndp) {
++              ret = -ENODEV;
++              goto err_free_skb;
++      }
+       /* Check if it is AEN packet */
+       hdr = (struct ncsi_pkt_hdr *)skb_network_header(skb);
+@@ -1199,7 +1201,8 @@ int ncsi_rcv_rsp(struct sk_buff *skb, st
+       if (!nrh) {
+               netdev_err(nd->dev, "Received unrecognized packet (0x%x)\n",
+                          hdr->type);
+-              return -ENOENT;
++              ret = -ENOENT;
++              goto err_free_skb;
+       }
+       /* Associate with the request */
+@@ -1207,7 +1210,8 @@ int ncsi_rcv_rsp(struct sk_buff *skb, st
+       nr = &ndp->requests[hdr->id];
+       if (!nr->used) {
+               spin_unlock_irqrestore(&ndp->lock, flags);
+-              return -ENODEV;
++              ret = -ENODEV;
++              goto err_free_skb;
+       }
+       nr->rsp = skb;
+@@ -1261,4 +1265,8 @@ out_netlink:
+ out:
+       ncsi_free_request(nr);
+       return ret;
++
++err_free_skb:
++      kfree_skb(skb);
++      return ret;
+ }
diff --git a/queue-6.19/net-nexthop-fix-percpu-use-after-free-in-remove_nh_grp_entry.patch b/queue-6.19/net-nexthop-fix-percpu-use-after-free-in-remove_nh_grp_entry.patch
new file mode 100644 (file)
index 0000000..512dc66
--- /dev/null
@@ -0,0 +1,83 @@
+From b2662e7593e94ae09b1cf7ee5f09160a3612bcb2 Mon Sep 17 00:00:00 2001
+From: Mehul Rao <mehulrao@gmail.com>
+Date: Fri, 6 Mar 2026 18:38:20 -0500
+Subject: net: nexthop: fix percpu use-after-free in remove_nh_grp_entry
+
+From: Mehul Rao <mehulrao@gmail.com>
+
+commit b2662e7593e94ae09b1cf7ee5f09160a3612bcb2 upstream.
+
+When removing a nexthop from a group, remove_nh_grp_entry() publishes
+the new group via rcu_assign_pointer() then immediately frees the
+removed entry's percpu stats with free_percpu(). However, the
+synchronize_net() grace period in the caller remove_nexthop_from_groups()
+runs after the free. RCU readers that entered before the publish still
+see the old group and can dereference the freed stats via
+nh_grp_entry_stats_inc() -> get_cpu_ptr(nhge->stats), causing a
+use-after-free on percpu memory.
+
+Fix by deferring the free_percpu() until after synchronize_net() in the
+caller. Removed entries are chained via nh_list onto a local deferred
+free list. After the grace period completes and all RCU readers have
+finished, the percpu stats are safely freed.
+
+Fixes: f4676ea74b85 ("net: nexthop: Add nexthop group entry stats")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mehul Rao <mehulrao@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/20260306233821.196789-1-mehulrao@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/nexthop.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -2005,7 +2005,8 @@ static void nh_hthr_group_rebalance(stru
+ }
+ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
+-                              struct nl_info *nlinfo)
++                              struct nl_info *nlinfo,
++                              struct list_head *deferred_free)
+ {
+       struct nh_grp_entry *nhges, *new_nhges;
+       struct nexthop *nhp = nhge->nh_parent;
+@@ -2065,8 +2066,8 @@ static void remove_nh_grp_entry(struct n
+       rcu_assign_pointer(nhp->nh_grp, newg);
+       list_del(&nhge->nh_list);
+-      free_percpu(nhge->stats);
+       nexthop_put(nhge->nh);
++      list_add(&nhge->nh_list, deferred_free);
+       /* Removal of a NH from a resilient group is notified through
+        * bucket notifications.
+@@ -2086,6 +2087,7 @@ static void remove_nexthop_from_groups(s
+                                      struct nl_info *nlinfo)
+ {
+       struct nh_grp_entry *nhge, *tmp;
++      LIST_HEAD(deferred_free);
+       /* If there is nothing to do, let's avoid the costly call to
+        * synchronize_net()
+@@ -2094,10 +2096,16 @@ static void remove_nexthop_from_groups(s
+               return;
+       list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
+-              remove_nh_grp_entry(net, nhge, nlinfo);
++              remove_nh_grp_entry(net, nhge, nlinfo, &deferred_free);
+       /* make sure all see the newly published array before releasing rtnl */
+       synchronize_net();
++
++	/* Now safe to free percpu stats — all RCU readers have finished */
++      list_for_each_entry_safe(nhge, tmp, &deferred_free, nh_list) {
++              list_del(&nhge->nh_list);
++              free_percpu(nhge->stats);
++      }
+ }
+ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
diff --git a/queue-6.19/net-tcp-ao-fix-mac-comparison-to-be-constant-time.patch b/queue-6.19/net-tcp-ao-fix-mac-comparison-to-be-constant-time.patch
new file mode 100644 (file)
index 0000000..406a77d
--- /dev/null
@@ -0,0 +1,53 @@
+From 67edfec516d30d3e62925c397be4a1e5185802fc Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Mon, 2 Mar 2026 12:36:00 -0800
+Subject: net/tcp-ao: Fix MAC comparison to be constant-time
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 67edfec516d30d3e62925c397be4a1e5185802fc upstream.
+
+To prevent timing attacks, MACs need to be compared in constant
+time.  Use the appropriate helper function for this.
+
+Fixes: 0a3a809089eb ("net/tcp: Verify inbound TCP-AO signed segments")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Reviewed-by: Dmitry Safonov <0x7f454c46@gmail.com>
+Link: https://patch.msgid.link/20260302203600.13561-1-ebiggers@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/Kconfig  |    1 +
+ net/ipv4/tcp_ao.c |    3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -748,6 +748,7 @@ config TCP_SIGPOOL
+ config TCP_AO
+       bool "TCP: Authentication Option (RFC5925)"
+       select CRYPTO
++      select CRYPTO_LIB_UTILS
+       select TCP_SIGPOOL
+       depends on 64BIT && IPV6 != m # seq-number extension needs WRITE_ONCE(u64)
+       help
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -10,6 +10,7 @@
+ #define pr_fmt(fmt) "TCP: " fmt
+ #include <crypto/hash.h>
++#include <crypto/utils.h>
+ #include <linux/inetdevice.h>
+ #include <linux/tcp.h>
+@@ -922,7 +923,7 @@ tcp_ao_verify_hash(const struct sock *sk
+       /* XXX: make it per-AF callback? */
+       tcp_ao_hash_skb(family, hash_buf, key, sk, skb, traffic_key,
+                       (phash - (u8 *)th), sne);
+-      if (memcmp(phash, hash_buf, maclen)) {
++      if (crypto_memneq(phash, hash_buf, maclen)) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
+               atomic64_inc(&info->counters.pkt_bad);
+               atomic64_inc(&key->pkt_bad);
diff --git a/queue-6.19/net-tcp-md5-fix-mac-comparison-to-be-constant-time.patch b/queue-6.19/net-tcp-md5-fix-mac-comparison-to-be-constant-time.patch
new file mode 100644 (file)
index 0000000..8ab7d4a
--- /dev/null
@@ -0,0 +1,93 @@
+From 46d0d6f50dab706637f4c18a470aac20a21900d3 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Mon, 2 Mar 2026 12:34:09 -0800
+Subject: net/tcp-md5: Fix MAC comparison to be constant-time
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 46d0d6f50dab706637f4c18a470aac20a21900d3 upstream.
+
+To prevent timing attacks, MACs need to be compared in constant
+time.  Use the appropriate helper function for this.
+
+Fixes: cfb6eeb4c860 ("[TCP]: MD5 Signature Option (RFC2385) support.")
+Fixes: 658ddaaf6694 ("tcp: md5: RST: getting md5 key from listener")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Link: https://patch.msgid.link/20260302203409.13388-1-ebiggers@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/Kconfig    |    1 +
+ net/ipv4/tcp.c      |    3 ++-
+ net/ipv4/tcp_ipv4.c |    3 ++-
+ net/ipv6/tcp_ipv6.c |    3 ++-
+ 4 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -762,6 +762,7 @@ config TCP_AO
+ config TCP_MD5SIG
+       bool "TCP: MD5 Signature Option support (RFC2385)"
+       select CRYPTO_LIB_MD5
++      select CRYPTO_LIB_UTILS
+       help
+         RFC2385 specifies a method of giving MD5 protection to TCP sessions.
+         Its main (only?) use is to protect BGP sessions between core routers
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -244,6 +244,7 @@
+ #define pr_fmt(fmt) "TCP: " fmt
+ #include <crypto/md5.h>
++#include <crypto/utils.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -4912,7 +4913,7 @@ tcp_inbound_md5_hash(const struct sock *
+               tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
+       else
+               tp->af_specific->calc_md5_hash(newhash, key, NULL, skb);
+-      if (memcmp(hash_location, newhash, 16) != 0) {
++      if (crypto_memneq(hash_location, newhash, 16)) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
+               trace_tcp_hash_md5_mismatch(sk, skb);
+               return SKB_DROP_REASON_TCP_MD5FAILURE;
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -88,6 +88,7 @@
+ #include <linux/skbuff_ref.h>
+ #include <crypto/md5.h>
++#include <crypto/utils.h>
+ #include <trace/events/tcp.h>
+@@ -838,7 +839,7 @@ static void tcp_v4_send_reset(const stru
+                       goto out;
+               tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
+-              if (memcmp(md5_hash_location, newhash, 16) != 0)
++              if (crypto_memneq(md5_hash_location, newhash, 16))
+                       goto out;
+       }
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -68,6 +68,7 @@
+ #include <linux/seq_file.h>
+ #include <crypto/md5.h>
++#include <crypto/utils.h>
+ #include <trace/events/tcp.h>
+@@ -1043,7 +1044,7 @@ static void tcp_v6_send_reset(const stru
+               key.type = TCP_KEY_MD5;
+               tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
+-              if (memcmp(md5_hash_location, newhash, 16) != 0)
++              if (crypto_memneq(md5_hash_location, newhash, 16))
+                       goto out;
+       }
+ #endif
diff --git a/queue-6.19/nouveau-dpcd-return-ebusy-for-aux-xfer-if-the-device-is-asleep.patch b/queue-6.19/nouveau-dpcd-return-ebusy-for-aux-xfer-if-the-device-is-asleep.patch
new file mode 100644 (file)
index 0000000..79e0ede
--- /dev/null
@@ -0,0 +1,45 @@
+From 8f3c6f08ababad2e3bdd239728cf66a9949446b4 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Tue, 24 Feb 2026 13:17:50 +1000
+Subject: nouveau/dpcd: return EBUSY for aux xfer if the device is asleep
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 8f3c6f08ababad2e3bdd239728cf66a9949446b4 upstream.
+
+If we have runtime suspended, and userspace wants to use /dev/drm_dp_*
+then just tell it the device is busy instead of crashing in the GSP
+code.
+
+WARNING: CPU: 2 PID: 565741 at drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c:164 r535_gsp_msgq_wait+0x9a/0xb0 [nouveau]
+CPU: 2 UID: 0 PID: 565741 Comm: fwupd Not tainted 6.18.10-200.fc43.x86_64 #1 PREEMPT(lazy)
+Hardware name: LENOVO 20QTS0PQ00/20QTS0PQ00, BIOS N2OET65W (1.52 ) 08/05/2024
+RIP: 0010:r535_gsp_msgq_wait+0x9a/0xb0 [nouveau]
+
+This is a simple fix to get backported. We should probably engineer a
+proper power domain solution to wake up devices and keep them awake
+while fw updates are happening.
+
+Cc: stable@vger.kernel.org
+Fixes: 8894f4919bc4 ("drm/nouveau: register a drm_dp_aux channel for each dp connector")
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Link: https://patch.msgid.link/20260224031750.791621-1-airlied@gmail.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_connector.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1230,6 +1230,9 @@ nouveau_connector_aux_xfer(struct drm_dp
+       u8 size = msg->size;
+       int ret;
++      if (pm_runtime_suspended(nv_connector->base.dev->dev))
++              return -EBUSY;
++
+       nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
+       if (!nv_encoder)
+               return -ENODEV;
diff --git a/queue-6.19/parisc-check-kernel-mapping-earlier-at-bootup.patch b/queue-6.19/parisc-check-kernel-mapping-earlier-at-bootup.patch
new file mode 100644 (file)
index 0000000..b61b513
--- /dev/null
@@ -0,0 +1,60 @@
+From 17c144f1104bfc29a3ce3f7d0931a1bfb7a3558c Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 3 Mar 2026 23:36:11 +0100
+Subject: parisc: Check kernel mapping earlier at bootup
+
+From: Helge Deller <deller@gmx.de>
+
+commit 17c144f1104bfc29a3ce3f7d0931a1bfb7a3558c upstream.
+
+The check if the initial mapping is sufficient needs to happen much
+earlier during bootup. Move this test directly to the start_parisc()
+function and use native PDC iodc functions to print the warning, because
+panic() and printk() are not functional yet.
+
+This fixes boot when enabling various KALLSYSMS options which need
+much more space.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/setup.c |   20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -120,14 +120,6 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+       printk(KERN_CONT ".\n");
+-      /*
+-       * Check if initial kernel page mappings are sufficient.
+-       * panic early if not, else we may access kernel functions
+-       * and variables which can't be reached.
+-       */
+-      if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
+-              panic("KERNEL_INITIAL_ORDER too small!");
+-
+ #ifdef CONFIG_64BIT
+       if(parisc_narrow_firmware) {
+               printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
+@@ -279,6 +271,18 @@ void __init start_parisc(void)
+       int ret, cpunum;
+       struct pdc_coproc_cfg coproc_cfg;
++      /*
++       * Check if initial kernel page mapping is sufficient.
++       * Print warning if not, because we may access kernel functions and
++       * variables which can't be reached yet through the initial mappings.
++       * Note that the panic() and printk() functions are not functional
++       * yet, so we need to use direct iodc() firmware calls instead.
++       */
++      const char warn1[] = "CRITICAL: Kernel may crash because "
++                           "KERNEL_INITIAL_ORDER is too small.\n";
++      if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
++              pdc_iodc_print(warn1, sizeof(warn1) - 1);
++
+       /* check QEMU/SeaBIOS marker in PAGE0 */
+       running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
diff --git a/queue-6.19/parisc-fix-initial-page-table-creation-for-boot.patch b/queue-6.19/parisc-fix-initial-page-table-creation-for-boot.patch
new file mode 100644 (file)
index 0000000..8a750d5
--- /dev/null
@@ -0,0 +1,46 @@
+From 8475d8fe21ec9c7eb2faca555fbc5b68cf0d2597 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Wed, 4 Mar 2026 22:24:18 +0100
+Subject: parisc: Fix initial page table creation for boot
+
+From: Helge Deller <deller@gmx.de>
+
+commit 8475d8fe21ec9c7eb2faca555fbc5b68cf0d2597 upstream.
+
+The KERNEL_INITIAL_ORDER value defines the initial size (usually 32 or
+64 MB) of the page table during bootup. Up until now the whole area was
+initialized with PTE entries, but there was no check if we filled too
+many entries.  Change the code to fill up with so many entries that the
+"_end" symbol can be reached by the kernel, but not more entries than
+actually fit into the initial PTE tables.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/head.S |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -56,6 +56,7 @@ ENTRY(parisc_kernel_start)
+       .import __bss_start,data
+       .import __bss_stop,data
++      .import __end,data
+       load32          PA(__bss_start),%r3
+       load32          PA(__bss_stop),%r4
+@@ -149,7 +150,11 @@ $cpu_ok:
+        * everything ... it will get remapped correctly later */
+       ldo             0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
+       load32          (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+-      load32          PA(pg0),%r1
++      load32          PA(_end),%r1
++      SHRREG          %r1,PAGE_SHIFT,%r1  /* %r1 is PFN count for _end symbol */
++      cmpb,<<,n       %r11,%r1,1f
++      copy            %r1,%r11        /* %r1 PFN count smaller than %r11 */
++1:    load32          PA(pg0),%r1
+ $pgt_fill_loop:
+       STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
diff --git a/queue-6.19/parisc-increase-initial-mapping-to-64-mb-with-kallsyms.patch b/queue-6.19/parisc-increase-initial-mapping-to-64-mb-with-kallsyms.patch
new file mode 100644 (file)
index 0000000..67df553
--- /dev/null
@@ -0,0 +1,30 @@
+From 8e732934fb81282be41602550e7e07baf265e972 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 3 Mar 2026 23:36:10 +0100
+Subject: parisc: Increase initial mapping to 64 MB with KALLSYMS
+
+From: Helge Deller <deller@gmx.de>
+
+commit 8e732934fb81282be41602550e7e07baf265e972 upstream.
+
+The 32MB initial kernel mapping can become too small when CONFIG_KALLSYMS
+is used. Increase the mapping to 64 MB in this case.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/include/asm/pgtable.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -85,7 +85,7 @@ extern void __update_cache(pte_t pte);
+       printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+ /* This is the size of the initially mapped kernel memory */
+-#if defined(CONFIG_64BIT)
++#if defined(CONFIG_64BIT) || defined(CONFIG_KALLSYMS)
+ #define KERNEL_INITIAL_ORDER  26      /* 1<<26 = 64MB */
+ #else
+ #define KERNEL_INITIAL_ORDER  25      /* 1<<25 = 32MB */
diff --git a/queue-6.19/pinctrl-cy8c95x0-don-t-miss-reading-the-last-bank-registers.patch b/queue-6.19/pinctrl-cy8c95x0-don-t-miss-reading-the-last-bank-registers.patch
new file mode 100644 (file)
index 0000000..5e5e257
--- /dev/null
@@ -0,0 +1,45 @@
+From b6c3af46c26f2d07c10a1452adc34b821719327e Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Mon, 23 Feb 2026 19:06:51 +0100
+Subject: pinctrl: cy8c95x0: Don't miss reading the last bank registers
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit b6c3af46c26f2d07c10a1452adc34b821719327e upstream.
+
+When code had been changed to use for_each_set_clump8(), it mistakenly
+switched from chip->nport to chip->tpin since the cy8c9540 and cy8c9560
+have a 4-pin gap. This, in particular, led to the missed read of
+the last bank interrupt status register and hence missing interrupts
+on those pins. Restore the upper limit in for_each_set_clump8() to take
+into consideration that gap.
+
+Fixes: 83e29a7a1fdf ("pinctrl: cy8c95x0; Switch to use for_each_set_clump8()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/pinctrl-cy8c95x0.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
++++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
+@@ -627,7 +627,7 @@ static int cy8c95x0_write_regs_mask(stru
+       bitmap_scatter(tmask, mask, chip->map, MAX_LINE);
+       bitmap_scatter(tval, val, chip->map, MAX_LINE);
+-      for_each_set_clump8(offset, bits, tmask, chip->tpin) {
++      for_each_set_clump8(offset, bits, tmask, chip->nport * BANK_SZ) {
+               unsigned int i = offset / 8;
+               write_val = bitmap_get_value8(tval, offset);
+@@ -655,7 +655,7 @@ static int cy8c95x0_read_regs_mask(struc
+       bitmap_scatter(tmask, mask, chip->map, MAX_LINE);
+       bitmap_scatter(tval, val, chip->map, MAX_LINE);
+-      for_each_set_clump8(offset, bits, tmask, chip->tpin) {
++      for_each_set_clump8(offset, bits, tmask, chip->nport * BANK_SZ) {
+               unsigned int i = offset / 8;
+               ret = cy8c95x0_regmap_read_bits(chip, reg, i, bits, &read_val);
diff --git a/queue-6.19/pmdomain-bcm-bcm2835-power-fix-broken-reset-status-read.patch b/queue-6.19/pmdomain-bcm-bcm2835-power-fix-broken-reset-status-read.patch
new file mode 100644 (file)
index 0000000..e3fec4a
--- /dev/null
@@ -0,0 +1,55 @@
+From 550bae2c0931dbb664a61b08c21cf156f0a5362a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ma=C3=ADra=20Canal?= <mcanal@igalia.com>
+Date: Thu, 12 Feb 2026 11:49:44 -0300
+Subject: pmdomain: bcm: bcm2835-power: Fix broken reset status read
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maíra Canal <mcanal@igalia.com>
+
+commit 550bae2c0931dbb664a61b08c21cf156f0a5362a upstream.
+
+bcm2835_reset_status() has a misplaced parenthesis on every PM_READ()
+call. Since PM_READ(reg) expands to readl(power->base + (reg)), the
+expression:
+
+    PM_READ(PM_GRAFX & PM_V3DRSTN)
+
+computes the bitwise AND of the register offset PM_GRAFX with the
+bitmask PM_V3DRSTN before using the result as a register offset, reading
+from the wrong MMIO address instead of the intended PM_GRAFX register.
+The same issue affects the PM_IMAGE cases.
+
+Fix by moving the closing parenthesis so PM_READ() receives only the
+register offset, and the bitmask is applied to the value returned by
+the read.
+
+Fixes: 670c672608a1 ("soc: bcm: bcm2835-pm: Add support for power domains under a new binding.")
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pmdomain/bcm/bcm2835-power.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/pmdomain/bcm/bcm2835-power.c
++++ b/drivers/pmdomain/bcm/bcm2835-power.c
+@@ -580,11 +580,11 @@ static int bcm2835_reset_status(struct r
+       switch (id) {
+       case BCM2835_RESET_V3D:
+-              return !PM_READ(PM_GRAFX & PM_V3DRSTN);
++              return !(PM_READ(PM_GRAFX) & PM_V3DRSTN);
+       case BCM2835_RESET_H264:
+-              return !PM_READ(PM_IMAGE & PM_H264RSTN);
++              return !(PM_READ(PM_IMAGE) & PM_H264RSTN);
+       case BCM2835_RESET_ISP:
+-              return !PM_READ(PM_IMAGE & PM_ISPRSTN);
++              return !(PM_READ(PM_IMAGE) & PM_ISPRSTN);
+       default:
+               return -EINVAL;
+       }
diff --git a/queue-6.19/pmdomain-rockchip-fix-pd_vcodec-for-rk3588.patch b/queue-6.19/pmdomain-rockchip-fix-pd_vcodec-for-rk3588.patch
new file mode 100644 (file)
index 0000000..c5093af
--- /dev/null
@@ -0,0 +1,77 @@
+From 0fb59eaca18f1254ecdce34354eec3cb1b3b5e10 Mon Sep 17 00:00:00 2001
+From: Shawn Lin <shawn.lin@rock-chips.com>
+Date: Wed, 25 Feb 2026 10:55:01 +0800
+Subject: pmdomain: rockchip: Fix PD_VCODEC for RK3588
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+commit 0fb59eaca18f1254ecdce34354eec3cb1b3b5e10 upstream.
+
+>From the RK3588 TRM Table 7-1 RK3588 Voltage Domain and Power Domain Summary,
+PD_RKVDEC0/1 and PD_VENC0/1 rely on VD_VCODEC which require extra voltages to
+be applied, otherwise it breaks RK3588-evb1-v10 board after vdec support landed[1].
+The panic looks like below:
+
+  rockchip-pm-domain fd8d8000.power-management:power-controller: failed to set domain 'rkvdec0' on, val=0
+  rockchip-pm-domain fd8d8000.power-management:power-controller: failed to set domain 'rkvdec1' on, val=0
+  ...
+  Hardware name: Rockchip RK3588S EVB1 V10 Board (DT)
+  Workqueue: pm genpd_power_off_work_fn
+  Call trace:
+  show_stack+0x18/0x24 (C)
+  dump_stack_lvl+0x40/0x84
+  dump_stack+0x18/0x24
+  vpanic+0x1ec/0x4fc
+  vpanic+0x0/0x4fc
+  check_panic_on_warn+0x0/0x94
+  arm64_serror_panic+0x6c/0x78
+  do_serror+0xc4/0xcc
+  el1h_64_error_handler+0x3c/0x5c
+  el1h_64_error+0x6c/0x70
+  regmap_mmio_read32le+0x18/0x24 (P)
+  regmap_bus_reg_read+0xfc/0x130
+  regmap_read+0x188/0x1ac
+  regmap_read+0x54/0x78
+  rockchip_pd_power+0xcc/0x5f0
+  rockchip_pd_power_off+0x1c/0x4c
+  genpd_power_off+0x84/0x120
+  genpd_power_off+0x1b4/0x260
+  genpd_power_off_work_fn+0x38/0x58
+  process_scheduled_works+0x194/0x2c4
+  worker_thread+0x2ac/0x3d8
+  kthread+0x104/0x124
+  ret_from_fork+0x10/0x20
+  SMP: stopping secondary CPUs
+  Kernel Offset: disabled
+  CPU features: 0x3000000,000e0005,40230521,0400720b
+  Memory Limit: none
+  ---[ end Kernel panic - not syncing: Asynchronous SError Interrupt ]---
+
+Chaoyi pointed out the PD_VCODEC is the parent of PD_RKVDEC0/1 and PD_VENC0/1, so checking
+the PD_VCODEC is enough.
+
+[1] https://lore.kernel.org/linux-rockchip/20251020212009.8852-2-detlev.casanova@collabora.com/
+
+Fixes: db6df2e3fc16 ("pmdomain: rockchip: add regulator support")
+Cc: stable@vger.kernel.org
+Suggested-by: Chaoyi Chen <chaoyi.chen@rock-chips.com>
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Reviewed-by: Chaoyi Chen <chaoyi.chen@rock-chips.com>
+Reviewed-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pmdomain/rockchip/pm-domains.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pmdomain/rockchip/pm-domains.c
++++ b/drivers/pmdomain/rockchip/pm-domains.c
+@@ -1311,7 +1311,7 @@ static const struct rockchip_domain_info
+ static const struct rockchip_domain_info rk3588_pm_domains[] = {
+       [RK3588_PD_GPU]         = DOMAIN_RK3588("gpu",     0x0, BIT(0),  0,       0x0, 0,       BIT(1),  0x0, BIT(0),  BIT(0),  false, true),
+       [RK3588_PD_NPU]         = DOMAIN_RK3588("npu",     0x0, BIT(1),  BIT(1),  0x0, 0,       0,       0x0, 0,       0,       false, true),
+-      [RK3588_PD_VCODEC]      = DOMAIN_RK3588("vcodec",  0x0, BIT(2),  BIT(2),  0x0, 0,       0,       0x0, 0,       0,       false, false),
++      [RK3588_PD_VCODEC]      = DOMAIN_RK3588("vcodec",  0x0, BIT(2),  BIT(2),  0x0, 0,       0,       0x0, 0,       0,       false, true),
+       [RK3588_PD_NPUTOP]      = DOMAIN_RK3588("nputop",  0x0, BIT(3),  0,       0x0, BIT(11), BIT(2),  0x0, BIT(1),  BIT(1),  false, false),
+       [RK3588_PD_NPU1]        = DOMAIN_RK3588("npu1",    0x0, BIT(4),  0,       0x0, BIT(12), BIT(3),  0x0, BIT(2),  BIT(2),  false, false),
+       [RK3588_PD_NPU2]        = DOMAIN_RK3588("npu2",    0x0, BIT(5),  0,       0x0, BIT(13), BIT(4),  0x0, BIT(3),  BIT(3),  false, false),
diff --git a/queue-6.19/regulator-pf9453-respect-irq-trigger-settings-from-firmware.patch b/queue-6.19/regulator-pf9453-respect-irq-trigger-settings-from-firmware.patch
new file mode 100644 (file)
index 0000000..88250d1
--- /dev/null
@@ -0,0 +1,38 @@
+From 2d85ecd6fb0eb2fee0ffa040ec1ddea57b09bc38 Mon Sep 17 00:00:00 2001
+From: Franz Schnyder <franz.schnyder@toradex.com>
+Date: Wed, 18 Feb 2026 11:25:14 +0100
+Subject: regulator: pf9453: Respect IRQ trigger settings from firmware
+
+From: Franz Schnyder <franz.schnyder@toradex.com>
+
+commit 2d85ecd6fb0eb2fee0ffa040ec1ddea57b09bc38 upstream.
+
+The datasheet specifies, that the IRQ_B pin is pulled low when any
+unmasked interrupt bit status is changed, and it is released high once
+the application processor reads the INT1 register. As it specifies a
+level-low behavior, it should not force a falling-edge interrupt.
+
+Remove the IRQF_TRIGGER_FALLING to not force the falling-edge interrupt
+and instead rely on the flag from the device tree.
+
+Fixes: 0959b6706325 ("regulator: pf9453: add PMIC PF9453 support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Franz Schnyder <franz.schnyder@toradex.com>
+Link: https://patch.msgid.link/20260218102518.238943-2-fra.schnyder@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/regulator/pf9453-regulator.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/regulator/pf9453-regulator.c
++++ b/drivers/regulator/pf9453-regulator.c
+@@ -809,7 +809,7 @@ static int pf9453_i2c_probe(struct i2c_c
+       }
+       ret = devm_request_threaded_irq(pf9453->dev, pf9453->irq, NULL, pf9453_irq_handler,
+-                                      (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
++                                      IRQF_ONESHOT,
+                                       "pf9453-irq", pf9453);
+       if (ret)
+               return dev_err_probe(pf9453->dev, ret, "Failed to request IRQ: %d\n", pf9453->irq);
diff --git a/queue-6.19/s390-stackleak-fix-__stackleak_poison-inline-assembly-constraint.patch b/queue-6.19/s390-stackleak-fix-__stackleak_poison-inline-assembly-constraint.patch
new file mode 100644 (file)
index 0000000..d6285f2
--- /dev/null
@@ -0,0 +1,42 @@
+From 674c5ff0f440a051ebf299d29a4c013133d81a65 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Mon, 2 Mar 2026 14:35:00 +0100
+Subject: s390/stackleak: Fix __stackleak_poison() inline assembly constraint
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit 674c5ff0f440a051ebf299d29a4c013133d81a65 upstream.
+
+The __stackleak_poison() inline assembly comes with a "count" operand where
+the "d" constraint is used. "count" is used with the exrl instruction and
+"d" means that the compiler may allocate any register from 0 to 15.
+
+If the compiler would allocate register 0 then the exrl instruction would
+not or the value of "count" into the executed instruction - resulting in a
+stackframe which is only partially poisoned.
+
+Use the correct "a" constraint, which excludes register 0 from register
+allocation.
+
+Fixes: 2a405f6bb3a5 ("s390/stackleak: provide fast __stackleak_poison() implementation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Link: https://lore.kernel.org/r/20260302133500.1560531-4-hca@linux.ibm.com
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/processor.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -158,7 +158,7 @@ static __always_inline void __stackleak_
+               "       j       4f\n"
+               "3:     mvc     8(1,%[addr]),0(%[addr])\n"
+               "4:"
+-              : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
++              : [addr] "+&a" (erase_low), [count] "+&a" (count), [tmp] "=&a" (tmp)
+               : [poison] "d" (poison)
+               : "memory", "cc"
+               );
diff --git a/queue-6.19/s390-xor-fix-xor_xc_2-inline-assembly-constraints.patch b/queue-6.19/s390-xor-fix-xor_xc_2-inline-assembly-constraints.patch
new file mode 100644 (file)
index 0000000..c1cfb35
--- /dev/null
@@ -0,0 +1,39 @@
+From f775276edc0c505dc0f782773796c189f31a1123 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Mon, 2 Mar 2026 14:34:58 +0100
+Subject: s390/xor: Fix xor_xc_2() inline assembly constraints
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit f775276edc0c505dc0f782773796c189f31a1123 upstream.
+
+The inline assembly constraints for xor_xc_2() are incorrect. "bytes",
+"p1", and "p2" are input operands, while all three of them are modified
+within the inline assembly. Given that the function consists only of this
+inline assembly it seems unlikely that this may cause any problems, however
+fix this in any case.
+
+Fixes: 2cfc5f9ce7f5 ("s390/xor: optimized xor routing using the XC instruction")
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Link: https://lore.kernel.org/r/20260302133500.1560531-2-hca@linux.ibm.com
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/lib/xor.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/lib/xor.c
++++ b/arch/s390/lib/xor.c
+@@ -28,8 +28,8 @@ static void xor_xc_2(unsigned long bytes
+               "       j       3f\n"
+               "2:     xc      0(1,%1),0(%2)\n"
+               "3:"
+-              : : "d" (bytes), "a" (p1), "a" (p2)
+-              : "0", "cc", "memory");
++              : "+d" (bytes), "+a" (p1), "+a" (p2)
++              : : "0", "cc", "memory");
+ }
+ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
diff --git a/queue-6.19/s390-xor-fix-xor_xc_5-inline-assembly.patch b/queue-6.19/s390-xor-fix-xor_xc_5-inline-assembly.patch
new file mode 100644 (file)
index 0000000..d769fc4
--- /dev/null
@@ -0,0 +1,34 @@
+From 5f25805303e201f3afaff0a90f7c7ce257468704 Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Mon, 2 Mar 2026 19:03:34 +0100
+Subject: s390/xor: Fix xor_xc_5() inline assembly
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit 5f25805303e201f3afaff0a90f7c7ce257468704 upstream.
+
+xor_xc_5() contains a larl 1,2f that is not used by the asm and is not
+declared as a clobber. This can corrupt a compiler-allocated value in %r1
+and lead to miscompilation. Remove the instruction.
+
+Fixes: 745600ed6965 ("s390/lib: Use exrl instead of ex in xor functions")
+Cc: stable@vger.kernel.org
+Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/lib/xor.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/s390/lib/xor.c
++++ b/arch/s390/lib/xor.c
+@@ -96,7 +96,6 @@ static void xor_xc_5(unsigned long bytes
+                    const unsigned long * __restrict p5)
+ {
+       asm volatile(
+-              "       larl    1,2f\n"
+               "       aghi    %0,-1\n"
+               "       jm      6f\n"
+               "       srlg    0,%0,8\n"
diff --git a/queue-6.19/selftests-fix-mntns-iteration-selftests.patch b/queue-6.19/selftests-fix-mntns-iteration-selftests.patch
new file mode 100644 (file)
index 0000000..dd559d7
--- /dev/null
@@ -0,0 +1,101 @@
+From 4c7b2ec23cc5d880e3ffe35e8c2aad686b67723a Mon Sep 17 00:00:00 2001
+From: Christian Brauner <brauner@kernel.org>
+Date: Thu, 26 Feb 2026 14:50:12 +0100
+Subject: selftests: fix mntns iteration selftests
+
+From: Christian Brauner <brauner@kernel.org>
+
+commit 4c7b2ec23cc5d880e3ffe35e8c2aad686b67723a upstream.
+
+Now that we have changed permission checking, make sure that we
+reflect that in the selftests.
+
+Link: https://patch.msgid.link/20260226-work-visibility-fixes-v1-4-d2c2853313bd@kernel.org
+Fixes: 9d87b1067382 ("selftests: add tests for mntns iteration")
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Cc: stable@kernel.org # v6.14+
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/filesystems/nsfs/iterate_mntns.c |   25 +++++++++------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
++++ b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
+@@ -37,17 +37,20 @@ FIXTURE(iterate_mount_namespaces) {
+       __u64 mnt_ns_id[MNT_NS_COUNT];
+ };
++static inline bool mntns_in_list(__u64 *mnt_ns_id, struct mnt_ns_info *info)
++{
++      for (int i = 0; i < MNT_NS_COUNT; i++) {
++              if (mnt_ns_id[i] == info->mnt_ns_id)
++                      return true;
++      }
++      return false;
++}
++
+ FIXTURE_SETUP(iterate_mount_namespaces)
+ {
+       for (int i = 0; i < MNT_NS_COUNT; i++)
+               self->fd_mnt_ns[i] = -EBADF;
+-      /*
+-       * Creating a new user namespace let's us guarantee that we only see
+-       * mount namespaces that we did actually create.
+-       */
+-      ASSERT_EQ(unshare(CLONE_NEWUSER), 0);
+-
+       for (int i = 0; i < MNT_NS_COUNT; i++) {
+               struct mnt_ns_info info = {};
+@@ -75,13 +78,15 @@ TEST_F(iterate_mount_namespaces, iterate
+       fd_mnt_ns_cur = fcntl(self->fd_mnt_ns[0], F_DUPFD_CLOEXEC);
+       ASSERT_GE(fd_mnt_ns_cur, 0);
+-      for (;; count++) {
++      for (;;) {
+               struct mnt_ns_info info = {};
+               int fd_mnt_ns_next;
+               fd_mnt_ns_next = ioctl(fd_mnt_ns_cur, NS_MNT_GET_NEXT, &info);
+               if (fd_mnt_ns_next < 0 && errno == ENOENT)
+                       break;
++              if (mntns_in_list(self->mnt_ns_id, &info))
++                      count++;
+               ASSERT_GE(fd_mnt_ns_next, 0);
+               ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+               fd_mnt_ns_cur = fd_mnt_ns_next;
+@@ -96,13 +101,15 @@ TEST_F(iterate_mount_namespaces, iterate
+       fd_mnt_ns_cur = fcntl(self->fd_mnt_ns[MNT_NS_LAST_INDEX], F_DUPFD_CLOEXEC);
+       ASSERT_GE(fd_mnt_ns_cur, 0);
+-      for (;; count++) {
++      for (;;) {
+               struct mnt_ns_info info = {};
+               int fd_mnt_ns_prev;
+               fd_mnt_ns_prev = ioctl(fd_mnt_ns_cur, NS_MNT_GET_PREV, &info);
+               if (fd_mnt_ns_prev < 0 && errno == ENOENT)
+                       break;
++              if (mntns_in_list(self->mnt_ns_id, &info))
++                      count++;
+               ASSERT_GE(fd_mnt_ns_prev, 0);
+               ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+               fd_mnt_ns_cur = fd_mnt_ns_prev;
+@@ -125,7 +132,6 @@ TEST_F(iterate_mount_namespaces, iterate
+               ASSERT_GE(fd_mnt_ns_next, 0);
+               ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+               fd_mnt_ns_cur = fd_mnt_ns_next;
+-              ASSERT_EQ(info.mnt_ns_id, self->mnt_ns_id[i]);
+       }
+ }
+@@ -144,7 +150,6 @@ TEST_F(iterate_mount_namespaces, iterate
+               ASSERT_GE(fd_mnt_ns_prev, 0);
+               ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+               fd_mnt_ns_cur = fd_mnt_ns_prev;
+-              ASSERT_EQ(info.mnt_ns_id, self->mnt_ns_id[i]);
+       }
+ }
index 75e761996158e23434f7fdd7139493267043a472..4bb17f1e364462d16d436567de999d1a90eb173b 100644 (file)
@@ -254,3 +254,47 @@ mm-slab-fix-an-incorrect-check-in-obj_exts_alloc_size.patch
 staging-sm750fb-add-missing-pci_release_region-on-error-and-removal.patch
 staging-rtl8723bs-properly-validate-the-data-in-rtw_get_ie_ex.patch
 staging-rtl8723bs-fix-potential-out-of-bounds-read-in-rtw_restruct_wmm_ie.patch
+pinctrl-cy8c95x0-don-t-miss-reading-the-last-bank-registers.patch
+selftests-fix-mntns-iteration-selftests.patch
+media-dvb-net-fix-oob-access-in-ule-extension-header-tables.patch
+net-mana-ring-doorbell-at-4-cq-wraparounds.patch
+net-fix-rcu_tasks-stall-in-threaded-busypoll.patch
+ice-fix-retry-for-aq-command-0x06ee.patch
+fgraph-fix-thresh_return-clear-per-task-notrace.patch
+tracing-fix-syscall-events-activation-by-ensuring-refcount-hits-zero.patch
+net-tcp-ao-fix-mac-comparison-to-be-constant-time.patch
+fgraph-fix-thresh_return-nosleeptime-double-adjust.patch
+net-tcp-md5-fix-mac-comparison-to-be-constant-time.patch
+batman-adv-avoid-double-rtnl_lock-elp-metric-worker.patch
+drm-xe-xe2_hpg-correct-implementation-of-wa_16025250150.patch
+pmdomain-rockchip-fix-pd_vcodec-for-rk3588.patch
+parisc-increase-initial-mapping-to-64-mb-with-kallsyms.patch
+nouveau-dpcd-return-ebusy-for-aux-xfer-if-the-device-is-asleep.patch
+arm64-mm-add-pte_dirty-back-to-page_kernel-to-fix-kexec-hibernation.patch
+hwmon-pmbus-q54sj108a2-fix-stack-overflow-in-debugfs-read.patch
+io_uring-zcrx-use-read_once-with-user-shared-rqes.patch
+parisc-fix-initial-page-table-creation-for-boot.patch
+arm64-contpte-fix-set_access_flags-no-op-check-for-smmu-ats-faults.patch
+parisc-check-kernel-mapping-earlier-at-bootup.patch
+io_uring-net-reject-send_vectorized-when-unsupported.patch
+regulator-pf9453-respect-irq-trigger-settings-from-firmware.patch
+pmdomain-bcm-bcm2835-power-fix-broken-reset-status-read.patch
+drm-ttm-fix-ttm_pool_beneficial_order-return-type.patch
+crypto-ccp-allow-callers-to-use-hv-fixed-page-api-when-sev-is-disabled.patch
+s390-stackleak-fix-__stackleak_poison-inline-assembly-constraint.patch
+ata-libata-core-disable-lpm-on-st1000dm010-2ep102.patch
+s390-xor-fix-xor_xc_2-inline-assembly-constraints.patch
+drm-amd-display-fallback-to-boot-snapshot-for-dispclk.patch
+s390-xor-fix-xor_xc_5-inline-assembly.patch
+slab-distinguish-lock-and-trylock-for-sheaf_flush_main.patch
+memcg-fix-slab-accounting-in-refill_obj_stock-trylock-path.patch
+ksmbd-fix-use-after-free-in-smb_lazy_parent_lease_break_close.patch
+smb-server-fix-use-after-free-in-smb2_open.patch
+ksmbd-don-t-log-keys-in-smb3-signing-and-encryption-key-generation.patch
+ksmbd-fix-use-after-free-by-using-call_rcu-for-oplock_info.patch
+net-mctp-fix-device-leak-on-probe-failure.patch
+net-nexthop-fix-percpu-use-after-free-in-remove_nh_grp_entry.patch
+net-ncsi-fix-skb-leak-in-error-paths.patch
+net-ethernet-arc-emac-quiesce-interrupts-before-requesting-irq.patch
+net-dsa-microchip-fix-error-path-in-ptp-irq-setup.patch
+net-macb-shuffle-the-tx-ring-before-enabling-tx.patch
diff --git a/queue-6.19/slab-distinguish-lock-and-trylock-for-sheaf_flush_main.patch b/queue-6.19/slab-distinguish-lock-and-trylock-for-sheaf_flush_main.patch
new file mode 100644 (file)
index 0000000..5d1969b
--- /dev/null
@@ -0,0 +1,121 @@
+From 48647d3f9a644d1e81af6558102d43cdb260597b Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Wed, 11 Feb 2026 10:42:30 +0100
+Subject: slab: distinguish lock and trylock for sheaf_flush_main()
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 48647d3f9a644d1e81af6558102d43cdb260597b upstream.
+
+sheaf_flush_main() can be called from __pcs_replace_full_main() where
+it's fine if the trylock fails, and pcs_flush_all() where it's not
+expected to, and for some flush callers (when destroying the cache or
+memory hotremove) it would actually be a problem if it failed and left
+the main sheaf not flushed. The flush callers can, however, safely use
+local_lock() instead of trylock.
+
+The trylock failure should not happen in practice on !PREEMPT_RT, but
+can happen on PREEMPT_RT. The impact is limited in practice because when
+a trylock fails in the kmem_cache_destroy() path, it means someone is
+using the cache while destroying it, which is a bug on its own. The memory
+hotremove path is unlikely to be employed in a production RT config, but
+it's possible.
+
+To fix this, split the function into sheaf_flush_main() (using
+local_lock()) and sheaf_try_flush_main() (using local_trylock()) where
+both call __sheaf_flush_main_batch() to flush a single batch of objects.
+This will also allow lockdep to verify our context assumptions.
+
+The problem was raised in an off-list question by Marcelo.
+
+Fixes: 2d517aa09bbc ("slab: add opt-in caching layer of percpu sheaves")
+Cc: stable@vger.kernel.org
+Reported-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Reviewed-by: Hao Li <hao.li@linux.dev>
+Link: https://patch.msgid.link/20260211-b4-sheaf-flush-v1-1-4e7f492f0055@suse.cz
+Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   47 +++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 37 insertions(+), 10 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2730,19 +2730,19 @@ static void __kmem_cache_free_bulk(struc
+  * object pointers are moved to a on-stack array under the lock. To bound the
+  * stack usage, limit each batch to PCS_BATCH_MAX.
+  *
+- * returns true if at least partially flushed
++ * Must be called with s->cpu_sheaves->lock locked, returns with the lock
++ * unlocked.
++ *
++ * Returns how many objects are remaining to be flushed
+  */
+-static bool sheaf_flush_main(struct kmem_cache *s)
++static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s)
+ {
+       struct slub_percpu_sheaves *pcs;
+       unsigned int batch, remaining;
+       void *objects[PCS_BATCH_MAX];
+       struct slab_sheaf *sheaf;
+-      bool ret = false;
+-next_batch:
+-      if (!local_trylock(&s->cpu_sheaves->lock))
+-              return ret;
++      lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
+       pcs = this_cpu_ptr(s->cpu_sheaves);
+       sheaf = pcs->main;
+@@ -2760,10 +2760,37 @@ next_batch:
+       stat_add(s, SHEAF_FLUSH, batch);
+-      ret = true;
++      return remaining;
++}
+-      if (remaining)
+-              goto next_batch;
++static void sheaf_flush_main(struct kmem_cache *s)
++{
++      unsigned int remaining;
++
++      do {
++              local_lock(&s->cpu_sheaves->lock);
++
++              remaining = __sheaf_flush_main_batch(s);
++
++      } while (remaining);
++}
++
++/*
++ * Returns true if the main sheaf was at least partially flushed.
++ */
++static bool sheaf_try_flush_main(struct kmem_cache *s)
++{
++      unsigned int remaining;
++      bool ret = false;
++
++      do {
++              if (!local_trylock(&s->cpu_sheaves->lock))
++                      return ret;
++
++              ret = true;
++              remaining = __sheaf_flush_main_batch(s);
++
++      } while (remaining);
+       return ret;
+ }
+@@ -6215,7 +6242,7 @@ alloc_empty:
+       if (put_fail)
+                stat(s, BARN_PUT_FAIL);
+-      if (!sheaf_flush_main(s))
++      if (!sheaf_try_flush_main(s))
+               return NULL;
+       if (!local_trylock(&s->cpu_sheaves->lock))
diff --git a/queue-6.19/smb-server-fix-use-after-free-in-smb2_open.patch b/queue-6.19/smb-server-fix-use-after-free-in-smb2_open.patch
new file mode 100644 (file)
index 0000000..d2cce7d
--- /dev/null
@@ -0,0 +1,44 @@
+From 1e689a56173827669a35da7cb2a3c78ed5c53680 Mon Sep 17 00:00:00 2001
+From: Marios Makassikis <mmakassikis@freebox.fr>
+Date: Tue, 3 Mar 2026 11:14:32 +0100
+Subject: smb: server: fix use-after-free in smb2_open()
+
+From: Marios Makassikis <mmakassikis@freebox.fr>
+
+commit 1e689a56173827669a35da7cb2a3c78ed5c53680 upstream.
+
+The opinfo pointer obtained via rcu_dereference(fp->f_opinfo) is
+dereferenced after rcu_read_unlock(), creating a use-after-free
+window.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Marios Makassikis <mmakassikis@freebox.fr>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -3616,10 +3616,8 @@ int smb2_open(struct ksmbd_work *work)
+ reconnected_fp:
+       rsp->StructureSize = cpu_to_le16(89);
+-      rcu_read_lock();
+-      opinfo = rcu_dereference(fp->f_opinfo);
++      opinfo = opinfo_get(fp);
+       rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
+-      rcu_read_unlock();
+       rsp->Flags = 0;
+       rsp->CreateAction = cpu_to_le32(file_info);
+       rsp->CreationTime = cpu_to_le64(fp->create_time);
+@@ -3660,6 +3658,7 @@ reconnected_fp:
+               next_ptr = &lease_ccontext->Next;
+               next_off = conn->vals->create_lease_size;
+       }
++      opinfo_put(opinfo);
+       if (maximal_access_ctxt) {
+               struct create_context *mxac_ccontext;
diff --git a/queue-6.19/tracing-fix-syscall-events-activation-by-ensuring-refcount-hits-zero.patch b/queue-6.19/tracing-fix-syscall-events-activation-by-ensuring-refcount-hits-zero.patch
new file mode 100644 (file)
index 0000000..1c36e6a
--- /dev/null
@@ -0,0 +1,119 @@
+From 0a663b764dbdf135a126284f454c9f01f95a87d4 Mon Sep 17 00:00:00 2001
+From: Huiwen He <hehuiwen@kylinos.cn>
+Date: Tue, 24 Feb 2026 10:35:44 +0800
+Subject: tracing: Fix syscall events activation by ensuring refcount hits zero
+
+From: Huiwen He <hehuiwen@kylinos.cn>
+
+commit 0a663b764dbdf135a126284f454c9f01f95a87d4 upstream.
+
+When multiple syscall events are specified in the kernel command line
+(e.g., trace_event=syscalls:sys_enter_openat,syscalls:sys_enter_close),
+they are often not captured after boot, even though they appear enabled
+in the tracing/set_event file.
+
+The issue stems from how syscall events are initialized. Syscall
+tracepoints require the global reference count (sys_tracepoint_refcount)
+to transition from 0 to 1 to trigger the registration of the syscall
+work (TIF_SYSCALL_TRACEPOINT) for tasks, including the init process (pid 1).
+
+The current implementation of early_enable_events() with disable_first=true
+used an interleaved sequence of "Disable A -> Enable A -> Disable B -> Enable B".
+If multiple syscalls are enabled, the refcount never drops to zero,
+preventing the 0->1 transition that triggers actual registration.
+
+Fix this by splitting early_enable_events() into two distinct phases:
+1. Disable all events specified in the buffer.
+2. Enable all events specified in the buffer.
+
+This ensures the refcount hits zero before re-enabling, allowing syscall
+events to be properly activated during early boot.
+
+The code is also refactored to use a helper function to avoid logic
+duplication between the disable and enable phases.
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://patch.msgid.link/20260224023544.1250787-1-hehuiwen@kylinos.cn
+Fixes: ce1039bd3a89 ("tracing: Fix enabling of syscall events on the command line")
+Signed-off-by: Huiwen He <hehuiwen@kylinos.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events.c |   52 +++++++++++++++++++++++++++++++-------------
+ 1 file changed, 37 insertions(+), 15 deletions(-)
+
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -4512,26 +4512,22 @@ static __init int event_trace_memsetup(v
+       return 0;
+ }
+-__init void
+-early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
++/*
++ * Helper function to enable or disable a comma-separated list of events
++ * from the bootup buffer.
++ */
++static __init void __early_set_events(struct trace_array *tr, char *buf, bool enable)
+ {
+       char *token;
+-      int ret;
+-
+-      while (true) {
+-              token = strsep(&buf, ",");
+-
+-              if (!token)
+-                      break;
++      while ((token = strsep(&buf, ","))) {
+               if (*token) {
+-                      /* Restarting syscalls requires that we stop them first */
+-                      if (disable_first)
++                      if (enable) {
++                              if (ftrace_set_clr_event(tr, token, 1))
++                                      pr_warn("Failed to enable trace event: %s\n", token);
++                      } else {
+                               ftrace_set_clr_event(tr, token, 0);
+-
+-                      ret = ftrace_set_clr_event(tr, token, 1);
+-                      if (ret)
+-                              pr_warn("Failed to enable trace event: %s\n", token);
++                      }
+               }
+               /* Put back the comma to allow this to be called again */
+@@ -4540,6 +4536,32 @@ early_enable_events(struct trace_array *
+       }
+ }
++/**
++ * early_enable_events - enable events from the bootup buffer
++ * @tr: The trace array to enable the events in
++ * @buf: The buffer containing the comma separated list of events
++ * @disable_first: If true, disable all events in @buf before enabling them
++ *
++ * This function enables events from the bootup buffer. If @disable_first
++ * is true, it will first disable all events in the buffer before enabling
++ * them.
++ *
++ * For syscall events, which rely on a global refcount to register the
++ * SYSCALL_WORK_SYSCALL_TRACEPOINT flag (especially for pid 1), we must
++ * ensure the refcount hits zero before re-enabling them. A simple
++ * "disable then enable" per-event is not enough if multiple syscalls are
++ * used, as the refcount will stay above zero. Thus, we need a two-phase
++ * approach: disable all, then enable all.
++ */
++__init void
++early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
++{
++      if (disable_first)
++              __early_set_events(tr, buf, false);
++
++      __early_set_events(tr, buf, true);
++}
++
+ static __init int event_trace_enable(void)
+ {
+       struct trace_array *tr = top_trace_array();