--- /dev/null
+From f2e11b8ae6d992b3031f74cd765b81ef8cd51d36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 +0100
+Subject: ALSA: usb-audio: Cap the packet size pre-calculations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7fe8dec3f628e9779f1631576f8e693370050348 ]
+
+We calculate the possible packet sizes beforehand for adaptive and
+synchronous endpoints, but we didn't take care of the max frame size
+for those pre-calculated values. When a device or a bus limits the
+packet size, a high sample rate or a high number of channels may lead
+to the packet sizes that are larger than the given limit, which
+results in an error from the USB core at submitting URBs.
+
+As a simple workaround, just add the sanity checks of pre-calculated
+packet sizes to have the upper boundary of ep->maxframesize.
+
+Fixes: f0bd62b64016 ("ALSA: usb-audio: Improve frames size computation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221076
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 21bcdc811a810..8e2b90cb5b952 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1099,6 +1099,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
+ return -EINVAL;
+ }
+
++ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
++ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
++
+ /* calculate the frequency in 16.16 format */
+ ep->freqm = ep->freqn;
+ ep->freqshift = INT_MIN;
+--
+2.51.0
+
--- /dev/null
+From cba6ff904a4e9ad4505e594c1a6c714a895c2cb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 12:35:09 +0100
+Subject: ARM: clean up the memset64() C wrapper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+
+[ Upstream commit b52343d1cb47bb27ca32a3f4952cc2fd3cd165bf ]
+
+The current logic to split the 64-bit argument into its 32-bit halves is
+byte-order specific and a bit clunky. Use a union instead which is
+easier to read and works in all cases.
+
+GCC still generates the same machine code.
+
+While at it, rename the arguments of the __memset64() prototype to
+actually reflect their semantics.
+
+Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: Ben Hutchings <ben@decadent.org.uk> # for -stable
+Link: https://lore.kernel.org/all/1a11526ae3d8664f705b541b8d6ea57b847b49a8.camel@decadent.org.uk/
+Suggested-by: Matthew Wilcox <willy@infradead.org> # for -stable
+Link: https://lore.kernel.org/all/aZonkWMwpbFhzDJq@casper.infradead.org/
+---
+ arch/arm/include/asm/string.h | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
+index b5ad23acb303a..369781ec55112 100644
+--- a/arch/arm/include/asm/string.h
++++ b/arch/arm/include/asm/string.h
+@@ -33,13 +33,17 @@ static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+ }
+
+ #define __HAVE_ARCH_MEMSET64
+-extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
++extern void *__memset64(uint64_t *, uint32_t first, __kernel_size_t, uint32_t second);
+ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+ {
+- if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+- return __memset64(p, v, n * 8, v >> 32);
+- else
+- return __memset64(p, v >> 32, n * 8, v);
++ union {
++ uint64_t val;
++ struct {
++ uint32_t first, second;
++ };
++ } word = { .val = v };
++
++ return __memset64(p, word.first, n * 8, word.second);
+ }
+
+ #endif
+--
+2.51.0
+
--- /dev/null
+From eb55bb8fbe92dfd505484503b78d46f633a7ee1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:21:44 +0000
+Subject: btrfs: fix incorrect key offset in error message in
+ check_dev_extent_item()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 511dc8912ae3e929c1a182f5e6b2326516fd42a0 ]
+
+Fix the error message in check_dev_extent_item(), when an overlapping
+stripe is encountered. For dev extents, objectid is the disk number and
+offset the physical address, so prev_key->objectid should actually be
+prev_key->offset.
+
+(I can't take any credit for this one - this was discovered by Chris and
+his friend Claude.)
+
+Reported-by: Chris Mason <clm@fb.com>
+Fixes: 008e2512dc56 ("btrfs: tree-checker: add dev extent item checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index fd4768c5e439a..dd27fdb9521a8 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1630,7 +1630,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+- prev_key->objectid, prev_len, key->offset);
++ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 76d1836158cc432a99311594483e710d5ff21da3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 19:43:18 +0100
+Subject: ip6_tunnel: Fix usage of skb_vlan_inet_prepare()
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+Backports of commit 81c734dae203 "ip6_tunnel: use
+skb_vlan_inet_prepare() in __ip6_tnl_rcv()" broke IPv6 tunnelling in
+stable branches 5.10-6.12 inclusive. This is because the return value
+of skb_vlan_inet_prepare() had the opposite sense (0 for error rather
+than for success) before commit 9990ddf47d416 "net: tunnel: make
+skb_vlan_inet_prepare() return drop reasons".
+
+For branches including commit c504e5c2f964 "net: skb: introduce
+kfree_skb_reason()" etc. (i.e. 6.1 and newer) it was simple to
+backport commit 9990ddf47d416, but for 5.10 and 5.15 that doesn't seem
+to be practical.
+
+So just reverse the sense of the return value test here.
+
+Fixes: f9c5c5b791d3 ("ip6_tunnel: use skb_vlan_inet_prepare() in __ip6_tnl_rcv()")
+Fixes: 64c71d60a21a ("ip6_tunnel: use skb_vlan_inet_prepare() in __ip6_tnl_rcv()")
+Signed-off-by: Ben Hutchings <benh@debian.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ip6_tunnel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 855622a6a304f..e8c4e02e75d43 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -876,7 +876,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+
+ skb_reset_network_header(skb);
+
+- if (skb_vlan_inet_prepare(skb, true)) {
++ if (!skb_vlan_inet_prepare(skb, true)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+--
+2.51.0
+
--- /dev/null
+From 0393e66095ee561bf7d033a6e99572b71a941656 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:29:09 +0100
+Subject: perf: Fix __perf_event_overflow() vs perf_remove_from_context() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c9bc1753b3cc41d0e01fbca7f035258b5f4db0ae ]
+
+Make sure that __perf_event_overflow() runs with IRQs disabled for all
+possible callchains. Specifically the software events can end up running
+it with only preemption disabled.
+
+This opens up a race vs perf_event_exit_event() and friends that will go
+and free various things the overflow path expects to be present, like
+the BPF program.
+
+Fixes: 592903cdcbf6 ("perf_counter: add an event_list")
+Reported-by: Simond Hu <cmdhh1767@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Simond Hu <cmdhh1767@gmail.com>
+Link: https://patch.msgid.link/20260224122909.GV1395416@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 42 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index c9cd1f622a1fc..269b505a6fa63 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9127,6 +9127,13 @@ int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+ {
++ /*
++ * Entry point from hardware PMI, interrupts should be disabled here.
++ * This serializes us against perf_event_remove_from_context() in
++ * things like perf_event_release_kernel().
++ */
++ lockdep_assert_irqs_disabled();
++
+ return __perf_event_overflow(event, 1, data, regs);
+ }
+
+@@ -9207,6 +9214,19 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ {
+ struct hw_perf_event *hwc = &event->hw;
+
++ /*
++ * This is:
++ * - software preempt
++ * - tracepoint preempt
++ * - tp_target_task irq (ctx->lock)
++ * - uprobes preempt/irq
++ * - kprobes preempt/irq
++ * - hw_breakpoint irq
++ *
++ * Any of these are sufficient to hold off RCU and thus ensure @event
++ * exists.
++ */
++ lockdep_assert_preemption_disabled();
+ local64_add(nr, &event->count);
+
+ if (!regs)
+@@ -9215,6 +9235,16 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ if (!is_sampling_event(event))
+ return;
+
++ /*
++ * Serialize against event_function_call() IPIs like normal overflow
++ * event handling. Specifically, must not allow
++ * perf_event_release_kernel() -> perf_remove_from_context() to make
++ * progress and 'release' the event from under us.
++ */
++ guard(irqsave)();
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+@@ -9631,6 +9661,11 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ struct perf_sample_data data;
+ struct perf_event *event;
+
++ /*
++ * Per being a tracepoint, this runs with preemption disabled.
++ */
++ lockdep_assert_preemption_disabled();
++
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = entry_size,
+@@ -10063,6 +10098,11 @@ void perf_bp_event(struct perf_event *bp, void *data)
+ struct perf_sample_data sample;
+ struct pt_regs *regs = data;
+
++ /*
++ * Exception context, will have interrupts disabled.
++ */
++ lockdep_assert_irqs_disabled();
++
+ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+@@ -10516,7 +10556,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
+- if (__perf_event_overflow(event, 1, &data, regs))
++ if (perf_event_overflow(event, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f814461c7e3165a3b460d748ee184bc7f0eccafe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 11:23:27 -0800
+Subject: scsi: lpfc: Properly set WC for DPP mapping
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit bffda93a51b40afd67c11bf558dc5aae83ca0943 ]
+
+Using set_memory_wc() to enable write-combining for the DPP portion of
+the MMIO mapping is wrong as set_memory_*() is meant to operate on RAM
+only, not MMIO mappings. In fact, as used currently triggers a BUG_ON()
+with enabled CONFIG_DEBUG_VIRTUAL.
+
+Simply map the DPP region separately and in addition to the already
+existing mappings, avoiding any possible negative side effects for
+these.
+
+Fixes: 1351e69fc6db ("scsi: lpfc: Add push-to-adapter support to sli4")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Reviewed-by: Mathias Krause <minipli@grsecurity.net>
+Link: https://patch.msgid.link/20260212192327.141104-1-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 ++
+ drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++++++++++++++++++++------
+ drivers/scsi/lpfc/lpfc_sli4.h | 3 +++
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 5f2009327a593..0f144fbf2a6cb 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -10558,6 +10558,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
++ if (phba->sli4_hba.dpp_regs_memmap_wc_p)
++ iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 49931577da38b..0b3242b058b99 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -14966,6 +14966,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+ return NULL;
+ }
+
++static __maybe_unused void __iomem *
++lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
++{
++
++ /* DPP region is supposed to cover 64-bit BAR2 */
++ if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
++ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
++ "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
++ dpp_barset);
++ return NULL;
++ }
++
++ if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
++ void __iomem *dpp_map;
++
++ dpp_map = ioremap_wc(phba->pci_bar2_map,
++ pci_resource_len(phba->pcidev,
++ PCI_64BIT_BAR4));
++
++ if (dpp_map)
++ phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
++ }
++
++ return phba->sli4_hba.dpp_regs_memmap_wc_p;
++}
++
+ /**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+@@ -15876,9 +15902,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ uint8_t dpp_barset;
+ uint32_t dpp_offset;
+ uint8_t wq_create_version;
+-#ifdef CONFIG_X86
+- unsigned long pg_addr;
+-#endif
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+@@ -16070,14 +16093,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+
+ #ifdef CONFIG_X86
+ /* Enable combined writes for DPP aperture */
+- pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
+- rc = set_memory_wc(pg_addr, 1);
+- if (rc) {
++ bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
++ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3272 Cannot setup Combined "
+ "Write on WQ[%d] - disable DPP\n",
+ wq->queue_id);
+ phba->cfg_enable_dpp = 0;
++ } else {
++ wq->dpp_regaddr = bar_memmap_p + dpp_offset;
+ }
+ #else
+ phba->cfg_enable_dpp = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index 100cb1a94811b..80c168b2a2ddd 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -772,6 +772,9 @@ struct lpfc_sli4_hba {
+ void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
+ * dpp registers
+ */
++ void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
++ * dpp registers with write combining
++ */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+--
+2.51.0
+
--- /dev/null
+From 08734876bcd908ade49915c2781d0ba946a3b60c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 18:37:57 +0800
+Subject: scsi: ufs: core: Move link recovery for hibern8 exit failure to
+ wl_resume
+
+From: Peter Wang <peter.wang@mediatek.com>
+
+[ Upstream commit 62c015373e1cdb1cdca824bd2dbce2dac0819467 ]
+
+Move the link recovery trigger from ufshcd_uic_pwr_ctrl() to
+__ufshcd_wl_resume(). Ensure link recovery is only attempted when hibern8
+exit fails during resume, not during hibern8 enter in suspend. Improve
+error handling and prevent unnecessary link recovery attempts.
+
+Fixes: 35dabf4503b9 ("scsi: ufs: core: Use link recovery when h8 exit fails during runtime resume")
+Signed-off-by: Peter Wang <peter.wang@mediatek.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223103906.2533654-1-peter.wang@mediatek.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index c5f41023c71b8..c7bf0e6bc303d 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -3880,14 +3880,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+- /*
+- * If the h8 exit fails during the runtime resume process, it becomes
+- * stuck and cannot be recovered through the error handler. To fix
+- * this, use link recovery instead of the error handler.
+- */
+- if (ret && hba->pm_op_in_progress)
+- ret = ufshcd_link_recovery(hba);
+-
+ return ret;
+ }
+
+@@ -8727,7 +8719,15 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+- goto vendor_suspend;
++ /*
++ * If the h8 exit fails during the runtime resume
++ * process, it becomes stuck and cannot be recovered
++ * through the error handler. To fix this, use link
++ * recovery instead of the error handler.
++ */
++ ret = ufshcd_link_recovery(hba);
++ if (ret)
++ goto vendor_suspend;
+ }
+ } else if (ufshcd_is_link_off(hba)) {
+ /*
+--
+2.51.0
+
+arm-clean-up-the-memset64-c-wrapper.patch
+ip6_tunnel-fix-usage-of-skb_vlan_inet_prepare.patch
+scsi-lpfc-properly-set-wc-for-dpp-mapping.patch
+scsi-ufs-core-move-link-recovery-for-hibern8-exit-fa.patch
+alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
+perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
+btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
--- /dev/null
+From f6b22602402a3a09ff4b2b541b7b034395ce07fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 +0100
+Subject: ALSA: usb-audio: Cap the packet size pre-calculations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7fe8dec3f628e9779f1631576f8e693370050348 ]
+
+We calculate the possible packet sizes beforehand for adaptive and
+synchronous endpoints, but we didn't take care of the max frame size
+for those pre-calculated values. When a device or a bus limits the
+packet size, a high sample rate or a high number of channels may lead
+to the packet sizes that are larger than the given limit, which
+results in an error from the USB core at submitting URBs.
+
+As a simple workaround, just add the sanity checks of pre-calculated
+packet sizes to have the upper boundary of ep->maxframesize.
+
+Fixes: f0bd62b64016 ("ALSA: usb-audio: Improve frames size computation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221076
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index e442a4fcead9b..a861915e07f3b 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1387,6 +1387,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ goto unlock;
+ }
+
++ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
++ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
++
+ /* calculate the frequency in 16.16 format */
+ ep->freqm = ep->freqn;
+ ep->freqshift = INT_MIN;
+--
+2.51.0
+
--- /dev/null
+From 99050c4348f8ee7097d3f2ef49341a53a6093e4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) the Air and
+Safe modes get disabled.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index afd7765b5913e..55fd74f198184 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1978,7 +1978,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.51.0
+
--- /dev/null
+From 3f824cea8545998a60217a49ef00f08fdcfb21ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jul 2023 19:56:45 +0300
+Subject: ALSA: usb-audio: Update for native DSD support quirks
+
+From: Jussi Laako <jussi@sonarnerd.net>
+
+[ Upstream commit f7fea075edfa085c25eb34c44ceacf3602537f98 ]
+
+Maintenance patch for native DSD support.
+
+Remove incorrect T+A device quirks. Move set of device quirks to vendor
+quirks. Add set of missing device and vendor quirks.
+
+Signed-off-by: Jussi Laako <jussi@sonarnerd.net>
+Link: https://lore.kernel.org/r/20230726165645.404311-1-jussi@sonarnerd.net
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Stable-dep-of: a8cc55bf81a4 ("ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 34 ++++++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 778304f349699..afd7765b5913e 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1631,8 +1631,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+
+ /* XMOS based USB DACs */
+ switch (chip->usb_id) {
+- case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
+- case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */
++ case USB_ID(0x139f, 0x5504): /* Nagra DAC */
++ case USB_ID(0x20b1, 0x3089): /* Mola-Mola DAC */
++ case USB_ID(0x2522, 0x0007): /* LH Labs Geek Out 1V5 */
++ case USB_ID(0x2522, 0x0009): /* LH Labs Geek Pulse X Inifinity 2V0 */
+ case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */
+ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
+ if (fp->altsetting == 2)
+@@ -1642,14 +1644,18 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
+ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
+ case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
+- case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
++ case USB_ID(0x16d0, 0x06b4): /* NuPrime Audio HD-AVP/AVA */
+ case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
++ case USB_ID(0x16d0, 0x09d8): /* NuPrime IDA-8 */
+ case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
++ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
+ case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
++ case USB_ID(0x20a0, 0x4143): /* WaveIO USB Audio 2.0 */
+ case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
+ case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
+ case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
+ case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
++ case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
+ case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
+ case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
+ case USB_ID(0x6b42, 0x0042): /* MSB Technology */
+@@ -1659,9 +1665,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+
+ /* Amanero Combo384 USB based DACs with native DSD support */
+ case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
+- case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
+- case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
+- case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
+ if (fp->altsetting == 2) {
+ switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
+ case 0x199:
+@@ -1817,6 +1820,9 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_IFACE_DELAY),
+ DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
+ QUIRK_FLAG_FORCE_IFACE_RESET),
++ DEVICE_FLG(0x0644, 0x806b, /* TEAC UD-701 */
++ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
++ QUIRK_FLAG_IFACE_DELAY),
+ DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+@@ -1871,6 +1877,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x154e, 0x3006, /* Marantz SA-14S1 */
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
++ DEVICE_FLG(0x154e, 0x300b, /* Marantz SA-KI RUBY / SA-12 */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x154e, 0x500e, /* Denon DN-X1600 */
+ QUIRK_FLAG_IGNORE_CLOCK_SOURCE),
+ DEVICE_FLG(0x1686, 0x00dd, /* Zoom R16/24 */
+@@ -1925,6 +1933,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x21b4, 0x0230, /* Ayre QB-9 Twenty */
++ QUIRK_FLAG_DSD_RAW),
++ DEVICE_FLG(0x21b4, 0x0232, /* Ayre QX-5 Twenty */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ QUIRK_FLAG_SET_IFACE_FIRST),
+ DEVICE_FLG(0x262a, 0x9302, /* ddHiFi TC44C */
+@@ -1967,12 +1979,18 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+ QUIRK_FLAG_VALIDATE_RATES),
++ VENDOR_FLG(0x1511, /* AURALiC */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x18d1, /* iBasso devices */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x1de7, /* Phoenix Audio */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ VENDOR_FLG(0x20b1, /* XMOS based devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x21ed, /* Accuphase Laboratory */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x22d9, /* Oppo */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x23ba, /* Playback Design */
+@@ -1988,10 +2006,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2ab6, /* T+A devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x2d87, /* Cayin device */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3336, /* HEM devices */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3353, /* Khadas devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x35f4, /* MSB Technology */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3842, /* EVGA */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0xc502, /* HiBy devices */
+--
+2.51.0
+
--- /dev/null
+From 0117cc867f05f7dffdd76cd634c456ebc46a9485 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:31 +0100
+Subject: ALSA: usb-audio: Use inclusive terms
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 4e9113c533acee2ba1f72fd68ee6ecd36b64484e ]
+
+Replace the remaining with inclusive terms; it's only this function
+name we overlooked at the previous conversion.
+
+Fixes: 53837b4ac2bd ("ALSA: usb-audio: Replace slave/master terms")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index a861915e07f3b..6ba99ff2cefcf 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -159,8 +159,8 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
+ * This won't be used for implicit feedback which takes the packet size
+ * returned from the sync source
+ */
+-static int slave_next_packet_size(struct snd_usb_endpoint *ep,
+- unsigned int avail)
++static int synced_next_packet_size(struct snd_usb_endpoint *ep,
++ unsigned int avail)
+ {
+ unsigned long flags;
+ unsigned int phase;
+@@ -229,7 +229,7 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ }
+
+ if (ep->sync_source)
+- return slave_next_packet_size(ep, avail);
++ return synced_next_packet_size(ep, avail);
+ else
+ return next_packet_size(ep, avail);
+ }
+--
+2.51.0
+
--- /dev/null
+From 0efee666cc9df41b34a00279e67d9eb848d01d60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 12:35:09 +0100
+Subject: ARM: clean up the memset64() C wrapper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+
+[ Upstream commit b52343d1cb47bb27ca32a3f4952cc2fd3cd165bf ]
+
+The current logic to split the 64-bit argument into its 32-bit halves is
+byte-order specific and a bit clunky. Use a union instead which is
+easier to read and works in all cases.
+
+GCC still generates the same machine code.
+
+While at it, rename the arguments of the __memset64() prototype to
+actually reflect their semantics.
+
+Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: Ben Hutchings <ben@decadent.org.uk> # for -stable
+Link: https://lore.kernel.org/all/1a11526ae3d8664f705b541b8d6ea57b847b49a8.camel@decadent.org.uk/
+Suggested-by: Matthew Wilcox <willy@infradead.org> # for -stable
+Link: https://lore.kernel.org/all/aZonkWMwpbFhzDJq@casper.infradead.org/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/include/asm/string.h | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
+index c35250c4991bc..96fc6cf460ecb 100644
+--- a/arch/arm/include/asm/string.h
++++ b/arch/arm/include/asm/string.h
+@@ -39,13 +39,17 @@ static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+ }
+
+ #define __HAVE_ARCH_MEMSET64
+-extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
++extern void *__memset64(uint64_t *, uint32_t first, __kernel_size_t, uint32_t second);
+ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+ {
+- if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+- return __memset64(p, v, n * 8, v >> 32);
+- else
+- return __memset64(p, v >> 32, n * 8, v);
++ union {
++ uint64_t val;
++ struct {
++ uint32_t first, second;
++ };
++ } word = { .val = v };
++
++ return __memset64(p, word.first, n * 8, word.second);
+ }
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 320f2644924e030d874a1fb5ee7d4796f401cf57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 05:34:44 +0000
+Subject: bpf: Fix stack-out-of-bounds write in devmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b7bf516c3ecd9a2aae2dc2635178ab87b734fef1 ]
+
+get_upper_ifindexes() iterates over all upper devices and writes their
+indices into an array without checking bounds.
+
+Also the callers assume that the max number of upper devices is
+MAX_NEST_DEV and allocate excluded_devices[1+MAX_NEST_DEV] on the stack,
+but that assumption is not correct and the number of upper devices could
+be larger than MAX_NEST_DEV (e.g., many macvlans), causing a
+stack-out-of-bounds write.
+
+Add a max parameter to get_upper_ifindexes() to avoid the issue.
+When there are too many upper devices, return -EOVERFLOW and abort the
+redirect.
+
+To reproduce, create more than MAX_NEST_DEV(8) macvlans on a device with
+an XDP program attached using BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS.
+Then send a packet to the device to trigger the XDP redirect path.
+
+Reported-by: syzbot+10cc7f13760b31bd2e61@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698c4ce3.050a0220.340abe.000b.GAE@google.com/T/
+Fixes: aeea1b86f936 ("bpf, devmap: Exclude XDP broadcast to master device")
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Link: https://lore.kernel.org/r/20260225053506.4738-1-kohei@enjuk.jp
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 5e2e1c3284a39..2bfdca506a4de 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -570,18 +570,22 @@ static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifin
+ }
+
+ /* Get ifindex of each upper device. 'indexes' must be able to hold at
+- * least MAX_NEST_DEV elements.
+- * Returns the number of ifindexes added.
++ * least 'max' elements.
++ * Returns the number of ifindexes added, or -EOVERFLOW if there are too
++ * many upper devices.
+ */
+-static int get_upper_ifindexes(struct net_device *dev, int *indexes)
++static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
+ {
+ struct net_device *upper;
+ struct list_head *iter;
+ int n = 0;
+
+ netdev_for_each_upper_dev_rcu(dev, upper, iter) {
++ if (n >= max)
++ return -EOVERFLOW;
+ indexes[n++] = upper->ifindex;
+ }
++
+ return n;
+ }
+
+@@ -597,7 +601,11 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev_rx->ifindex;
+ }
+
+@@ -715,7 +723,11 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev->ifindex;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f268c39af9d58924e1a1797941aa51e1c81c7156 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:21:44 +0000
+Subject: btrfs: fix incorrect key offset in error message in
+ check_dev_extent_item()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 511dc8912ae3e929c1a182f5e6b2326516fd42a0 ]
+
+Fix the error message in check_dev_extent_item(), when an overlapping
+stripe is encountered. For dev extents, objectid is the disk number and
+offset the physical address, so prev_key->objectid should actually be
+prev_key->offset.
+
+(I can't take any credit for this one - this was discovered by Chris and
+his friend Claude.)
+
+Reported-by: Chris Mason <clm@fb.com>
+Fixes: 008e2512dc56 ("btrfs: tree-checker: add dev extent item checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 8f96ddaceb9a7..86042c1f89f0b 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1680,7 +1680,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+- prev_key->objectid, prev_len, key->offset);
++ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From c7ecc02587c70c7d044d18bfee61a989eb5f8d73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 19:43:18 +0100
+Subject: ip6_tunnel: Fix usage of skb_vlan_inet_prepare()
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+Backports of commit 81c734dae203 "ip6_tunnel: use
+skb_vlan_inet_prepare() in __ip6_tnl_rcv()" broke IPv6 tunnelling in
+stable branches 5.10-6.12 inclusive. This is because the return value
+of skb_vlan_inet_prepare() had the opposite sense (0 for error rather
+than for success) before commit 9990ddf47d416 "net: tunnel: make
+skb_vlan_inet_prepare() return drop reasons".
+
+For branches including commit c504e5c2f964 "net: skb: introduce
+kfree_skb_reason()" etc. (i.e. 6.1 and newer) it was simple to
+backport commit 9990ddf47d416, but for 5.10 and 5.15 that doesn't seem
+to be practical.
+
+So just reverse the sense of the return value test here.
+
+Fixes: f9c5c5b791d3 ("ip6_tunnel: use skb_vlan_inet_prepare() in __ip6_tnl_rcv()")
+Fixes: 64c71d60a21a ("ip6_tunnel: use skb_vlan_inet_prepare() in __ip6_tnl_rcv()")
+Signed-off-by: Ben Hutchings <benh@debian.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ip6_tunnel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 553851e3aca14..7c1b5d01f8203 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -846,7 +846,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+
+ skb_reset_network_header(skb);
+
+- if (skb_vlan_inet_prepare(skb, true)) {
++ if (!skb_vlan_inet_prepare(skb, true)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+--
+2.51.0
+
--- /dev/null
+From 0f5ef0945f54f2e5f7a708e8e1da39af12e23940 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:29:09 +0100
+Subject: perf: Fix __perf_event_overflow() vs perf_remove_from_context() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c9bc1753b3cc41d0e01fbca7f035258b5f4db0ae ]
+
+Make sure that __perf_event_overflow() runs with IRQs disabled for all
+possible callchains. Specifically the software events can end up running
+it with only preemption disabled.
+
+This opens up a race vs perf_event_exit_event() and friends that will go
+and free various things the overflow path expects to be present, like
+the BPF program.
+
+Fixes: 592903cdcbf6 ("perf_counter: add an event_list")
+Reported-by: Simond Hu <cmdhh1767@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Simond Hu <cmdhh1767@gmail.com>
+Link: https://patch.msgid.link/20260224122909.GV1395416@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 42 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 156221bd56615..b4b77170cbd10 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9526,6 +9526,13 @@ int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+ {
++ /*
++ * Entry point from hardware PMI, interrupts should be disabled here.
++ * This serializes us against perf_event_remove_from_context() in
++ * things like perf_event_release_kernel().
++ */
++ lockdep_assert_irqs_disabled();
++
+ return __perf_event_overflow(event, 1, data, regs);
+ }
+
+@@ -9606,6 +9613,19 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ {
+ struct hw_perf_event *hwc = &event->hw;
+
++ /*
++ * This is:
++ * - software preempt
++ * - tracepoint preempt
++ * - tp_target_task irq (ctx->lock)
++ * - uprobes preempt/irq
++ * - kprobes preempt/irq
++ * - hw_breakpoint irq
++ *
++ * Any of these are sufficient to hold off RCU and thus ensure @event
++ * exists.
++ */
++ lockdep_assert_preemption_disabled();
+ local64_add(nr, &event->count);
+
+ if (!regs)
+@@ -9614,6 +9634,16 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ if (!is_sampling_event(event))
+ return;
+
++ /*
++ * Serialize against event_function_call() IPIs like normal overflow
++ * event handling. Specifically, must not allow
++ * perf_event_release_kernel() -> perf_remove_from_context() to make
++ * progress and 'release' the event from under us.
++ */
++ guard(irqsave)();
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+@@ -10030,6 +10060,11 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ struct perf_sample_data data;
+ struct perf_event *event;
+
++ /*
++ * Per being a tracepoint, this runs with preemption disabled.
++ */
++ lockdep_assert_preemption_disabled();
++
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = entry_size,
+@@ -10478,6 +10513,11 @@ void perf_bp_event(struct perf_event *bp, void *data)
+ struct perf_sample_data sample;
+ struct pt_regs *regs = data;
+
++ /*
++ * Exception context, will have interrupts disabled.
++ */
++ lockdep_assert_irqs_disabled();
++
+ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+@@ -10931,7 +10971,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
+- if (__perf_event_overflow(event, 1, &data, regs))
++ if (perf_event_overflow(event, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 856473fd16a0d6dffdfb1dc6169ddea52d460c8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 11:23:27 -0800
+Subject: scsi: lpfc: Properly set WC for DPP mapping
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit bffda93a51b40afd67c11bf558dc5aae83ca0943 ]
+
+Using set_memory_wc() to enable write-combining for the DPP portion of
+the MMIO mapping is wrong as set_memory_*() is meant to operate on RAM
+only, not MMIO mappings. In fact, as used currently triggers a BUG_ON()
+with enabled CONFIG_DEBUG_VIRTUAL.
+
+Simply map the DPP region separately and in addition to the already
+existing mappings, avoiding any possible negative side effects for
+these.
+
+Fixes: 1351e69fc6db ("scsi: lpfc: Add push-to-adapter support to sli4")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Reviewed-by: Mathias Krause <minipli@grsecurity.net>
+Link: https://patch.msgid.link/20260212192327.141104-1-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 ++
+ drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++++++++++++++++++++------
+ drivers/scsi/lpfc/lpfc_sli4.h | 3 +++
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index e2f9b23a3fbb2..d7a3304de305c 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11812,6 +11812,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
++ if (phba->sli4_hba.dpp_regs_memmap_wc_p)
++ iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index fb139e1e35ca3..38c8e4c410232 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -16161,6 +16161,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+ return NULL;
+ }
+
++static __maybe_unused void __iomem *
++lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
++{
++
++ /* DPP region is supposed to cover 64-bit BAR2 */
++ if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
++ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
++ "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
++ dpp_barset);
++ return NULL;
++ }
++
++ if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
++ void __iomem *dpp_map;
++
++ dpp_map = ioremap_wc(phba->pci_bar2_map,
++ pci_resource_len(phba->pcidev,
++ PCI_64BIT_BAR4));
++
++ if (dpp_map)
++ phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
++ }
++
++ return phba->sli4_hba.dpp_regs_memmap_wc_p;
++}
++
+ /**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+@@ -17071,9 +17097,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ uint8_t dpp_barset;
+ uint32_t dpp_offset;
+ uint8_t wq_create_version;
+-#ifdef CONFIG_X86
+- unsigned long pg_addr;
+-#endif
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+@@ -17259,14 +17282,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+
+ #ifdef CONFIG_X86
+ /* Enable combined writes for DPP aperture */
+- pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
+- rc = set_memory_wc(pg_addr, 1);
+- if (rc) {
++ bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
++ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3272 Cannot setup Combined "
+ "Write on WQ[%d] - disable DPP\n",
+ wq->queue_id);
+ phba->cfg_enable_dpp = 0;
++ } else {
++ wq->dpp_regaddr = bar_memmap_p + dpp_offset;
+ }
+ #else
+ phba->cfg_enable_dpp = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index 5962cf508842f..762c4178a878d 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -781,6 +781,9 @@ struct lpfc_sli4_hba {
+ void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
+ * dpp registers
+ */
++ void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
++ * dpp registers with write combining
++ */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+--
+2.51.0
+
--- /dev/null
+From 588400efe32e65b038bcee0e91a4a1ec171c1f26 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Sep 2024 15:30:05 -0700
+Subject: scsi: ufs: core: Always initialize the UIC done completion
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit b1e8c53749adb795bfb0bf4e2f7836e26684bb90 ]
+
+Simplify __ufshcd_send_uic_cmd() by always initializing the
+uic_cmd::done completion. This is fine since the time required to
+initialize a completion is small compared to the time required to
+process an UIC command.
+
+Reviewed-by: Peter Wang <peter.wang@mediatek.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20240912223019.3510966-5-bvanassche@acm.org
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: 62c015373e1c ("scsi: ufs: core: Move link recovery for hibern8 exit failure to wl_resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 736a2dd630a7a..9d9088e207cc2 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -2300,13 +2300,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+- * @completion: initialize the completion only if this is set to true
+ *
+ * Returns 0 only if success.
+ */
+ static int
+-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+- bool completion)
++__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ {
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
+@@ -2316,8 +2314,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ return -EIO;
+ }
+
+- if (completion)
+- init_completion(&uic_cmd->done);
++ init_completion(&uic_cmd->done);
+
+ uic_cmd->cmd_active = 1;
+ ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+@@ -2340,7 +2337,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
++ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+@@ -3969,7 +3966,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ reenable_intr = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
++ ret = __ufshcd_send_uic_cmd(hba, cmd);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+--
+2.51.0
+
--- /dev/null
+From 621830f63d609e30d77856f92e4f1b3759c698de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 18:37:57 +0800
+Subject: scsi: ufs: core: Move link recovery for hibern8 exit failure to
+ wl_resume
+
+From: Peter Wang <peter.wang@mediatek.com>
+
+[ Upstream commit 62c015373e1cdb1cdca824bd2dbce2dac0819467 ]
+
+Move the link recovery trigger from ufshcd_uic_pwr_ctrl() to
+__ufshcd_wl_resume(). Ensure link recovery is only attempted when hibern8
+exit fails during resume, not during hibern8 enter in suspend. Improve
+error handling and prevent unnecessary link recovery attempts.
+
+Fixes: 35dabf4503b9 ("scsi: ufs: core: Use link recovery when h8 exit fails during runtime resume")
+Signed-off-by: Peter Wang <peter.wang@mediatek.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223103906.2533654-1-peter.wang@mediatek.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 9d9088e207cc2..55eaf04d75932 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -4018,14 +4018,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+- /*
+- * If the h8 exit fails during the runtime resume process, it becomes
+- * stuck and cannot be recovered through the error handler. To fix
+- * this, use link recovery instead of the error handler.
+- */
+- if (ret && hba->pm_op_in_progress)
+- ret = ufshcd_link_recovery(hba);
+-
+ return ret;
+ }
+
+@@ -8914,7 +8906,15 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+- goto vendor_suspend;
++ /*
++ * If the h8 exit fails during the runtime resume
++ * process, it becomes stuck and cannot be recovered
++ * through the error handler. To fix this, use link
++ * recovery instead of the error handler.
++ */
++ ret = ufshcd_link_recovery(hba);
++ if (ret)
++ goto vendor_suspend;
+ }
+ } else if (ufshcd_is_link_off(hba)) {
+ /*
+--
+2.51.0
+
+arm-clean-up-the-memset64-c-wrapper.patch
+ip6_tunnel-fix-usage-of-skb_vlan_inet_prepare.patch
+scsi-lpfc-properly-set-wc-for-dpp-mapping.patch
+alsa-usb-audio-update-for-native-dsd-support-quirks.patch
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch
+scsi-ufs-core-always-initialize-the-uic-done-complet.patch
+scsi-ufs-core-move-link-recovery-for-hibern8-exit-fa.patch
+alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
+alsa-usb-audio-use-inclusive-terms.patch
+perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
+btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
+bpf-fix-stack-out-of-bounds-write-in-devmap.patch
--- /dev/null
+From 4406eddabe78eac9b3ad9a5e7febfa93222dbe65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 +0100
+Subject: ALSA: usb-audio: Cap the packet size pre-calculations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7fe8dec3f628e9779f1631576f8e693370050348 ]
+
+We calculate the possible packet sizes beforehand for adaptive and
+synchronous endpoints, but we didn't take care of the max frame size
+for those pre-calculated values. When a device or a bus limits the
+packet size, a high sample rate or a high number of channels may lead
+to the packet sizes that are larger than the given limit, which
+results in an error from the USB core at submitting URBs.
+
+As a simple workaround, just add the sanity checks of pre-calculated
+packet sizes to have the upper boundary of ep->maxframesize.
+
+Fixes: f0bd62b64016 ("ALSA: usb-audio: Improve frames size computation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221076
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 86a8624e8781e..8f486c5c938f2 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1392,6 +1392,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ goto unlock;
+ }
+
++ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
++ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
++
+ /* calculate the frequency in 16.16 format */
+ ep->freqm = ep->freqn;
+ ep->freqshift = INT_MIN;
+--
+2.51.0
+
--- /dev/null
+From 68226dd3c40fbd47aefcc9bdc0bf6493f0a8bfe9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) the Air and
+Safe modes get disabled.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 755ba2fe05b5a..f9e998fad773c 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2298,7 +2298,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.51.0
+
--- /dev/null
+From c46946d3ecd478122d17d35cc965bbf405677575 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:31 +0100
+Subject: ALSA: usb-audio: Use inclusive terms
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 4e9113c533acee2ba1f72fd68ee6ecd36b64484e ]
+
+Replace the remaining with inclusive terms; it's only this function
+name we overlooked at the previous conversion.
+
+Fixes: 53837b4ac2bd ("ALSA: usb-audio: Replace slave/master terms")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 8f486c5c938f2..b5af8dc1e48de 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -160,8 +160,8 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
+ * This won't be used for implicit feedback which takes the packet size
+ * returned from the sync source
+ */
+-static int slave_next_packet_size(struct snd_usb_endpoint *ep,
+- unsigned int avail)
++static int synced_next_packet_size(struct snd_usb_endpoint *ep,
++ unsigned int avail)
+ {
+ unsigned long flags;
+ unsigned int phase;
+@@ -230,7 +230,7 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ }
+
+ if (ep->sync_source)
+- return slave_next_packet_size(ep, avail);
++ return synced_next_packet_size(ep, avail);
+ else
+ return next_packet_size(ep, avail);
+ }
+--
+2.51.0
+
--- /dev/null
+From d07340c348a1039fdf08001f916b91484c5831bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 05:34:44 +0000
+Subject: bpf: Fix stack-out-of-bounds write in devmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b7bf516c3ecd9a2aae2dc2635178ab87b734fef1 ]
+
+get_upper_ifindexes() iterates over all upper devices and writes their
+indices into an array without checking bounds.
+
+Also the callers assume that the max number of upper devices is
+MAX_NEST_DEV and allocate excluded_devices[1+MAX_NEST_DEV] on the stack,
+but that assumption is not correct and the number of upper devices could
+be larger than MAX_NEST_DEV (e.g., many macvlans), causing a
+stack-out-of-bounds write.
+
+Add a max parameter to get_upper_ifindexes() to avoid the issue.
+When there are too many upper devices, return -EOVERFLOW and abort the
+redirect.
+
+To reproduce, create more than MAX_NEST_DEV(8) macvlans on a device with
+an XDP program attached using BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS.
+Then send a packet to the device to trigger the XDP redirect path.
+
+Reported-by: syzbot+10cc7f13760b31bd2e61@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698c4ce3.050a0220.340abe.000b.GAE@google.com/T/
+Fixes: aeea1b86f936 ("bpf, devmap: Exclude XDP broadcast to master device")
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Link: https://lore.kernel.org/r/20260225053506.4738-1-kohei@enjuk.jp
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index ac1d5dbc89185..5e05732db2368 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -571,18 +571,22 @@ static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifin
+ }
+
+ /* Get ifindex of each upper device. 'indexes' must be able to hold at
+- * least MAX_NEST_DEV elements.
+- * Returns the number of ifindexes added.
++ * least 'max' elements.
++ * Returns the number of ifindexes added, or -EOVERFLOW if there are too
++ * many upper devices.
+ */
+-static int get_upper_ifindexes(struct net_device *dev, int *indexes)
++static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
+ {
+ struct net_device *upper;
+ struct list_head *iter;
+ int n = 0;
+
+ netdev_for_each_upper_dev_rcu(dev, upper, iter) {
++ if (n >= max)
++ return -EOVERFLOW;
+ indexes[n++] = upper->ifindex;
+ }
++
+ return n;
+ }
+
+@@ -598,7 +602,11 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev_rx->ifindex;
+ }
+
+@@ -716,7 +724,11 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev->ifindex;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 220914884919580679132b2f5cefefd152d6ca66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:41 +0000
+Subject: btrfs: fix compat mask in error messages in btrfs_check_features()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 587bb33b10bda645a1028c1737ad3992b3d7cf61 ]
+
+Commit d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency
+checks") introduced a regression when it comes to handling unsupported
+incompat or compat_ro flags. Beforehand we only printed the flags that
+we didn't recognize, afterwards we printed them all, which is less
+useful. Fix the error handling so it behaves like it used to.
+
+Fixes: d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 52e083b63070d..0ff373022c11f 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3356,7 +3356,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ btrfs_err(fs_info,
+ "cannot mount because of unknown incompat features (0x%llx)",
+- incompat);
++ incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP);
+ return -EINVAL;
+ }
+
+@@ -3388,7 +3388,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (compat_ro_unsupp && is_rw_mount) {
+ btrfs_err(fs_info,
+ "cannot mount read-write because of unknown compat_ro features (0x%llx)",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+@@ -3401,7 +3401,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
+ btrfs_err(fs_info,
+ "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 5a31eecb04d1ecfc6d50fd3f42b02e8684490200 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:21:44 +0000
+Subject: btrfs: fix incorrect key offset in error message in
+ check_dev_extent_item()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 511dc8912ae3e929c1a182f5e6b2326516fd42a0 ]
+
+Fix the error message in check_dev_extent_item(), when an overlapping
+stripe is encountered. For dev extents, objectid is the disk number and
+offset the physical address, so prev_key->objectid should actually be
+prev_key->offset.
+
+(I can't take any credit for this one - this was discovered by Chris and
+his friend Claude.)
+
+Reported-by: Chris Mason <clm@fb.com>
+Fixes: 008e2512dc56 ("btrfs: tree-checker: add dev extent item checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 6108cfab1ba59..d1b6bb8f08dd1 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1697,7 +1697,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+- prev_key->objectid, prev_len, key->offset);
++ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 7242b79bb6c3c6146ef21a79c1fe2f8848142b01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Aug 2023 16:19:19 -0400
+Subject: btrfs: move btrfs_crc32c_final into free-space-cache.c
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 102f2640a346e84cb5c2d19805a9dd38a776013c ]
+
+This is the only place this helper is used, take it out of ctree.h and
+move it into free-space-cache.c.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 511dc8912ae3 ("btrfs: fix incorrect key offset in error message in check_dev_extent_item()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.h | 5 -----
+ fs/btrfs/free-space-cache.c | 5 +++++
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index bd84a8b774a68..96146b920bdd3 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2827,11 +2827,6 @@ static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
+ return crc32c(crc, address, length);
+ }
+
+-static inline void btrfs_crc32c_final(u32 crc, u8 *result)
+-{
+- put_unaligned_le32(~crc, result);
+-}
+-
+ static inline u64 btrfs_name_hash(const char *name, int len)
+ {
+ return crc32c((u32)~1, name, len);
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 75ad735322c4a..9f4dae426037b 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -48,6 +48,11 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes, bool update_stats);
+
++static void btrfs_crc32c_final(u32 crc, u8 *result)
++{
++ put_unaligned_le32(~crc, result);
++}
++
+ static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
+ {
+ struct btrfs_free_space *info;
+--
+2.51.0
+
--- /dev/null
+From 1d8423ef769314b3819b37e7f759125de476fee6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 00:21:19 +0800
+Subject: drm/logicvc: Fix device node reference leak in
+ logicvc_drm_config_parse()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fef0e649f8b42bdffe4a916dd46e1b1e9ad2f207 ]
+
+The logicvc_drm_config_parse() function calls of_get_child_by_name() to
+find the "layers" node but fails to release the reference, leading to a
+device node reference leak.
+
+Fix this by using the __free(device_node) cleanup attribute to
+automatically release the reference when the variable goes out of scope.
+
+Fixes: efeeaefe9be5 ("drm: Add support for the LogiCVC display controller")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Reviewed-by: Kory Maincent <kory.maincent@bootlin.com>
+Link: https://patch.msgid.link/20260130-logicvc_drm-v1-1-04366463750c@gmail.com
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/logicvc/logicvc_drm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
+index cc9a4e965f779..8cbf4f2debf7d 100644
+--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
++++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
+@@ -90,7 +90,6 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_drm_config *config = &logicvc->config;
+- struct device_node *layers_node;
+ int ret;
+
+ logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
+@@ -126,7 +125,8 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ if (ret)
+ return ret;
+
+- layers_node = of_get_child_by_name(of_node, "layers");
++ struct device_node *layers_node __free(device_node) =
++ of_get_child_by_name(of_node, "layers");
+ if (!layers_node) {
+ drm_err(drm_dev, "Missing non-optional layers node\n");
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From 9a0aab5170b7d94f4d9f638d54dbe11124009583 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jan 2026 12:12:36 -0500
+Subject: drm/vmwgfx: Fix invalid kref_put callback in vmw_bo_dirty_release
+
+From: Brad Spengler <brad.spengler@opensrcsec.com>
+
+[ Upstream commit 211ecfaaef186ee5230a77d054cdec7fbfc6724a ]
+
+The kref_put() call uses (void *)kvfree as the release callback, which
+is incorrect. kref_put() expects a function with signature
+void (*release)(struct kref *), but kvfree has signature
+void (*)(const void *). Calling through an incompatible function pointer
+is undefined behavior.
+
+The code only worked by accident because ref_count is the first member
+of vmw_bo_dirty, making the kref pointer equal to the struct pointer.
+
+Fix this by adding a proper release callback that uses container_of()
+to retrieve the containing structure before freeing.
+
+Fixes: c1962742ffff ("drm/vmwgfx: Use kref in vmw_bo_dirty")
+Signed-off-by: Brad Spengler <brad.spengler@opensrcsec.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Cc: Ian Forbes <ian.forbes@broadcom.com>
+Link: https://patch.msgid.link/20260107171236.3573118-1-zack.rusin@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+index 09e938498442c..84d1d05346185 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+@@ -274,6 +274,13 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
+ return ret;
+ }
+
++static void vmw_bo_dirty_free(struct kref *kref)
++{
++ struct vmw_bo_dirty *dirty = container_of(kref, struct vmw_bo_dirty, ref_count);
++
++ kvfree(dirty);
++}
++
+ /**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+@@ -288,7 +295,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
+ {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+- if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
++ if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free))
+ vbo->dirty = NULL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 3bf913d22e271ad652f91ab3a8a16183846b425d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jan 2026 11:53:57 -0600
+Subject: drm/vmwgfx: Return the correct value in vmw_translate_ptr functions
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit 5023ca80f9589295cb60735016e39fc5cc714243 ]
+
+Before the referenced fixes these functions used a lookup function that
+returned a pointer. This was changed to another lookup function that
+returned an error code with the pointer becoming an out parameter.
+
+The error path when the lookup failed was not changed to reflect this
+change and the code continued to return the PTR_ERR of the now
+uninitialized pointer. This could cause the vmw_translate_ptr functions
+to return success when they actually failed causing further uninitialized
+and OOB accesses.
+
+Reported-by: Kuzey Arda Bulut <kuzeyardabulut@gmail.com>
+Fixes: a309c7194e8a ("drm/vmwgfx: Remove rcu locks from user resources")
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patch.msgid.link/20260113175357.129285-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index d8cc99ef7e2a0..34b9161ec7e81 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1156,7 +1156,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+ tmp_bo = vmw_bo;
+@@ -1211,7 +1211,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+ tmp_bo = vmw_bo;
+--
+2.51.0
+
--- /dev/null
+From bb6d2ba6c89ef78c012208fd028fbb216c352b8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 12:41:25 +0100
+Subject: irqchip/sifive-plic: Fix frozen interrupt due to affinity setting
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit 1072020685f4b81f6efad3b412cdae0bd62bb043 ]
+
+The PLIC ignores the interrupt completion message for a disabled interrupt,
+as explained by the specification:
+
+ The PLIC signals it has completed executing an interrupt handler by
+ writing the interrupt ID it received from the claim to the
+ claim/complete register. The PLIC does not check whether the completion
+ ID is the same as the last claim ID for that target. If the completion
+ ID does not match an interrupt source that is currently enabled for
+ the target, the completion is silently ignored.
+
+This caused problems in the past, because an interrupt can be disabled
+while still being handled and plic_irq_eoi() had no effect. That was fixed
+by checking if the interrupt is disabled, and if so enable it, before
+sending the completion message. That check is done with irqd_irq_disabled().
+
+However, that is not sufficient because the enable bit for the handling
+hart can be zero despite irqd_irq_disabled(d) being false. This can happen
+when affinity setting is changed while a hart is still handling the
+interrupt.
+
+This problem is easily reproducible by dumping a large file to uart (which
+generates lots of interrupts) and at the same time keep changing the uart
+interrupt's affinity setting. The uart port becomes frozen almost
+instantaneously.
+
+Fix this by checking PLIC's enable bit instead of irqd_irq_disabled().
+
+Fixes: cc9f04f9a84f ("irqchip/sifive-plic: Implement irq_set_affinity() for SMP host")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260212114125.3148067-1-namcao@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-sifive-plic.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 36de764ee2b61..fb1dae22ab17f 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -144,8 +144,13 @@ static void plic_irq_disable(struct irq_data *d)
+ static void plic_irq_eoi(struct irq_data *d)
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
++ u32 __iomem *reg;
++ bool enabled;
+
+- if (unlikely(irqd_irq_disabled(d))) {
++ reg = handler->enable_base + (d->hwirq / 32) * sizeof(u32);
++ enabled = readl(reg) & BIT(d->hwirq % 32);
++
++ if (unlikely(!enabled)) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+--
+2.51.0
+
--- /dev/null
+From 75368d52b25265b11da07d32ba520a1e97164275 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:29:09 +0100
+Subject: perf: Fix __perf_event_overflow() vs perf_remove_from_context() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c9bc1753b3cc41d0e01fbca7f035258b5f4db0ae ]
+
+Make sure that __perf_event_overflow() runs with IRQs disabled for all
+possible callchains. Specifically the software events can end up running
+it with only preemption disabled.
+
+This opens up a race vs perf_event_exit_event() and friends that will go
+and free various things the overflow path expects to be present, like
+the BPF program.
+
+Fixes: 592903cdcbf6 ("perf_counter: add an event_list")
+Reported-by: Simond Hu <cmdhh1767@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Simond Hu <cmdhh1767@gmail.com>
+Link: https://patch.msgid.link/20260224122909.GV1395416@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 42 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 4d7bf0536348f..146b37e97832a 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9507,6 +9507,13 @@ int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+ {
++ /*
++ * Entry point from hardware PMI, interrupts should be disabled here.
++ * This serializes us against perf_event_remove_from_context() in
++ * things like perf_event_release_kernel().
++ */
++ lockdep_assert_irqs_disabled();
++
+ return __perf_event_overflow(event, 1, data, regs);
+ }
+
+@@ -9587,6 +9594,19 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ {
+ struct hw_perf_event *hwc = &event->hw;
+
++ /*
++ * This is:
++ * - software preempt
++ * - tracepoint preempt
++ * - tp_target_task irq (ctx->lock)
++ * - uprobes preempt/irq
++ * - kprobes preempt/irq
++ * - hw_breakpoint irq
++ *
++ * Any of these are sufficient to hold off RCU and thus ensure @event
++ * exists.
++ */
++ lockdep_assert_preemption_disabled();
+ local64_add(nr, &event->count);
+
+ if (!regs)
+@@ -9595,6 +9615,16 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ if (!is_sampling_event(event))
+ return;
+
++ /*
++ * Serialize against event_function_call() IPIs like normal overflow
++ * event handling. Specifically, must not allow
++ * perf_event_release_kernel() -> perf_remove_from_context() to make
++ * progress and 'release' the event from under us.
++ */
++ guard(irqsave)();
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+@@ -10011,6 +10041,11 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ struct perf_sample_data data;
+ struct perf_event *event;
+
++ /*
++ * Per being a tracepoint, this runs with preemption disabled.
++ */
++ lockdep_assert_preemption_disabled();
++
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = entry_size,
+@@ -10472,6 +10507,11 @@ void perf_bp_event(struct perf_event *bp, void *data)
+ struct perf_sample_data sample;
+ struct pt_regs *regs = data;
+
++ /*
++ * Exception context, will have interrupts disabled.
++ */
++ lockdep_assert_irqs_disabled();
++
+ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+@@ -10924,7 +10964,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
+- if (__perf_event_overflow(event, 1, &data, regs))
++ if (perf_event_overflow(event, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 55b5f370a170d10d49d511fdfa823abcd32d6992 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 11:23:27 -0800
+Subject: scsi: lpfc: Properly set WC for DPP mapping
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit bffda93a51b40afd67c11bf558dc5aae83ca0943 ]
+
+Using set_memory_wc() to enable write-combining for the DPP portion of
+the MMIO mapping is wrong as set_memory_*() is meant to operate on RAM
+only, not MMIO mappings. In fact, as used currently triggers a BUG_ON()
+with enabled CONFIG_DEBUG_VIRTUAL.
+
+Simply map the DPP region separately and in addition to the already
+existing mappings, avoiding any possible negative side effects for
+these.
+
+Fixes: 1351e69fc6db ("scsi: lpfc: Add push-to-adapter support to sli4")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Reviewed-by: Mathias Krause <minipli@grsecurity.net>
+Link: https://patch.msgid.link/20260212192327.141104-1-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 ++
+ drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++++++++++++++++++++------
+ drivers/scsi/lpfc/lpfc_sli4.h | 3 +++
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 6535474fe8a7f..12a362eab1cb2 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -12059,6 +12059,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
++ if (phba->sli4_hba.dpp_regs_memmap_wc_p)
++ iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index d5e21e74888a7..90213058b8356 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -15910,6 +15910,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+ return NULL;
+ }
+
++static __maybe_unused void __iomem *
++lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
++{
++
++ /* DPP region is supposed to cover 64-bit BAR2 */
++ if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
++ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
++ "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
++ dpp_barset);
++ return NULL;
++ }
++
++ if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
++ void __iomem *dpp_map;
++
++ dpp_map = ioremap_wc(phba->pci_bar2_map,
++ pci_resource_len(phba->pcidev,
++ PCI_64BIT_BAR4));
++
++ if (dpp_map)
++ phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
++ }
++
++ return phba->sli4_hba.dpp_regs_memmap_wc_p;
++}
++
+ /**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+@@ -16819,9 +16845,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ uint8_t dpp_barset;
+ uint32_t dpp_offset;
+ uint8_t wq_create_version;
+-#ifdef CONFIG_X86
+- unsigned long pg_addr;
+-#endif
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+@@ -17007,14 +17030,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+
+ #ifdef CONFIG_X86
+ /* Enable combined writes for DPP aperture */
+- pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
+- rc = set_memory_wc(pg_addr, 1);
+- if (rc) {
++ bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
++ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3272 Cannot setup Combined "
+ "Write on WQ[%d] - disable DPP\n",
+ wq->queue_id);
+ phba->cfg_enable_dpp = 0;
++ } else {
++ wq->dpp_regaddr = bar_memmap_p + dpp_offset;
+ }
+ #else
+ phba->cfg_enable_dpp = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index cbb1aa1cf025b..db6a42147c895 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -783,6 +783,9 @@ struct lpfc_sli4_hba {
+ void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
+ * dpp registers
+ */
++ void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
++ * dpp registers with write combining
++ */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+--
+2.51.0
+
--- /dev/null
+From 081dd796f63834dc50c2305fcbf3d0189a08b01b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 19:28:06 +0000
+Subject: scsi: pm8001: Fix use-after-free in pm8001_queue_command()
+
+From: Salomon Dushimirimana <salomondush@google.com>
+
+[ Upstream commit 38353c26db28efd984f51d426eac2396d299cca7 ]
+
+Commit e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()") refactors
+pm8001_queue_command(); however, it introduces a potential double free
+scenario when it changes the function to return -ENODEV in case of phy
+down/device gone state.
+
+In this path, pm8001_queue_command() updates task status and calls
+task_done to indicate to upper layer that the task has been handled.
+However, this also frees the underlying SAS task. A -ENODEV is then
+returned to the caller. When libsas sas_ata_qc_issue() receives this error
+value, it assumes the task wasn't handled/queued by LLDD and proceeds to
+clean up and free the task again, resulting in a double free.
+
+Since pm8001_queue_command() handles the SAS task in this case, it should
+return 0 to the caller indicating that the task has been handled.
+
+Fixes: e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()")
+Signed-off-by: Salomon Dushimirimana <salomondush@google.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://patch.msgid.link/20260213192806.439432-1-salomondush@google.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/pm8001/pm8001_sas.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 4cd648be68dde..e416cabbea4a2 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -467,8 +467,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
+ } else {
+ task->task_done(task);
+ }
+- rc = -ENODEV;
+- goto err_out;
++ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n");
++ return 0;
+ }
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+--
+2.51.0
+
--- /dev/null
+From eb87ada6428f3f89e11f3df05308d6ec988ea495 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Sep 2024 15:30:05 -0700
+Subject: scsi: ufs: core: Always initialize the UIC done completion
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit b1e8c53749adb795bfb0bf4e2f7836e26684bb90 ]
+
+Simplify __ufshcd_send_uic_cmd() by always initializing the
+uic_cmd::done completion. This is fine since the time required to
+initialize a completion is small compared to the time required to
+process an UIC command.
+
+Reviewed-by: Peter Wang <peter.wang@mediatek.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20240912223019.3510966-5-bvanassche@acm.org
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: 62c015373e1c ("scsi: ufs: core: Move link recovery for hibern8 exit failure to wl_resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 01927facaa203..6d44c2adb251a 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -2340,13 +2340,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+- * @completion: initialize the completion only if this is set to true
+ *
+ * Returns 0 only if success.
+ */
+ static int
+-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+- bool completion)
++__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ {
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
+@@ -2356,8 +2354,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ return -EIO;
+ }
+
+- if (completion)
+- init_completion(&uic_cmd->done);
++ init_completion(&uic_cmd->done);
+
+ uic_cmd->cmd_active = 1;
+ ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+@@ -2383,7 +2380,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
++ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+@@ -4081,7 +4078,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ reenable_intr = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
++ ret = __ufshcd_send_uic_cmd(hba, cmd);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+--
+2.51.0
+
--- /dev/null
+From c21481c69246a46d7993f15f09bcd497fc7a3dd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 18:37:57 +0800
+Subject: scsi: ufs: core: Move link recovery for hibern8 exit failure to
+ wl_resume
+
+From: Peter Wang <peter.wang@mediatek.com>
+
+[ Upstream commit 62c015373e1cdb1cdca824bd2dbce2dac0819467 ]
+
+Move the link recovery trigger from ufshcd_uic_pwr_ctrl() to
+__ufshcd_wl_resume(). Ensure link recovery is only attempted when hibern8
+exit fails during resume, not during hibern8 enter in suspend. Improve
+error handling and prevent unnecessary link recovery attempts.
+
+Fixes: 35dabf4503b9 ("scsi: ufs: core: Use link recovery when h8 exit fails during runtime resume")
+Signed-off-by: Peter Wang <peter.wang@mediatek.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223103906.2533654-1-peter.wang@mediatek.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 6d44c2adb251a..29f232894372c 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4130,14 +4130,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+- /*
+- * If the h8 exit fails during the runtime resume process, it becomes
+- * stuck and cannot be recovered through the error handler. To fix
+- * this, use link recovery instead of the error handler.
+- */
+- if (ret && hba->pm_op_in_progress)
+- ret = ufshcd_link_recovery(hba);
+-
+ return ret;
+ }
+
+@@ -9250,7 +9242,15 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+- goto vendor_suspend;
++ /*
++ * If the h8 exit fails during the runtime resume
++ * process, it becomes stuck and cannot be recovered
++ * through the error handler. To fix this, use link
++ * recovery instead of the error handler.
++ */
++ ret = ufshcd_link_recovery(hba);
++ if (ret)
++ goto vendor_suspend;
+ }
+ } else if (ufshcd_is_link_off(hba)) {
+ /*
+--
+2.51.0
+
--- /dev/null
+drm-vmwgfx-fix-invalid-kref_put-callback-in-vmw_bo_d.patch
+drm-vmwgfx-return-the-correct-value-in-vmw_translate.patch
+drm-logicvc-fix-device-node-reference-leak-in-logicv.patch
+irqchip-sifive-plic-fix-frozen-interrupt-due-to-affi.patch
+scsi-lpfc-properly-set-wc-for-dpp-mapping.patch
+scsi-pm8001-fix-use-after-free-in-pm8001_queue_comma.patch
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch
+scsi-ufs-core-always-initialize-the-uic-done-complet.patch
+scsi-ufs-core-move-link-recovery-for-hibern8-exit-fa.patch
+alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
+alsa-usb-audio-use-inclusive-terms.patch
+perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
+btrfs-move-btrfs_crc32c_final-into-free-space-cache..patch
+btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
+btrfs-fix-compat-mask-in-error-messages-in-btrfs_che.patch
+bpf-fix-stack-out-of-bounds-write-in-devmap.patch
--- /dev/null
+From 702b6b8c77998113c4353e5b9628875177e7c459 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 11:17:28 +0000
+Subject: ALSA: hda: cs35l56: Fix signedness error in cs35l56_hda_posture_put()
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 003ce8c9b2ca28fbb4860651e76fb1c9a91f2ea1 ]
+
+In cs35l56_hda_posture_put() assign ucontrol->value.integer.value[0] to
+a long instead of an unsigned long. ucontrol->value.integer.value[0] is
+a long.
+
+This fixes the sparse warning:
+
+sound/hda/codecs/side-codecs/cs35l56_hda.c:256:20: warning: unsigned value
+that used to be signed checked against zero?
+sound/hda/codecs/side-codecs/cs35l56_hda.c:252:29: signed value source
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Fixes: 73cfbfa9caea8 ("ALSA: hda/cs35l56: Add driver for Cirrus Logic CS35L56 amplifier")
+Link: https://patch.msgid.link/20260226111728.1700431-1-rf@opensource.cirrus.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/cs35l56_hda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 52d2ddf248323..2a936f43fad2d 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -250,7 +250,7 @@ static int cs35l56_hda_posture_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+- unsigned long pos = ucontrol->value.integer.value[0];
++ long pos = ucontrol->value.integer.value[0];
+ bool changed;
+ int ret;
+
+--
+2.51.0
+
--- /dev/null
+From d28bbfb5e229b6109d666dac5b525ccc770471fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 04:59:45 +0000
+Subject: ALSA: pci: hda: use snd_kcontrol_chip()
+
+From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+[ Upstream commit 483dd12dbe34c6d4e71d4d543bcb1292bcb62d08 ]
+
+We can use snd_kcontrol_chip(). Let's use it.
+
+Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/87plglauda.wl-kuninori.morimoto.gx@renesas.com
+Stable-dep-of: 003ce8c9b2ca ("ALSA: hda: cs35l56: Fix signedness error in cs35l56_hda_posture_put()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/cs35l56_hda.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 7823f71012a8a..52d2ddf248323 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -180,7 +180,7 @@ static int cs35l56_hda_mixer_info(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_mixer_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int reg_val;
+ int i;
+
+@@ -202,7 +202,7 @@ static int cs35l56_hda_mixer_get(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_mixer_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int item = ucontrol->value.enumerated.item[0];
+ bool changed;
+
+@@ -231,7 +231,7 @@ static int cs35l56_hda_posture_info(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_posture_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int pos;
+ int ret;
+
+@@ -249,7 +249,7 @@ static int cs35l56_hda_posture_get(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_posture_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned long pos = ucontrol->value.integer.value[0];
+ bool changed;
+ int ret;
+@@ -298,7 +298,7 @@ static int cs35l56_hda_vol_info(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int raw_vol;
+ int vol;
+ int ret;
+@@ -324,7 +324,7 @@ static int cs35l56_hda_vol_get(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ long vol = ucontrol->value.integer.value[0];
+ unsigned int raw_vol;
+ bool changed;
+--
+2.51.0
+
--- /dev/null
+From 382eedb761544aa4982d00e76fae8028db6747ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 21:58:48 +1030
+Subject: ALSA: scarlett2: Fix DSP filter control array handling
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 1d241483368f2fd87fbaba64d6aec6bad3a1e12e ]
+
+scarlett2_add_dsp_ctls() was incorrectly storing the precomp and PEQ
+filter coefficient control pointers into the precomp_flt_switch_ctls
+and peq_flt_switch_ctls arrays instead of the intended targets
+precomp_flt_ctls and peq_flt_ctls. Pass NULL instead, as the filter
+coefficient control pointers are not used, and remove the unused
+precomp_flt_ctls and peq_flt_ctls arrays from struct scarlett2_data.
+
+Additionally, scarlett2_update_filter_values() was reading
+dsp_input_count * peq_flt_count values for
+SCARLETT2_CONFIG_PEQ_FLT_SWITCH, but the peq_flt_switch array is
+indexed only by dsp_input_count (one switch per DSP input, not per
+filter). Fix the read count.
+
+Fixes: b64678eb4e70 ("ALSA: scarlett2: Add DSP controls")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Link: https://patch.msgid.link/86497b71db060677d97c38a6ce5f89bb3b25361b.1771581197.git.g@b4.vu
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/mixer_scarlett2.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+index 8c7755fc1519f..1242840104173 100644
+--- a/sound/usb/mixer_scarlett2.c
++++ b/sound/usb/mixer_scarlett2.c
+@@ -1294,8 +1294,6 @@ struct scarlett2_data {
+ struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
+ struct snd_kcontrol *mix_ctls[SCARLETT2_MIX_MAX];
+ struct snd_kcontrol *compressor_ctls[SCARLETT2_COMPRESSOR_CTLS_MAX];
+- struct snd_kcontrol *precomp_flt_ctls[SCARLETT2_PRECOMP_FLT_CTLS_MAX];
+- struct snd_kcontrol *peq_flt_ctls[SCARLETT2_PEQ_FLT_CTLS_MAX];
+ struct snd_kcontrol *precomp_flt_switch_ctls[SCARLETT2_DSP_SWITCH_MAX];
+ struct snd_kcontrol *peq_flt_switch_ctls[SCARLETT2_DSP_SWITCH_MAX];
+ struct snd_kcontrol *direct_monitor_ctl;
+@@ -3415,7 +3413,6 @@ static int scarlett2_update_autogain(struct usb_mixer_interface *mixer)
+ private->autogain_status[i] =
+ private->num_autogain_status_texts - 1;
+
+-
+ for (i = 0; i < SCARLETT2_AG_TARGET_COUNT; i++)
+ if (scarlett2_has_config_item(private,
+ scarlett2_ag_target_configs[i])) {
+@@ -5595,8 +5592,7 @@ static int scarlett2_update_filter_values(struct usb_mixer_interface *mixer)
+
+ err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_PEQ_FLT_SWITCH,
+- info->dsp_input_count * info->peq_flt_count,
+- private->peq_flt_switch);
++ info->dsp_input_count, private->peq_flt_switch);
+ if (err < 0)
+ return err;
+
+@@ -6794,7 +6790,7 @@ static int scarlett2_add_dsp_ctls(struct usb_mixer_interface *mixer, int i)
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_precomp_flt_ctl,
+ i * info->precomp_flt_count + j,
+- 1, s, &private->precomp_flt_switch_ctls[j]);
++ 1, s, NULL);
+ if (err < 0)
+ return err;
+ }
+@@ -6804,7 +6800,7 @@ static int scarlett2_add_dsp_ctls(struct usb_mixer_interface *mixer, int i)
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_peq_flt_ctl,
+ i * info->peq_flt_count + j,
+- 1, s, &private->peq_flt_switch_ctls[j]);
++ 1, s, NULL);
+ if (err < 0)
+ return err;
+ }
+--
+2.51.0
+
--- /dev/null
+From 2d19cbe3ca0b13e19c7a58a0cfcf08ee737226d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2024 23:57:54 +0930
+Subject: ALSA: scarlett2: Fix redeclaration of loop variable
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 5e7b782259fd396c7802948f5901bb2d769ddff8 ]
+
+Was using both "for (i = 0, ..." and "for (int i = 0, ..." in
+scarlett2_update_autogain(). Remove "int" to fix.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/ecb0a8931c1883abd6c0e335c63961653bef85f0.1727971672.git.g@b4.vu
+Stable-dep-of: 1d241483368f ("ALSA: scarlett2: Fix DSP filter control array handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/mixer_scarlett2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+index f6292c4b8d214..8c7755fc1519f 100644
+--- a/sound/usb/mixer_scarlett2.c
++++ b/sound/usb/mixer_scarlett2.c
+@@ -3416,7 +3416,7 @@ static int scarlett2_update_autogain(struct usb_mixer_interface *mixer)
+ private->num_autogain_status_texts - 1;
+
+
+- for (int i = 0; i < SCARLETT2_AG_TARGET_COUNT; i++)
++ for (i = 0; i < SCARLETT2_AG_TARGET_COUNT; i++)
+ if (scarlett2_has_config_item(private,
+ scarlett2_ag_target_configs[i])) {
+ err = scarlett2_usb_get_config(
+@@ -3427,7 +3427,7 @@ static int scarlett2_update_autogain(struct usb_mixer_interface *mixer)
+ }
+
+ /* convert from negative dBFS as used by the device */
+- for (int i = 0; i < SCARLETT2_AG_TARGET_COUNT; i++)
++ for (i = 0; i < SCARLETT2_AG_TARGET_COUNT; i++)
+ private->ag_targets[i] = -ag_target_values[i];
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 0825844e83de655969798a719001c80530125671 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 +0100
+Subject: ALSA: usb-audio: Cap the packet size pre-calculations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7fe8dec3f628e9779f1631576f8e693370050348 ]
+
+We calculate the possible packet sizes beforehand for adaptive and
+synchronous endpoints, but we didn't take care of the max frame size
+for those pre-calculated values. When a device or a bus limits the
+packet size, a high sample rate or a high number of channels may lead
+to the packet sizes that are larger than the given limit, which
+results in an error from the USB core at submitting URBs.
+
+As a simple workaround, just add the sanity checks of pre-calculated
+packet sizes to have the upper boundary of ep->maxframesize.
+
+Fixes: f0bd62b64016 ("ALSA: usb-audio: Improve frames size computation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221076
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index cb94c2cad2213..729d86fffab4c 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1399,6 +1399,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ goto unlock;
+ }
+
++ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
++ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
++
+ /* calculate the frequency in 16.16 format */
+ ep->freqm = ep->freqn;
+ ep->freqshift = INT_MIN;
+--
+2.51.0
+
--- /dev/null
+From 10de8b4a10559edaebd9b3ea09a3691faedc9642 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) the Air and
+Safe modes get disabled.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 947467112409a..41752b8197463 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2408,7 +2408,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.51.0
+
--- /dev/null
+From d3c4ea602d6a698aeecdae7ab478d10c2c2403fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:31 +0100
+Subject: ALSA: usb-audio: Use inclusive terms
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 4e9113c533acee2ba1f72fd68ee6ecd36b64484e ]
+
+Replace the remaining with inclusive terms; it's only this function
+name we overlooked at the previous conversion.
+
+Fixes: 53837b4ac2bd ("ALSA: usb-audio: Replace slave/master terms")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 729d86fffab4c..9d22613f71e24 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -160,8 +160,8 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
+ * This won't be used for implicit feedback which takes the packet size
+ * returned from the sync source
+ */
+-static int slave_next_packet_size(struct snd_usb_endpoint *ep,
+- unsigned int avail)
++static int synced_next_packet_size(struct snd_usb_endpoint *ep,
++ unsigned int avail)
+ {
+ unsigned long flags;
+ unsigned int phase;
+@@ -230,7 +230,7 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ }
+
+ if (ep->sync_source)
+- return slave_next_packet_size(ep, avail);
++ return synced_next_packet_size(ep, avail);
+ else
+ return next_packet_size(ep, avail);
+ }
+--
+2.51.0
+
--- /dev/null
+From 622b1e19c7c4aeafcc6610db098aed7a94478e93 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 07:55:25 +0000
+Subject: bpf, arm64: Force 8-byte alignment for JIT buffer to prevent atomic
+ tearing
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit ef06fd16d48704eac868441d98d4ef083d8f3d07 ]
+
+struct bpf_plt contains a u64 target field. Currently, the BPF JIT
+allocator requests an alignment of 4 bytes (sizeof(u32)) for the JIT
+buffer.
+
+Because the base address of the JIT buffer can be 4-byte aligned (e.g.,
+ending in 0x4 or 0xc), the relative padding logic in build_plt() fails
+to ensure that target lands on an 8-byte boundary.
+
+This leads to two issues:
+1. UBSAN reports misaligned-access warnings when dereferencing the
+ structure.
+2. More critically, target is updated concurrently via WRITE_ONCE() in
+ bpf_arch_text_poke() while the JIT'd code executes ldr. On arm64,
+ 64-bit loads/stores are only guaranteed to be single-copy atomic if
+ they are 64-bit aligned. A misaligned target risks a torn read,
+ causing the JIT to jump to a corrupted address.
+
+Fix this by increasing the allocation alignment requirement to 8 bytes
+(sizeof(u64)) in bpf_jit_binary_pack_alloc(). This anchors the base of
+the JIT buffer to an 8-byte boundary, allowing the relative padding math
+in build_plt() to correctly align the target field.
+
+Fixes: b2ad54e1533e ("bpf, arm64: Implement bpf_arch_text_poke() for arm64")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Acked-by: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20260226075525.233321-1-tabba@google.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/net/bpf_jit_comp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 82b57436f2f10..9310196e0a09e 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1880,7 +1880,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
+ image_size = extable_offset + extable_size;
+ ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
+- sizeof(u32), &header, &image_ptr,
++ sizeof(u64), &header, &image_ptr,
+ jit_fill_hole);
+ if (!ro_header) {
+ prog = orig_prog;
+--
+2.51.0
+
--- /dev/null
+From ececcded89b889f92b4c0c66c7e435d82ed368da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 05:34:44 +0000
+Subject: bpf: Fix stack-out-of-bounds write in devmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b7bf516c3ecd9a2aae2dc2635178ab87b734fef1 ]
+
+get_upper_ifindexes() iterates over all upper devices and writes their
+indices into an array without checking bounds.
+
+Also the callers assume that the max number of upper devices is
+MAX_NEST_DEV and allocate excluded_devices[1+MAX_NEST_DEV] on the stack,
+but that assumption is not correct and the number of upper devices could
+be larger than MAX_NEST_DEV (e.g., many macvlans), causing a
+stack-out-of-bounds write.
+
+Add a max parameter to get_upper_ifindexes() to avoid the issue.
+When there are too many upper devices, return -EOVERFLOW and abort the
+redirect.
+
+To reproduce, create more than MAX_NEST_DEV(8) macvlans on a device with
+an XDP program attached using BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS.
+Then send a packet to the device to trigger the XDP redirect path.
+
+Reported-by: syzbot+10cc7f13760b31bd2e61@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698c4ce3.050a0220.340abe.000b.GAE@google.com/T/
+Fixes: aeea1b86f936 ("bpf, devmap: Exclude XDP broadcast to master device")
+Reviewed-by: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Link: https://lore.kernel.org/r/20260225053506.4738-1-kohei@enjuk.jp
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 3aa002a47a966..39b7efa396b8e 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -588,18 +588,22 @@ static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifin
+ }
+
+ /* Get ifindex of each upper device. 'indexes' must be able to hold at
+- * least MAX_NEST_DEV elements.
+- * Returns the number of ifindexes added.
++ * least 'max' elements.
++ * Returns the number of ifindexes added, or -EOVERFLOW if there are too
++ * many upper devices.
+ */
+-static int get_upper_ifindexes(struct net_device *dev, int *indexes)
++static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
+ {
+ struct net_device *upper;
+ struct list_head *iter;
+ int n = 0;
+
+ netdev_for_each_upper_dev_rcu(dev, upper, iter) {
++ if (n >= max)
++ return -EOVERFLOW;
+ indexes[n++] = upper->ifindex;
+ }
++
+ return n;
+ }
+
+@@ -615,7 +619,11 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev_rx->ifindex;
+ }
+
+@@ -733,7 +741,11 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev->ifindex;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 2c74ddbaf1dfb86ebf5d926f7aa9b37c939e74c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:41 +0000
+Subject: btrfs: fix compat mask in error messages in btrfs_check_features()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 587bb33b10bda645a1028c1737ad3992b3d7cf61 ]
+
+Commit d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency
+checks") introduced a regression when it comes to handling unsupported
+incompat or compat_ro flags. Beforehand we only printed the flags that
+we didn't recognize, afterwards we printed them all, which is less
+useful. Fix the error handling so it behaves like it used to.
+
+Fixes: d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 034cd7b1d0f5f..fa4d22f6f29d7 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3119,7 +3119,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ btrfs_err(fs_info,
+ "cannot mount because of unknown incompat features (0x%llx)",
+- incompat);
++ incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP);
+ return -EINVAL;
+ }
+
+@@ -3151,7 +3151,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (compat_ro_unsupp && is_rw_mount) {
+ btrfs_err(fs_info,
+ "cannot mount read-write because of unknown compat_ro features (0x%llx)",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+@@ -3164,7 +3164,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
+ btrfs_err(fs_info,
+ "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From c8eeb7adb77f6cf7cdb6569b4b47233dc03c1eb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:21:44 +0000
+Subject: btrfs: fix incorrect key offset in error message in
+ check_dev_extent_item()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 511dc8912ae3e929c1a182f5e6b2326516fd42a0 ]
+
+Fix the error message in check_dev_extent_item(), when an overlapping
+stripe is encountered. For dev extents, objectid is the disk number and
+offset the physical address, so prev_key->objectid should actually be
+prev_key->offset.
+
+(I can't take any credit for this one - this was discovered by Chris and
+his friend Claude.)
+
+Reported-by: Chris Mason <clm@fb.com>
+Fixes: 008e2512dc56 ("btrfs: tree-checker: add dev extent item checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 3bb7a376bd3fc..894136eb443ee 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1882,7 +1882,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+- prev_key->objectid, prev_len, key->offset);
++ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From e4d77ad23d19b7af0fc18fefc423c00a1875f53d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 14:39:46 +0000
+Subject: btrfs: fix objectid value in error message in check_extent_data_ref()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit a10172780526c2002e062102ad4f2aabac495889 ]
+
+Fix a copy-paste error in check_extent_data_ref(): we're printing root
+as in the message above, we should be printing objectid.
+
+Fixes: f333a3c7e832 ("btrfs: tree-checker: validate dref root and objectid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 894136eb443ee..60bba7fbeb351 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1701,7 +1701,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref objectid value %llu",
+- root);
++ objectid);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+--
+2.51.0
+
--- /dev/null
+From 8c94d639c23dc906b0cb9d5450e06a891c6693a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:13 +0000
+Subject: btrfs: fix warning in scrub_verify_one_metadata()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 44e2fda66427a0442d8d2c0e6443256fb458ab6b ]
+
+Commit b471965fdb2d ("btrfs: fix replace/scrub failure with
+metadata_uuid") fixed the comparison in scrub_verify_one_metadata() to
+use metadata_uuid rather than fsid, but left the warning as it was. Fix
+it so it matches what we're doing.
+
+Fixes: b471965fdb2d ("btrfs: fix replace/scrub failure with metadata_uuid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/scrub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 3cbb9f22d3944..513c2bfa8d628 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -634,7 +634,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ btrfs_warn_rl(fs_info,
+ "tree block %llu mirror %u has bad fsid, has %pU want %pU",
+ logical, stripe->mirror_num,
+- header->fsid, fs_info->fs_devices->fsid);
++ header->fsid, fs_info->fs_devices->metadata_uuid);
+ return;
+ }
+ if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+--
+2.51.0
+
--- /dev/null
+From 2f87de27b6e4726d03d71ca7c93d1b896fb9229c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:32:39 +0000
+Subject: btrfs: print correct subvol num if active swapfile prevents deletion
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 1c7e9111f4e6d6d42bc47759c9af1ef91f03ac2c ]
+
+Fix the error message in btrfs_delete_subvolume() if we can't delete a
+subvolume because it has an active swapfile: we were printing the number
+of the parent rather than the target.
+
+Fixes: 60021bd754c6 ("btrfs: prevent subvol with swapfile from being deleted")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index b1d9595762ef6..09ebe5acbe439 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4629,7 +4629,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ spin_unlock(&dest->root_item_lock);
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu with active swapfile",
+- btrfs_root_id(root));
++ btrfs_root_id(dest));
+ ret = -EPERM;
+ goto out_up_write;
+ }
+--
+2.51.0
+
--- /dev/null
+From dd733c59c4ba1796af8fcd22898df516db8d3573 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 13:54:12 -0500
+Subject: cgroup/cpuset: Fix incorrect use of cpuset_update_tasks_cpumask() in
+ update_cpumasks_hier()
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 68230aac8b9aad243626fbaf3ca170012c17fec5 ]
+
+Commit e2ffe502ba45 ("cgroup/cpuset: Add cpuset.cpus.exclusive for v2")
+incorrectly changed the 2nd parameter of cpuset_update_tasks_cpumask()
+from tmp->new_cpus to cp->effective_cpus. This second parameter is just
+a temporary cpumask for internal use. The cpuset_update_tasks_cpumask()
+function was originally called update_tasks_cpumask() before commit
+381b53c3b549 ("cgroup/cpuset: rename functions shared between v1
+and v2").
+
+This mistake can incorrectly change the effective_cpus of the
+cpuset when it is the top_cpuset or in arm64 architecture where
+task_cpu_possible_mask() may differ from cpu_possible_mask. So far
+top_cpuset hasn't been passed to update_cpumasks_hier() yet, but arm64
+arch can still be impacted. Fix it by reverting the incorrect change.
+
+Fixes: e2ffe502ba45 ("cgroup/cpuset: Add cpuset.cpus.exclusive for v2")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 1b93eb7b29c58..77b07548c3027 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2126,7 +2126,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ WARN_ON(!is_in_v2_mode() &&
+ !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+
+- cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
++ cpuset_update_tasks_cpumask(cp, tmp->new_cpus);
+
+ /*
+ * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
+--
+2.51.0
+
--- /dev/null
+From 3ded1f37ab1b08536d86d5413ccb9b35da1316fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 13:50:23 -0800
+Subject: drm/amdgpu: Fix locking bugs in error paths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 480ad5f6ead4a47b969aab6618573cd6822bb6a4 ]
+
+Do not unlock psp->ras_context.mutex if it has not been locked. This has
+been detected by the Clang thread-safety analyzer.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: YiPeng Chai <YiPeng.Chai@amd.com>
+Cc: Hawking Zhang <Hawking.Zhang@amd.com>
+Cc: amd-gfx@lists.freedesktop.org
+Fixes: b3fb79cda568 ("drm/amdgpu: add mutex to protect ras shared memory")
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 6fa01b4335978051d2cd80841728fd63cc597970)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 6e8aad91bcd30..0d3c18f04ac36 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -332,13 +332,13 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ if (!context || !context->initialized) {
+ dev_err(adev->dev, "TA is not initialized\n");
+ ret = -EINVAL;
+- goto err_free_shared_buf;
++ goto free_shared_buf;
+ }
+
+ if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_invoke) {
+ dev_err(adev->dev, "Unsupported function to invoke TA\n");
+ ret = -EOPNOTSUPP;
+- goto err_free_shared_buf;
++ goto free_shared_buf;
+ }
+
+ context->session_id = ta_id;
+@@ -346,7 +346,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ mutex_lock(&psp->ras_context.mutex);
+ ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
+ if (ret)
+- goto err_free_shared_buf;
++ goto unlock;
+
+ ret = psp_fn_ta_invoke(psp, cmd_id);
+ if (ret || context->resp_status) {
+@@ -354,15 +354,17 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ ret, context->resp_status);
+ if (!ret) {
+ ret = -EINVAL;
+- goto err_free_shared_buf;
++ goto unlock;
+ }
+ }
+
+ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
+ ret = -EFAULT;
+
+-err_free_shared_buf:
++unlock:
+ mutex_unlock(&psp->ras_context.mutex);
++
++free_shared_buf:
+ kfree(shared_buf);
+
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 65d739e192b3271f95c91312feb8b257a0061d0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Sep 2025 23:15:54 +0200
+Subject: drm/amdgpu: Replace kzalloc + copy_from_user with memdup_user
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit 99eeb8358e6cdb7050bf2956370c15dcceda8c7e ]
+
+Replace kzalloc() followed by copy_from_user() with memdup_user() to
+improve and simplify ta_if_load_debugfs_write() and
+ta_if_invoke_debugfs_write().
+
+No functional changes intended.
+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 480ad5f6ead4 ("drm/amdgpu: Fix locking bugs in error paths")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 20 ++++++--------------
+ 1 file changed, 6 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 38face981c3e3..6e8aad91bcd30 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
+
+ copy_pos += sizeof(uint32_t);
+
+- ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
+- if (!ta_bin)
+- return -ENOMEM;
+- if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
+- ret = -EFAULT;
+- goto err_free_bin;
+- }
++ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
++ if (IS_ERR(ta_bin))
++ return PTR_ERR(ta_bin);
+
+ /* Set TA context and functions */
+ set_ta_context_funcs(psp, ta_type, &context);
+@@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ return -EFAULT;
+ copy_pos += sizeof(uint32_t);
+
+- shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
+- if (!shared_buf)
+- return -ENOMEM;
+- if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
+- ret = -EFAULT;
+- goto err_free_shared_buf;
+- }
++ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
++ if (IS_ERR(shared_buf))
++ return PTR_ERR(shared_buf);
+
+ set_ta_context_funcs(psp, ta_type, &context);
+
+--
+2.51.0
+
--- /dev/null
+From c65a9db687ffdfdbdc743435e497bc4c9dd5e7e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:07 -0800
+Subject: drm/amdgpu: Unlock a mutex before destroying it
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 5e0bcc7b88bcd081aaae6f481b10d9ab294fcb69 ]
+
+Mutexes must be unlocked before these are destroyed. This has been detected
+by the Clang thread-safety analyzer.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Yang Wang <kevinyang.wang@amd.com>
+Cc: Hawking Zhang <Hawking.Zhang@amd.com>
+Cc: amd-gfx@lists.freedesktop.org
+Fixes: f5e4cc8461c4 ("drm/amdgpu: implement RAS ACA driver framework")
+Reviewed-by: Yang Wang <kevinyang.wang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 270258ba320beb99648dceffb67e86ac76786e55)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+index a7ecc33ddf223..ef5356b5a65ec 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+@@ -583,6 +583,7 @@ static void aca_error_fini(struct aca_error *aerr)
+ aca_bank_error_remove(aerr, bank_error);
+
+ out_unlock:
++ mutex_unlock(&aerr->lock);
+ mutex_destroy(&aerr->lock);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 74b71270a3ed9f0a83e3296c28ccf9787b19091b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 00:21:19 +0800
+Subject: drm/logicvc: Fix device node reference leak in
+ logicvc_drm_config_parse()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fef0e649f8b42bdffe4a916dd46e1b1e9ad2f207 ]
+
+The logicvc_drm_config_parse() function calls of_get_child_by_name() to
+find the "layers" node but fails to release the reference, leading to a
+device node reference leak.
+
+Fix this by using the __free(device_node) cleanup attribute to automatic
+release the reference when the variable goes out of scope.
+
+Fixes: efeeaefe9be5 ("drm: Add support for the LogiCVC display controller")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Reviewed-by: Kory Maincent <kory.maincent@bootlin.com>
+Link: https://patch.msgid.link/20260130-logicvc_drm-v1-1-04366463750c@gmail.com
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/logicvc/logicvc_drm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
+index 01a37e28c0803..6d88f86459882 100644
+--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
++++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
+@@ -90,7 +90,6 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_drm_config *config = &logicvc->config;
+- struct device_node *layers_node;
+ int ret;
+
+ logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
+@@ -126,7 +125,8 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ if (ret)
+ return ret;
+
+- layers_node = of_get_child_by_name(of_node, "layers");
++ struct device_node *layers_node __free(device_node) =
++ of_get_child_by_name(of_node, "layers");
+ if (!layers_node) {
+ drm_err(drm_dev, "Missing non-optional layers node\n");
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From 88a1c304cdd14b956e04d5958493ae026ae40081 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jan 2026 12:12:36 -0500
+Subject: drm/vmwgfx: Fix invalid kref_put callback in vmw_bo_dirty_release
+
+From: Brad Spengler <brad.spengler@opensrcsec.com>
+
+[ Upstream commit 211ecfaaef186ee5230a77d054cdec7fbfc6724a ]
+
+The kref_put() call uses (void *)kvfree as the release callback, which
+is incorrect. kref_put() expects a function with signature
+void (*release)(struct kref *), but kvfree has signature
+void (*)(const void *). Calling through an incompatible function pointer
+is undefined behavior.
+
+The code only worked by accident because ref_count is the first member
+of vmw_bo_dirty, making the kref pointer equal to the struct pointer.
+
+Fix this by adding a proper release callback that uses container_of()
+to retrieve the containing structure before freeing.
+
+Fixes: c1962742ffff ("drm/vmwgfx: Use kref in vmw_bo_dirty")
+Signed-off-by: Brad Spengler <brad.spengler@opensrcsec.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Cc: Ian Forbes <ian.forbes@broadcom.com>
+Link: https://patch.msgid.link/20260107171236.3573118-1-zack.rusin@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+index de2498749e276..5bb710824d72f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+@@ -274,6 +274,13 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
+ return ret;
+ }
+
++static void vmw_bo_dirty_free(struct kref *kref)
++{
++ struct vmw_bo_dirty *dirty = container_of(kref, struct vmw_bo_dirty, ref_count);
++
++ kvfree(dirty);
++}
++
+ /**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+@@ -288,7 +295,7 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
+ {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+- if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
++ if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free))
+ vbo->dirty = NULL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 55959bef325e722bd3c24cb2d704743144f3528e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jan 2026 11:53:57 -0600
+Subject: drm/vmwgfx: Return the correct value in vmw_translate_ptr functions
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit 5023ca80f9589295cb60735016e39fc5cc714243 ]
+
+Before the referenced fixes these functions used a lookup function that
+returned a pointer. This was changed to another lookup function that
+returned an error code with the pointer becoming an out parameter.
+
+The error path when the lookup failed was not changed to reflect this
+change and the code continued to return the PTR_ERR of the now
+uninitialized pointer. This could cause the vmw_translate_ptr functions
+to return success when they actually failed causing further uninitialized
+and OOB accesses.
+
+Reported-by: Kuzey Arda Bulut <kuzeyardabulut@gmail.com>
+Fixes: a309c7194e8a ("drm/vmwgfx: Remove rcu locks from user resources")
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patch.msgid.link/20260113175357.129285-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 0c1bd3acf3598..6b921db2dcd23 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1161,7 +1161,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+@@ -1217,7 +1217,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+--
+2.51.0
+
--- /dev/null
+From 372432ed64c6622432c40bdacd88dd821a0a63c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 12:41:25 +0100
+Subject: irqchip/sifive-plic: Fix frozen interrupt due to affinity setting
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit 1072020685f4b81f6efad3b412cdae0bd62bb043 ]
+
+PLIC ignores interrupt completion message for disabled interrupt, explained
+by the specification:
+
+ The PLIC signals it has completed executing an interrupt handler by
+ writing the interrupt ID it received from the claim to the
+ claim/complete register. The PLIC does not check whether the completion
+ ID is the same as the last claim ID for that target. If the completion
+ ID does not match an interrupt source that is currently enabled for
+ the target, the completion is silently ignored.
+
+This caused problems in the past, because an interrupt can be disabled
+while still being handled and plic_irq_eoi() had no effect. That was fixed
+by checking if the interrupt is disabled, and if so enable it, before
+sending the completion message. That check is done with irqd_irq_disabled().
+
+However, that is not sufficient because the enable bit for the handling
+hart can be zero despite irqd_irq_disabled(d) being false. This can happen
+when affinity setting is changed while a hart is still handling the
+interrupt.
+
+This problem is easily reproducible by dumping a large file to uart (which
+generates lots of interrupts) and at the same time keep changing the uart
+interrupt's affinity setting. The uart port becomes frozen almost
+instantaneously.
+
+Fix this by checking PLIC's enable bit instead of irqd_irq_disabled().
+
+Fixes: cc9f04f9a84f ("irqchip/sifive-plic: Implement irq_set_affinity() for SMP host")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260212114125.3148067-1-namcao@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-sifive-plic.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index c0cf4fed13e09..b58b3cd807d40 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -154,8 +154,13 @@ static void plic_irq_disable(struct irq_data *d)
+ static void plic_irq_eoi(struct irq_data *d)
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
++ u32 __iomem *reg;
++ bool enabled;
++
++ reg = handler->enable_base + (d->hwirq / 32) * sizeof(u32);
++ enabled = readl(reg) & BIT(d->hwirq % 32);
+
+- if (unlikely(irqd_irq_disabled(d))) {
++ if (unlikely(!enabled)) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+--
+2.51.0
+
--- /dev/null
+From e1ad41f222bbf8b385560106f7f3980b03349d18 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Jul 2025 10:25:26 -0700
+Subject: KVM: arm64: Advertise support for FEAT_SCTLR2
+
+From: Oliver Upton <oliver.upton@linux.dev>
+
+[ Upstream commit 075c2dc7367e7e80d83adae8db597e48ceb7ba94 ]
+
+Everything is in place to handle the additional state for SCTLR2_ELx,
+which is all that FEAT_SCTLR2 implies.
+
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20250708172532.1699409-22-oliver.upton@linux.dev
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Stable-dep-of: f66857bafd4f ("KVM: arm64: Hide S1POE from guests when not supported by the host")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/sys_regs.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 5c09c788aaa61..784603a355487 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1559,8 +1559,10 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
+ val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
+ break;
+ case SYS_ID_AA64MMFR3_EL1:
+- val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE |
+- ID_AA64MMFR3_EL1_S1PIE;
++ val &= ID_AA64MMFR3_EL1_TCRX |
++ ID_AA64MMFR3_EL1_SCTLRX |
++ ID_AA64MMFR3_EL1_S1POE |
++ ID_AA64MMFR3_EL1_S1PIE;
+ break;
+ case SYS_ID_MMFR4_EL1:
+ val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+@@ -2521,6 +2523,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ ID_AA64MMFR2_EL1_NV |
+ ID_AA64MMFR2_EL1_CCIDX)),
+ ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
++ ID_AA64MMFR3_EL1_SCTLRX |
+ ID_AA64MMFR3_EL1_S1PIE |
+ ID_AA64MMFR3_EL1_S1POE)),
+ ID_SANITISED(ID_AA64MMFR4_EL1),
+--
+2.51.0
+
--- /dev/null
+From 1b21c82ba0b23ae7f3275637441929b3263e34e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 14:38:12 +0000
+Subject: KVM: arm64: Hide S1POE from guests when not supported by the host
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit f66857bafd4f151c5cc6856e47be2e12c1721e43 ]
+
+When CONFIG_ARM64_POE is disabled, KVM does not save/restore POR_EL1.
+However, ID_AA64MMFR3_EL1 sanitisation currently exposes the feature to
+guests whenever the hardware supports it, ignoring the host kernel
+configuration.
+
+If a guest detects this feature and attempts to use it, the host will
+fail to context-switch POR_EL1, potentially leading to state corruption.
+
+Fix this by masking ID_AA64MMFR3_EL1.S1POE in the sanitised system
+registers, preventing KVM from advertising the feature when the host
+does not support it (i.e. system_supports_poe() is false).
+
+Fixes: 70ed7238297f ("KVM: arm64: Sanitise ID_AA64MMFR3_EL1")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://patch.msgid.link/20260213143815.1732675-2-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/sys_regs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 784603a355487..a76b3182e0917 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1563,6 +1563,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
+ ID_AA64MMFR3_EL1_SCTLRX |
+ ID_AA64MMFR3_EL1_S1POE |
+ ID_AA64MMFR3_EL1_S1PIE;
++
++ if (!system_supports_poe())
++ val &= ~ID_AA64MMFR3_EL1_S1POE;
+ break;
+ case SYS_ID_MMFR4_EL1:
+ val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+--
+2.51.0
+
--- /dev/null
+From 37cfa946f298133c2059e627d17b02c57a7706c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 06:10:08 -0600
+Subject: PCI: Correct PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit 39195990e4c093c9eecf88f29811c6de29265214 ]
+
+fb82437fdd8c ("PCI: Change capability register offsets to hex") incorrectly
+converted the PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value from decimal 52 to hex
+0x32:
+
+ -#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
+ +#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+
+This broke PCI capabilities in a VMM because subsequent ones weren't
+DWORD-aligned.
+
+Change PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 to the correct value of 0x34.
+
+fb82437fdd8c was from Baruch Siach <baruch@tkos.co.il>, but this was not
+Baruch's fault; it's a mistake I made when applying the patch.
+
+Fixes: fb82437fdd8c ("PCI: Change capability register offsets to hex")
+Reported-by: David Woodhouse <dwmw2@infradead.org>
+Closes: https://lore.kernel.org/all/3ae392a0158e9d9ab09a1d42150429dd8ca42791.camel@infradead.org
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/pci_regs.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index f3c9de0a497cf..bf6c143551ec0 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -699,7 +699,7 @@
+ #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
+ #define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+ #define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
+-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
++#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x34 /* end of v2 EPs w/ link */
+ #define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
+ #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
+ #define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+--
+2.51.0
+
--- /dev/null
+From a7a52190b36f870069844551ae50593154a479d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 12 Oct 2024 20:32:46 +0900
+Subject: PCI: dwc: endpoint: Implement the pci_epc_ops::align_addr() operation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit e73ea1c2d4d8f7ba5daaf7aa51171f63cf79bcd8 ]
+
+The function dw_pcie_prog_outbound_atu() used to program outbound ATU
+entries for mapping RC PCI addresses to local CPU addresses does not
+allow PCI addresses that are not aligned to the value of region_align
+of struct dw_pcie. This value is determined from the iATU hardware
+registers during probing of the iATU (done by dw_pcie_iatu_detect()).
+This value is thus valid for all DWC PCIe controllers, and valid
+regardless of the hardware configuration used when synthesizing the
+DWC PCIe controller.
+
+Implement the ->align_addr() endpoint controller operation to allow
+this mapping alignment to be transparently handled by endpoint function
+drivers through the function pci_epc_mem_map().
+
+Link: https://lore.kernel.org/linux-pci/20241012113246.95634-7-dlemoal@kernel.org
+Link: https://lore.kernel.org/linux-pci/20241015090712.112674-1-dlemoal@kernel.org
+Link: https://lore.kernel.org/linux-pci/20241017132052.4014605-5-cassel@kernel.org
+Co-developed-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+[mani: squashed the patch that changed phy_addr_t to u64]
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+[kwilczynski: squashed patch that updated the pci_size variable]
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Stable-dep-of: c22533c66cca ("PCI: dwc: ep: Flush MSI-X write before unmapping its ATU entry")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware-ep.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 00289948f9c12..b093c4153f14f 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -290,6 +290,20 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+ return -EINVAL;
+ }
+
++static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
++ size_t *pci_size, size_t *offset)
++{
++ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u64 mask = pci->region_align - 1;
++ size_t ofst = pci_addr & mask;
++
++ *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
++ *offset = ofst;
++
++ return pci_addr & ~mask;
++}
++
+ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr)
+ {
+@@ -467,6 +481,7 @@ static const struct pci_epc_ops epc_ops = {
+ .write_header = dw_pcie_ep_write_header,
+ .set_bar = dw_pcie_ep_set_bar,
+ .clear_bar = dw_pcie_ep_clear_bar,
++ .align_addr = dw_pcie_ep_align_addr,
+ .map_addr = dw_pcie_ep_map_addr,
+ .unmap_addr = dw_pcie_ep_unmap_addr,
+ .set_msi = dw_pcie_ep_set_msi,
+--
+2.51.0
+
--- /dev/null
+From 0040c7238b5449f623c9b4c724d2742a06c7d4fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 18:55:41 +0100
+Subject: PCI: dwc: ep: Flush MSI-X write before unmapping its ATU entry
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit c22533c66ccae10511ad6a7afc34bb26c47577e3 ]
+
+Endpoint drivers use dw_pcie_ep_raise_msix_irq() to raise an MSI-X
+interrupt to the host using a writel(), which generates a PCI posted write
+transaction. There's no completion for posted writes, so the writel() may
+return before the PCI write completes. dw_pcie_ep_raise_msix_irq() also
+unmaps the outbound ATU entry used for the PCI write, so the write races
+with the unmap.
+
+If the PCI write loses the race with the ATU unmap, the write may corrupt
+host memory or cause IOMMU errors, e.g., these when running fio with a
+larger queue depth against nvmet-pci-epf:
+
+ arm-smmu-v3 fc900000.iommu: 0x0000010000000010
+ arm-smmu-v3 fc900000.iommu: 0x0000020000000000
+ arm-smmu-v3 fc900000.iommu: 0x000000090000f040
+ arm-smmu-v3 fc900000.iommu: 0x0000000000000000
+ arm-smmu-v3 fc900000.iommu: event: F_TRANSLATION client: 0000:01:00.0 sid: 0x100 ssid: 0x0 iova: 0x90000f040 ipa: 0x0
+ arm-smmu-v3 fc900000.iommu: unpriv data write s1 "Input address caused fault" stag: 0x0
+
+Flush the write by performing a readl() of the same address to ensure that
+the write has reached the destination before the ATU entry is unmapped.
+
+The same problem was solved for dw_pcie_ep_raise_msi_irq() in commit
+8719c64e76bf ("PCI: dwc: ep: Cache MSI outbound iATU mapping"), but there
+it was solved by dedicating an outbound iATU only for MSI. We can't do the
+same for MSI-X because each vector can have a different msg_addr and the
+msg_addr may be changed while the vector is masked.
+
+Fixes: beb4641a787d ("PCI: dwc: Add MSI-X callbacks handler")
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20260211175540.105677-2-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware-ep.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index b8c9cb5d65f70..189675747b2bc 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -647,6 +647,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+
+ writel(msg_data, ep->msi_mem + offset);
+
++ /* flush posted write before unmap */
++ readl(ep->msi_mem + offset);
++
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 6234e363eb8e505b0f01f69642128a911ace8d4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2024 15:20:55 +0200
+Subject: PCI: dwc: ep: Use align addr function for
+ dw_pcie_ep_raise_{msi,msix}_irq()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit 3fafc38b77bebeeea5faa2a588b92353775bb390 ]
+
+Use the dw_pcie_ep_align_addr() function to calculate the alignment in
+dw_pcie_ep_raise_{msi,msix}_irq() instead of open coding the same.
+
+Link: https://lore.kernel.org/r/20241017132052.4014605-6-cassel@kernel.org
+Link: https://lore.kernel.org/r/20241104205144.409236-2-cassel@kernel.org
+Tested-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+[kwilczynski: squashed patch that fixes memory map sizes]
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Stable-dep-of: c22533c66cca ("PCI: dwc: ep: Flush MSI-X write before unmapping its ATU entry")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../pci/controller/dwc/pcie-designware-ep.c | 20 +++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index b093c4153f14f..b8c9cb5d65f70 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -526,7 +526,8 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u32 msg_addr_lower, msg_addr_upper, reg;
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epc *epc = ep->epc;
+- unsigned int aligned_offset;
++ size_t map_size = sizeof(u32);
++ size_t offset;
+ u16 msg_ctrl, msg_data;
+ bool has_upper;
+ u64 msg_addr;
+@@ -554,14 +555,13 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ }
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+- msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
++ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+- epc->mem->window.page_size);
++ map_size);
+ if (ret)
+ return ret;
+
+- writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
++ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+@@ -612,8 +612,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epc *epc = ep->epc;
++ size_t map_size = sizeof(u32);
++ size_t offset;
+ u32 reg, msg_data, vec_ctrl;
+- unsigned int aligned_offset;
+ u32 tbl_offset;
+ u64 msg_addr;
+ int ret;
+@@ -638,14 +639,13 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ return -EPERM;
+ }
+
+- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+- msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
++ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+- epc->mem->window.page_size);
++ map_size);
+ if (ret)
+ return ret;
+
+- writel(msg_data, ep->msi_mem + aligned_offset);
++ writel(msg_data, ep->msi_mem + offset);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+--
+2.51.0
+
--- /dev/null
+From 7ed1c825066561fd8949dcffc26997cd56c7739a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 12 Oct 2024 20:32:41 +0900
+Subject: PCI: endpoint: Introduce pci_epc_function_is_valid()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit ca3c342fb3c76eee739a1cfc4ff59841722ebee7 ]
+
+Introduce the epc core helper function pci_epc_function_is_valid() to
+verify that an epc pointer, a physical function number and a virtual
+function number are all valid. This avoids repeating the code pattern:
+
+if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return err;
+
+if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return err;
+
+in many functions of the endpoint controller core code.
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Link: https://lore.kernel.org/r/20241012113246.95634-2-dlemoal@kernel.org
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Stable-dep-of: c22533c66cca ("PCI: dwc: ep: Flush MSI-X write before unmapping its ATU entry")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/endpoint/pci-epc-core.c | 79 +++++++++++------------------
+ 1 file changed, 31 insertions(+), 48 deletions(-)
+
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index de665342dc16d..66c7434a63153 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -128,6 +128,18 @@ enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
+
++static bool pci_epc_function_is_valid(struct pci_epc *epc,
++ u8 func_no, u8 vfunc_no)
++{
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
++ return false;
++
++ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ return false;
++
++ return true;
++}
++
+ /**
+ * pci_epc_get_features() - get the features supported by EPC
+ * @epc: the features supported by *this* EPC device will be returned
+@@ -145,10 +157,7 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
+ {
+ const struct pci_epc_features *epc_features;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+- return NULL;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return NULL;
+
+ if (!epc->ops->get_features)
+@@ -218,10 +227,7 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ {
+ int ret;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+- return -EINVAL;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+ if (!epc->ops->raise_irq)
+@@ -262,10 +268,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ {
+ int ret;
+
+- if (IS_ERR_OR_NULL(epc))
+- return -EINVAL;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+ if (!epc->ops->map_msi_irq)
+@@ -293,10 +296,7 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+ {
+ int interrupt;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+- return 0;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return 0;
+
+ if (!epc->ops->get_msi)
+@@ -329,11 +329,10 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
+ int ret;
+ u8 encode_int;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+- interrupts < 1 || interrupts > 32)
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (interrupts < 1 || interrupts > 32)
+ return -EINVAL;
+
+ if (!epc->ops->set_msi)
+@@ -361,10 +360,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+ {
+ int interrupt;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+- return 0;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return 0;
+
+ if (!epc->ops->get_msix)
+@@ -397,11 +393,10 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ {
+ int ret;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+- interrupts < 1 || interrupts > 2048)
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (interrupts < 1 || interrupts > 2048)
+ return -EINVAL;
+
+ if (!epc->ops->set_msix)
+@@ -428,10 +423,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
+ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr)
+ {
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+- return;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return;
+
+ if (!epc->ops->unmap_addr)
+@@ -459,10 +451,7 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ {
+ int ret;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+- return -EINVAL;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+ if (!epc->ops->map_addr)
+@@ -489,12 +478,11 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
+ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+ {
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+- (epf_bar->barno == BAR_5 &&
+- epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return;
+
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (epf_bar->barno == BAR_5 &&
++ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ return;
+
+ if (!epc->ops->clear_bar)
+@@ -521,18 +509,16 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ int ret;
+ int flags = epf_bar->flags;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+- (epf_bar->barno == BAR_5 &&
+- flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
++ return -EINVAL;
++
++ if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
+ (flags & PCI_BASE_ADDRESS_SPACE_IO &&
+ flags & PCI_BASE_ADDRESS_IO_MASK) ||
+ (upper_32_bits(epf_bar->size) &&
+ !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
+ return -EINVAL;
+
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+- return -EINVAL;
+-
+ if (!epc->ops->set_bar)
+ return 0;
+
+@@ -561,10 +547,7 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ {
+ int ret;
+
+- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+- return -EINVAL;
+-
+- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+ /* Only Virtual Function #1 has deviceID */
+--
+2.51.0
+
--- /dev/null
+From c1e1e1b593fc2eca7b06eac90712e7cd4be3ca6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 12 Oct 2024 20:32:43 +0900
+Subject: PCI: endpoint: Introduce pci_epc_mem_map()/unmap()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit ce1dfe6d328966b75821c1f043a940eb2569768a ]
+
+Some endpoint controllers have requirements on the alignment of the
+controller physical memory address that must be used to map a RC PCI
+address region. For instance, the endpoint controller of the RK3399 SoC
+uses at most the lower 20 bits of a physical memory address region as
+the lower bits of a RC PCI address region. For mapping a PCI address
+region of size bytes starting from pci_addr, the exact number of
+address bits used is the number of address bits changing in the address
+range [pci_addr..pci_addr + size - 1]. For this example, this creates
+the following constraints:
+1) The offset into the controller physical memory allocated for a
+ mapping depends on the mapping size *and* the starting PCI address
+ for the mapping.
+2) A mapping size cannot exceed the controller windows size (1MB) minus
+ the offset needed into the allocated physical memory, which can end
+ up being a smaller size than the desired mapping size.
+
+Handling these constraints independently of the controller being used
+in an endpoint function driver is not possible with the current EPC
+API as only the ->align field in struct pci_epc_features is provided
+but used for BAR (inbound ATU mappings) mapping only. A new API is
+needed for function drivers to discover mapping constraints and handle
+non-static requirements based on the RC PCI address range to access.
+
+Introduce the endpoint controller operation ->align_addr() to allow
+the EPC core functions to obtain the size and the offset into a
+controller address region that must be allocated and mapped to access
+a RC PCI address region. The size of the mapping provided by the
+align_addr() operation can then be used as the size argument for the
+function pci_epc_mem_alloc_addr() and the offset into the allocated
+controller memory provided can be used to correctly handle data
+transfers. For endpoint controllers that have PCI address alignment
+constraints, the align_addr() operation may indicate upon return an
+effective PCI address mapping size that is smaller (but not 0) than the
+requested PCI address region size.
+
+The controller ->align_addr() operation is optional: controllers that
+do not have any alignment constraints for mapping RC PCI address regions
+do not need to implement this operation. For such controllers, it is
+always assumed that the mapping size is equal to the requested size of
+the PCI region and that the mapping offset is 0.
+
+The function pci_epc_mem_map() is introduced to use this new controller
+operation (if it is defined) to handle controller memory allocation and
+mapping to a RC PCI address region in endpoint function drivers.
+
+This function first uses the ->align_addr() controller operation to
+determine the controller memory address size (and offset into) needed
+for mapping an RC PCI address region. The result of this operation is
+used to allocate a controller physical memory region using
+pci_epc_mem_alloc_addr() and then to map that memory to the RC PCI
+address space with pci_epc_map_addr().
+
+Since ->align_addr() may indicate that not all of a RC PCI address
+region can be mapped, pci_epc_mem_map() may only partially map the RC
+PCI address region specified. It is the responsibility of the caller
+(an endpoint function driver) to handle such smaller mapping by
+repeatedly using pci_epc_mem_map() over the desired PCI address range.
+
+The counterpart of pci_epc_mem_map() to unmap and free a mapped
+controller memory address region is pci_epc_mem_unmap().
+
+Both functions operate using the new struct pci_epc_map data structure.
+This new structure represents a mapping PCI address, mapping effective
+size, the size of the controller memory needed for the mapping as well
+as the physical and virtual CPU addresses of the mapping (phys_base and
+virt_base fields). For convenience, the physical and virtual CPU
+addresses within that mapping to use to access the target RC PCI address
+region are also provided (phys_addr and virt_addr fields).
+
+Endpoint function drivers can use struct pci_epc_map to access the
+mapped RC PCI address region using the ->virt_addr and ->pci_size
+fields.
+
+Co-developed-by: Rick Wertenbroek <rick.wertenbroek@gmail.com>
+Signed-off-by: Rick Wertenbroek <rick.wertenbroek@gmail.com>
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20241012113246.95634-4-dlemoal@kernel.org
+[mani: squashed the patch that changed phy_addr_t to u64]
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Stable-dep-of: c22533c66cca ("PCI: dwc: ep: Flush MSI-X write before unmapping its ATU entry")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/endpoint/pci-epc-core.c | 103 ++++++++++++++++++++++++++++
+ include/linux/pci-epc.h | 38 ++++++++++
+ 2 files changed, 141 insertions(+)
+
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index 66c7434a63153..75c6688290034 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -466,6 +466,109 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
+
++/**
++ * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
++ * @epc: the EPC device on which the CPU address is to be allocated and mapped
++ * @func_no: the physical endpoint function number in the EPC device
++ * @vfunc_no: the virtual endpoint function number in the physical function
++ * @pci_addr: PCI address to which the CPU address should be mapped
++ * @pci_size: the number of bytes to map starting from @pci_addr
++ * @map: where to return the mapping information
++ *
++ * Allocate a controller memory address region and map it to a RC PCI address
++ * region, taking into account the controller physical address mapping
++ * constraints using the controller operation align_addr(). If this operation is
++ * not defined, we assume that there are no alignment constraints for the
++ * mapping.
++ *
++ * The effective size of the PCI address range mapped from @pci_addr is
++ * indicated by @map->pci_size. This size may be less than the requested
++ * @pci_size. The local virtual CPU address for the mapping is indicated by
++ * @map->virt_addr (@map->phys_addr indicates the physical address).
++ * The size and CPU address of the controller memory allocated and mapped are
++ * respectively indicated by @map->map_size and @map->virt_base (and
++ * @map->phys_base for the physical address of @map->virt_base).
++ *
++ * Returns 0 on success and a negative error code in case of error.
++ */
++int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
++ u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
++{
++ size_t map_size = pci_size;
++ size_t map_offset = 0;
++ int ret;
++
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
++ return -EINVAL;
++
++ if (!pci_size || !map)
++ return -EINVAL;
++
++ /*
++ * Align the PCI address to map. If the controller defines the
++ * .align_addr() operation, use it to determine the PCI address to map
++ * and the size of the mapping. Otherwise, assume that the controller
++ * has no alignment constraint.
++ */
++ memset(map, 0, sizeof(*map));
++ map->pci_addr = pci_addr;
++ if (epc->ops->align_addr)
++ map->map_pci_addr =
++ epc->ops->align_addr(epc, pci_addr,
++ &map_size, &map_offset);
++ else
++ map->map_pci_addr = pci_addr;
++ map->map_size = map_size;
++ if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
++ map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
++ else
++ map->pci_size = pci_size;
++
++ map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
++ map->map_size);
++ if (!map->virt_base)
++ return -ENOMEM;
++
++ map->phys_addr = map->phys_base + map_offset;
++ map->virt_addr = map->virt_base + map_offset;
++
++ ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
++ map->map_pci_addr, map->map_size);
++ if (ret) {
++ pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
++ map->map_size);
++ return ret;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(pci_epc_mem_map);
++
++/**
++ * pci_epc_mem_unmap() - unmap and free a CPU address region
++ * @epc: the EPC device on which the CPU address is allocated and mapped
++ * @func_no: the physical endpoint function number in the EPC device
++ * @vfunc_no: the virtual endpoint function number in the physical function
++ * @map: the mapping information
++ *
++ * Unmap and free a CPU address region that was allocated and mapped with
++ * pci_epc_mem_map().
++ */
++void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
++ struct pci_epc_map *map)
++{
++ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
++ return;
++
++ if (!map || !map->virt_base)
++ return;
++
++ pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
++ pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
++ map->map_size);
++}
++EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
++
+ /**
+ * pci_epc_clear_bar() - reset the BAR
+ * @epc: the EPC device for which the BAR has to be cleared
+diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
+index 42ef06136bd1a..de8cc3658220b 100644
+--- a/include/linux/pci-epc.h
++++ b/include/linux/pci-epc.h
+@@ -32,11 +32,43 @@ pci_epc_interface_string(enum pci_epc_interface_type type)
+ }
+ }
+
++/**
++ * struct pci_epc_map - information about EPC memory for mapping a RC PCI
++ * address range
++ * @pci_addr: start address of the RC PCI address range to map
++ * @pci_size: size of the RC PCI address range mapped from @pci_addr
++ * @map_pci_addr: RC PCI address used as the first address mapped (may be lower
++ * than @pci_addr)
++ * @map_size: size of the controller memory needed for mapping the RC PCI address
++ * range @pci_addr..@pci_addr+@pci_size
++ * @phys_base: base physical address of the allocated EPC memory for mapping the
++ * RC PCI address range
++ * @phys_addr: physical address at which @pci_addr is mapped
++ * @virt_base: base virtual address of the allocated EPC memory for mapping the
++ * RC PCI address range
++ * @virt_addr: virtual address at which @pci_addr is mapped
++ */
++struct pci_epc_map {
++ u64 pci_addr;
++ size_t pci_size;
++
++ u64 map_pci_addr;
++ size_t map_size;
++
++ phys_addr_t phys_base;
++ phys_addr_t phys_addr;
++ void __iomem *virt_base;
++ void __iomem *virt_addr;
++};
++
+ /**
+ * struct pci_epc_ops - set of function pointers for performing EPC operations
+ * @write_header: ops to populate configuration space header
+ * @set_bar: ops to configure the BAR
+ * @clear_bar: ops to reset the BAR
++ * @align_addr: operation to get the mapping address, mapping size and offset
++ * into a controller memory window needed to map an RC PCI address
++ * region
+ * @map_addr: ops to map CPU address to PCI address
+ * @unmap_addr: ops to unmap CPU address and PCI address
+ * @set_msi: ops to set the requested number of MSI interrupts in the MSI
+@@ -61,6 +93,8 @@ struct pci_epc_ops {
+ struct pci_epf_bar *epf_bar);
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar);
++ u64 (*align_addr)(struct pci_epc *epc, u64 pci_addr, size_t *size,
++ size_t *offset);
+ int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+@@ -278,6 +312,10 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size);
+ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size);
++int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
++ u64 pci_addr, size_t pci_size, struct pci_epc_map *map);
++void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
++ struct pci_epc_map *map);
+
+ #else
+ static inline void pci_epc_init_notify(struct pci_epc *epc)
+--
+2.51.0
+
--- /dev/null
+From 2e9ef94daef9012a4eb1c6e3ab4d66c43aa58ac7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:29:09 +0100
+Subject: perf: Fix __perf_event_overflow() vs perf_remove_from_context() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c9bc1753b3cc41d0e01fbca7f035258b5f4db0ae ]
+
+Make sure that __perf_event_overflow() runs with IRQs disabled for all
+possible callchains. Specifically the software events can end up running
+it with only preemption disabled.
+
+This opens up a race vs perf_event_exit_event() and friends that will go
+and free various things the overflow path expects to be present, like
+the BPF program.
+
+Fixes: 592903cdcbf6 ("perf_counter: add an event_list")
+Reported-by: Simond Hu <cmdhh1767@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Simond Hu <cmdhh1767@gmail.com>
+Link: https://patch.msgid.link/20260224122909.GV1395416@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 42 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 01a87cd9b5cce..814b6536b09d4 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10001,6 +10001,13 @@ int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+ {
++ /*
++ * Entry point from hardware PMI, interrupts should be disabled here.
++ * This serializes us against perf_event_remove_from_context() in
++ * things like perf_event_release_kernel().
++ */
++ lockdep_assert_irqs_disabled();
++
+ return __perf_event_overflow(event, 1, data, regs);
+ }
+
+@@ -10077,6 +10084,19 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ {
+ struct hw_perf_event *hwc = &event->hw;
+
++ /*
++ * This is:
++ * - software preempt
++ * - tracepoint preempt
++ * - tp_target_task irq (ctx->lock)
++ * - uprobes preempt/irq
++ * - kprobes preempt/irq
++ * - hw_breakpoint irq
++ *
++ * Any of these are sufficient to hold off RCU and thus ensure @event
++ * exists.
++ */
++ lockdep_assert_preemption_disabled();
+ local64_add(nr, &event->count);
+
+ if (!regs)
+@@ -10085,6 +10105,16 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ if (!is_sampling_event(event))
+ return;
+
++ /*
++ * Serialize against event_function_call() IPIs like normal overflow
++ * event handling. Specifically, must not allow
++ * perf_event_release_kernel() -> perf_remove_from_context() to make
++ * progress and 'release' the event from under us.
++ */
++ guard(irqsave)();
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+@@ -10584,6 +10614,11 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ struct perf_sample_data data;
+ struct perf_event *event;
+
++ /*
++ * Per being a tracepoint, this runs with preemption disabled.
++ */
++ lockdep_assert_preemption_disabled();
++
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = entry_size,
+@@ -10906,6 +10941,11 @@ void perf_bp_event(struct perf_event *bp, void *data)
+ struct perf_sample_data sample;
+ struct pt_regs *regs = data;
+
++ /*
++ * Exception context, will have interrupts disabled.
++ */
++ lockdep_assert_irqs_disabled();
++
+ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+@@ -11358,7 +11398,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
+- if (__perf_event_overflow(event, 1, &data, regs))
++ if (perf_event_overflow(event, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f510ef09f2e7aa9467c9b8363bce72fe32ccbd63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 15:06:40 -0500
+Subject: rseq: Clarify rseq registration rseq_size bound check comment
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+[ Upstream commit 26d43a90be81fc90e26688a51d3ec83188602731 ]
+
+The rseq registration validates that the rseq_size argument is greater
+or equal to 32 (the original rseq size), but the comment associated with
+this check does not clearly state this.
+
+Clarify the comment to that effect.
+
+Fixes: ee3e3ac05c26 ("rseq: Introduce extensible rseq ABI")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260220200642.1317826-2-mathieu.desnoyers@efficios.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/rseq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 810005f927d7c..e6ee81dd1e457 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -432,8 +432,9 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
+ * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
+ * size, the required alignment is the original struct rseq alignment.
+ *
+- * In order to be valid, rseq_len is either the original rseq size, or
+- * large enough to contain all supported fields, as communicated to
++ * The rseq_len is required to be greater or equal to the original rseq
++ * size. In order to be valid, rseq_len is either the original rseq size,
++ * or large enough to contain all supported fields, as communicated to
+ * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
+ */
+ if (rseq_len < ORIG_RSEQ_SIZE ||
+--
+2.51.0
+
--- /dev/null
+From 2650f49733bba09f8520f5d861f7ef9d07596c4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 15:20:04 +0100
+Subject: s390/idle: Fix cpu idle exit cpu time accounting
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 0d785e2c324c90662baa4fe07a0d02233ff92824 ]
+
+With the conversion to generic entry [1] cpu idle exit cpu time accounting
+was converted from assembly to C. This introduced a reversed order of cpu
+time accounting.
+
+On cpu idle exit the current accounting happens with the following call
+chain:
+
+-> do_io_irq()/do_ext_irq()
+ -> irq_enter_rcu()
+ -> account_hardirq_enter()
+ -> vtime_account_irq()
+ -> vtime_account_kernel()
+
+vtime_account_kernel() accounts the passed cpu time since last_update_timer
+as system time, and updates last_update_timer to the current cpu timer
+value.
+
+However the subsequent call of
+
+ -> account_idle_time_irq()
+
+will incorrectly subtract passed cpu time from timer_idle_enter to the
+updated last_update_timer value from system_timer. Then last_update_timer
+is updated to a sys_enter_timer, which means that last_update_timer goes
+back in time.
+
+Subsequently account_hardirq_exit() will account too much cpu time as
+hardirq time. The sum of all accounted cpu times is still correct, however
+some cpu time which was previously accounted as system time is now
+accounted as hardirq time, plus there is the oddity that last_update_timer
+goes back in time.
+
+Restore previous behavior by extracting cpu time accounting code from
+account_idle_time_irq() into a new update_timer_idle() function and call it
+before irq_enter_rcu().
+
+Fixes: 56e62a737028 ("s390: convert to generic entry") [1]
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/idle.h | 1 +
+ arch/s390/kernel/idle.c | 13 +++++++++----
+ arch/s390/kernel/irq.c | 10 ++++++++--
+ 3 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
+index 09f763b9eb40a..133059d9a949c 100644
+--- a/arch/s390/include/asm/idle.h
++++ b/arch/s390/include/asm/idle.h
+@@ -23,5 +23,6 @@ extern struct device_attribute dev_attr_idle_count;
+ extern struct device_attribute dev_attr_idle_time_us;
+
+ void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
++void update_timer_idle(void);
+
+ #endif /* _S390_IDLE_H */
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index 39cb8d0ae3480..0f9e53f0a0686 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -21,11 +21,10 @@
+
+ static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
+-void account_idle_time_irq(void)
++void update_timer_idle(void)
+ {
+ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+ struct lowcore *lc = get_lowcore();
+- unsigned long idle_time;
+ u64 cycles_new[8];
+ int i;
+
+@@ -35,13 +34,19 @@ void account_idle_time_irq(void)
+ this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
+ }
+
+- idle_time = lc->int_clock - idle->clock_idle_enter;
+-
+ lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
+ lc->last_update_clock = lc->int_clock;
+
+ lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
+ lc->last_update_timer = lc->sys_enter_timer;
++}
++
++void account_idle_time_irq(void)
++{
++ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
++ unsigned long idle_time;
++
++ idle_time = get_lowcore()->int_clock - idle->clock_idle_enter;
+
+ /* Account time spent with enabled wait psw loaded as idle time. */
+ WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
+diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
+index 2639a3d12736a..1fe941dc86c32 100644
+--- a/arch/s390/kernel/irq.c
++++ b/arch/s390/kernel/irq.c
+@@ -140,6 +140,10 @@ void noinstr do_io_irq(struct pt_regs *regs)
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ bool from_idle;
+
++ from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
++ if (from_idle)
++ update_timer_idle();
++
+ irq_enter_rcu();
+
+ if (user_mode(regs)) {
+@@ -148,7 +152,6 @@ void noinstr do_io_irq(struct pt_regs *regs)
+ current->thread.last_break = regs->last_break;
+ }
+
+- from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+ if (from_idle)
+ account_idle_time_irq();
+
+@@ -176,6 +179,10 @@ void noinstr do_ext_irq(struct pt_regs *regs)
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ bool from_idle;
+
++ from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
++ if (from_idle)
++ update_timer_idle();
++
+ irq_enter_rcu();
+
+ if (user_mode(regs)) {
+@@ -188,7 +195,6 @@ void noinstr do_ext_irq(struct pt_regs *regs)
+ regs->int_parm = get_lowcore()->ext_params;
+ regs->int_parm_long = get_lowcore()->ext_params2;
+
+- from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+ if (from_idle)
+ account_idle_time_irq();
+
+--
+2.51.0
+
--- /dev/null
+From 98c9cebcc7f830031b8f8ea8dc44aee27bbc567a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 15:20:05 +0100
+Subject: s390/vtime: Fix virtual timer forwarding
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit dbc0fb35679ed5d0adecf7d02137ac2c77244b3b ]
+
+Since delayed accounting of system time [1] the virtual timer is
+forwarded by do_account_vtime() but also vtime_account_kernel(),
+vtime_account_softirq(), and vtime_account_hardirq(). This leads
+to double accounting of system, guest, softirq, and hardirq time.
+
+Remove accounting from the vtime_account*() family to restore old behavior.
+
+There is only one user of the vtimer interface, which might explain
+why nobody noticed this so far.
+
+Fixes: b7394a5f4ce9 ("sched/cputime, s390: Implement delayed accounting of system time") [1]
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/vtime.c | 18 ++----------------
+ 1 file changed, 2 insertions(+), 16 deletions(-)
+
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 234a0ba305108..122d30b104401 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -225,10 +225,6 @@ static u64 vtime_delta(void)
+ return timer - lc->last_update_timer;
+ }
+
+-/*
+- * Update process times based on virtual cpu times stored by entry.S
+- * to the lowcore fields user_timer, system_timer & steal_clock.
+- */
+ void vtime_account_kernel(struct task_struct *tsk)
+ {
+ struct lowcore *lc = get_lowcore();
+@@ -238,27 +234,17 @@ void vtime_account_kernel(struct task_struct *tsk)
+ lc->guest_timer += delta;
+ else
+ lc->system_timer += delta;
+-
+- virt_timer_forward(delta);
+ }
+ EXPORT_SYMBOL_GPL(vtime_account_kernel);
+
+ void vtime_account_softirq(struct task_struct *tsk)
+ {
+- u64 delta = vtime_delta();
+-
+- get_lowcore()->softirq_timer += delta;
+-
+- virt_timer_forward(delta);
++ get_lowcore()->softirq_timer += vtime_delta();
+ }
+
+ void vtime_account_hardirq(struct task_struct *tsk)
+ {
+- u64 delta = vtime_delta();
+-
+- get_lowcore()->hardirq_timer += delta;
+-
+- virt_timer_forward(delta);
++ get_lowcore()->hardirq_timer += vtime_delta();
+ }
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From b375ec49f527ea3b4d0c3ebfa1686d14ec494810 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jan 2025 11:59:59 +0100
+Subject: sched/fair: Fix EEVDF entity placement bug causing scheduling lag
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 6d71a9c6160479899ee744d2c6d6602a191deb1f ]
+
+I noticed this in my traces today:
+
+ turbostat-1222 [006] d..2. 311.935649: reweight_entity: (ffff888108f13e00-ffff88885ef38440-6)
+ { weight: 1048576 avg_vruntime: 3184159639071 vruntime: 3184159640194 (-1123) deadline: 3184162621107 } ->
+ { weight: 2 avg_vruntime: 3184177463330 vruntime: 3184748414495 (-570951165) deadline: 4747605329439 }
+ turbostat-1222 [006] d..2. 311.935651: reweight_entity: (ffff888108f13e00-ffff88885ef38440-6)
+ { weight: 2 avg_vruntime: 3184177463330 vruntime: 3184748414495 (-570951165) deadline: 4747605329439 } ->
+ { weight: 1048576 avg_vruntime: 3184176414812 vruntime: 3184177464419 (-1049607) deadline: 3184180445332 }
+
+Which is a weight transition: 1048576 -> 2 -> 1048576.
+
+One would expect the lag to shoot out *AND* come back, notably:
+
+ -1123*1048576/2 = -588775424
+ -588775424*2/1048576 = -1123
+
+Except the trace shows it is all off. Worse, subsequent cycles shoot it
+out further and further.
+
+This made me have a very hard look at reweight_entity(), and
+specifically the ->on_rq case, which is more prominent with
+DELAY_DEQUEUE.
+
+And indeed, it is all sorts of broken. While the computation of the new
+lag is correct, the computation for the new vruntime, using the new lag
+is broken for it does not consider the logic set out in place_entity().
+
+With the below patch, I now see things like:
+
+ migration/12-55 [012] d..3. 309.006650: reweight_entity: (ffff8881e0e6f600-ffff88885f235f40-12)
+ { weight: 977582 avg_vruntime: 4860513347366 vruntime: 4860513347908 (-542) deadline: 4860516552475 } ->
+ { weight: 2 avg_vruntime: 4860528915984 vruntime: 4860793840706 (-264924722) deadline: 6427157349203 }
+ migration/14-62 [014] d..3. 309.006698: reweight_entity: (ffff8881e0e6cc00-ffff88885f3b5f40-15)
+ { weight: 2 avg_vruntime: 4874472992283 vruntime: 4939833828823 (-65360836540) deadline: 6316614641111 } ->
+ { weight: 967149 avg_vruntime: 4874217684324 vruntime: 4874217688559 (-4235) deadline: 4874220535650 }
+
+Which isn't perfect yet, but much closer.
+
+Reported-by: Doug Smythies <dsmythies@telus.net>
+Reported-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Fixes: eab03c23c2a1 ("sched/eevdf: Fix vruntime adjustment on reweight")
+Link: https://lore.kernel.org/r/20250109105959.GA2981@noisy.programming.kicks-ass.net
+Stable-dep-of: 6e3c0a4e1ad1 ("sched/fair: Fix lag clamp")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 145 ++++++--------------------------------------
+ 1 file changed, 18 insertions(+), 127 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 6efb1dfcd943a..bdb17a3b83f3d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -689,21 +689,16 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ *
+ * XXX could add max_slice to the augmented data to track this.
+ */
+-static s64 entity_lag(u64 avruntime, struct sched_entity *se)
++static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ s64 vlag, limit;
+
+- vlag = avruntime - se->vruntime;
+- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+-
+- return clamp(vlag, -limit, limit);
+-}
+-
+-static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+-{
+ SCHED_WARN_ON(!se->on_rq);
+
+- se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
++ vlag = avg_vruntime(cfs_rq) - se->vruntime;
++ limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
++
++ se->vlag = clamp(vlag, -limit, limit);
+ }
+
+ /*
+@@ -3763,137 +3758,32 @@ static inline void
+ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+ #endif
+
+-static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
+- unsigned long weight)
+-{
+- unsigned long old_weight = se->load.weight;
+- s64 vlag, vslice;
+-
+- /*
+- * VRUNTIME
+- * --------
+- *
+- * COROLLARY #1: The virtual runtime of the entity needs to be
+- * adjusted if re-weight at !0-lag point.
+- *
+- * Proof: For contradiction assume this is not true, so we can
+- * re-weight without changing vruntime at !0-lag point.
+- *
+- * Weight VRuntime Avg-VRuntime
+- * before w v V
+- * after w' v' V'
+- *
+- * Since lag needs to be preserved through re-weight:
+- *
+- * lag = (V - v)*w = (V'- v')*w', where v = v'
+- * ==> V' = (V - v)*w/w' + v (1)
+- *
+- * Let W be the total weight of the entities before reweight,
+- * since V' is the new weighted average of entities:
+- *
+- * V' = (WV + w'v - wv) / (W + w' - w) (2)
+- *
+- * by using (1) & (2) we obtain:
+- *
+- * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
+- * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
+- * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
+- * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
+- *
+- * Since we are doing at !0-lag point which means V != v, we
+- * can simplify (3):
+- *
+- * ==> W / (W + w' - w) = w / w'
+- * ==> Ww' = Ww + ww' - ww
+- * ==> W * (w' - w) = w * (w' - w)
+- * ==> W = w (re-weight indicates w' != w)
+- *
+- * So the cfs_rq contains only one entity, hence vruntime of
+- * the entity @v should always equal to the cfs_rq's weighted
+- * average vruntime @V, which means we will always re-weight
+- * at 0-lag point, thus breach assumption. Proof completed.
+- *
+- *
+- * COROLLARY #2: Re-weight does NOT affect weighted average
+- * vruntime of all the entities.
+- *
+- * Proof: According to corollary #1, Eq. (1) should be:
+- *
+- * (V - v)*w = (V' - v')*w'
+- * ==> v' = V' - (V - v)*w/w' (4)
+- *
+- * According to the weighted average formula, we have:
+- *
+- * V' = (WV - wv + w'v') / (W - w + w')
+- * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
+- * = (WV - wv + w'V' - Vw + wv) / (W - w + w')
+- * = (WV + w'V' - Vw) / (W - w + w')
+- *
+- * ==> V'*(W - w + w') = WV + w'V' - Vw
+- * ==> V' * (W - w) = (W - w) * V (5)
+- *
+- * If the entity is the only one in the cfs_rq, then reweight
+- * always occurs at 0-lag point, so V won't change. Or else
+- * there are other entities, hence W != w, then Eq. (5) turns
+- * into V' = V. So V won't change in either case, proof done.
+- *
+- *
+- * So according to corollary #1 & #2, the effect of re-weight
+- * on vruntime should be:
+- *
+- * v' = V' - (V - v) * w / w' (4)
+- * = V - (V - v) * w / w'
+- * = V - vl * w / w'
+- * = V - vl'
+- */
+- if (avruntime != se->vruntime) {
+- vlag = entity_lag(avruntime, se);
+- vlag = div_s64(vlag * old_weight, weight);
+- se->vruntime = avruntime - vlag;
+- }
+-
+- /*
+- * DEADLINE
+- * --------
+- *
+- * When the weight changes, the virtual time slope changes and
+- * we should adjust the relative virtual deadline accordingly.
+- *
+- * d' = v' + (d - v)*w/w'
+- * = V' - (V - v)*w/w' + (d - v)*w/w'
+- * = V - (V - v)*w/w' + (d - v)*w/w'
+- * = V + (d - V)*w/w'
+- */
+- vslice = (s64)(se->deadline - avruntime);
+- vslice = div_s64(vslice * old_weight, weight);
+- se->deadline = avruntime + vslice;
+-}
++static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
+
+ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long weight)
+ {
+ bool curr = cfs_rq->curr == se;
+- u64 avruntime;
+
+ if (se->on_rq) {
+ /* commit outstanding execution time */
+ update_curr(cfs_rq);
+- avruntime = avg_vruntime(cfs_rq);
++ update_entity_lag(cfs_rq, se);
++ se->deadline -= se->vruntime;
++ se->rel_deadline = 1;
+ if (!curr)
+ __dequeue_entity(cfs_rq, se);
+ update_load_sub(&cfs_rq->load, se->load.weight);
+ }
+ dequeue_load_avg(cfs_rq, se);
+
+- if (se->on_rq) {
+- reweight_eevdf(se, avruntime, weight);
+- } else {
+- /*
+- * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
+- * we need to scale se->vlag when w_i changes.
+- */
+- se->vlag = div_s64(se->vlag * se->load.weight, weight);
+- }
++ /*
++ * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
++ * we need to scale se->vlag when w_i changes.
++ */
++ se->vlag = div_s64(se->vlag * se->load.weight, weight);
++ if (se->rel_deadline)
++ se->deadline = div_s64(se->deadline * se->load.weight, weight);
+
+ update_load_set(&se->load, weight);
+
+@@ -3908,6 +3798,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ enqueue_load_avg(cfs_rq, se);
+ if (se->on_rq) {
+ update_load_add(&cfs_rq->load, se->load.weight);
++ place_entity(cfs_rq, se, 0);
+ if (!curr)
+ __enqueue_entity(cfs_rq, se);
+ }
+@@ -5348,7 +5239,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
+ se->vruntime = vruntime - lag;
+
+- if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) {
++ if (se->rel_deadline) {
+ se->deadline += se->vruntime;
+ se->rel_deadline = 0;
+ return;
+--
+2.51.0
+
--- /dev/null
+From d2f496fde40ba9ff80d23e9cf16236b0215a3518 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 12:16:28 +0200
+Subject: sched/fair: Fix lag clamp
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 6e3c0a4e1ad1e0455b7880fad02b3ee179f56c09 ]
+
+Vincent reported that he was seeing undue lag clamping in a mixed
+slice workload. Implement the max_slice tracking as per the todo
+comment.
+
+Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
+Reported-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20250422101628.GA33555@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sched.h | 1 +
+ kernel/sched/fair.c | 39 +++++++++++++++++++++++++++++++++++----
+ 2 files changed, 36 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index af143d3af85fa..9b722cf6ceb45 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -545,6 +545,7 @@ struct sched_entity {
+ u64 deadline;
+ u64 min_vruntime;
+ u64 min_slice;
++ u64 max_slice;
+
+ struct list_head group_node;
+ unsigned char on_rq;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index bdb17a3b83f3d..4ffa0fdb61aa3 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -673,6 +673,8 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ return cfs_rq->zero_vruntime + avg;
+ }
+
++static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq);
++
+ /*
+ * lag_i = S - s_i = w_i * (V - v_i)
+ *
+@@ -686,17 +688,16 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ * EEVDF gives the following limit for a steady state system:
+ *
+ * -r_max < lag < max(r_max, q)
+- *
+- * XXX could add max_slice to the augmented data to track this.
+ */
+ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
++ u64 max_slice = cfs_rq_max_slice(cfs_rq) + TICK_NSEC;
+ s64 vlag, limit;
+
+ SCHED_WARN_ON(!se->on_rq);
+
+ vlag = avg_vruntime(cfs_rq) - se->vruntime;
+- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
++ limit = calc_delta_fair(max_slice, se);
+
+ se->vlag = clamp(vlag, -limit, limit);
+ }
+@@ -764,6 +765,21 @@ static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+ return min_slice;
+ }
+
++static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq)
++{
++ struct sched_entity *root = __pick_root_entity(cfs_rq);
++ struct sched_entity *curr = cfs_rq->curr;
++ u64 max_slice = 0ULL;
++
++ if (curr && curr->on_rq)
++ max_slice = curr->slice;
++
++ if (root)
++ max_slice = max(max_slice, root->max_slice);
++
++ return max_slice;
++}
++
+ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+ {
+ return entity_before(__node_2_se(a), __node_2_se(b));
+@@ -789,6 +805,15 @@ static inline void __min_slice_update(struct sched_entity *se, struct rb_node *n
+ }
+ }
+
++static inline void __max_slice_update(struct sched_entity *se, struct rb_node *node)
++{
++ if (node) {
++ struct sched_entity *rse = __node_2_se(node);
++ if (rse->max_slice > se->max_slice)
++ se->max_slice = rse->max_slice;
++ }
++}
++
+ /*
+ * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
+ */
+@@ -796,6 +821,7 @@ static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
+ {
+ u64 old_min_vruntime = se->min_vruntime;
+ u64 old_min_slice = se->min_slice;
++ u64 old_max_slice = se->max_slice;
+ struct rb_node *node = &se->run_node;
+
+ se->min_vruntime = se->vruntime;
+@@ -806,8 +832,13 @@ static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
+ __min_slice_update(se, node->rb_right);
+ __min_slice_update(se, node->rb_left);
+
++ se->max_slice = se->slice;
++ __max_slice_update(se, node->rb_right);
++ __max_slice_update(se, node->rb_left);
++
+ return se->min_vruntime == old_min_vruntime &&
+- se->min_slice == old_min_slice;
++ se->min_slice == old_min_slice &&
++ se->max_slice == old_max_slice;
+ }
+
+ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+--
+2.51.0
+
--- /dev/null
+From 5520841cddf9ac5e52a526fba76e40355bbe802f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 11:23:27 -0800
+Subject: scsi: lpfc: Properly set WC for DPP mapping
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit bffda93a51b40afd67c11bf558dc5aae83ca0943 ]
+
+Using set_memory_wc() to enable write-combining for the DPP portion of
+the MMIO mapping is wrong as set_memory_*() is meant to operate on RAM
+only, not MMIO mappings. In fact, as used currently triggers a BUG_ON()
+with enabled CONFIG_DEBUG_VIRTUAL.
+
+Simply map the DPP region separately and in addition to the already
+existing mappings, avoiding any possible negative side effects for
+these.
+
+Fixes: 1351e69fc6db ("scsi: lpfc: Add push-to-adapter support to sli4")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Reviewed-by: Mathias Krause <minipli@grsecurity.net>
+Link: https://patch.msgid.link/20260212192327.141104-1-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 ++
+ drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++++++++++++++++++++------
+ drivers/scsi/lpfc/lpfc_sli4.h | 3 +++
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 08e6b8ed601c4..5b9830a28c8db 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -12044,6 +12044,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
++ if (phba->sli4_hba.dpp_regs_memmap_wc_p)
++ iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ break;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 2a1f2b2017159..7dba06fa82d85 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -15916,6 +15916,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+ return NULL;
+ }
+
++static __maybe_unused void __iomem *
++lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
++{
++
++ /* DPP region is supposed to cover 64-bit BAR2 */
++ if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
++ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
++ "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
++ dpp_barset);
++ return NULL;
++ }
++
++ if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
++ void __iomem *dpp_map;
++
++ dpp_map = ioremap_wc(phba->pci_bar2_map,
++ pci_resource_len(phba->pcidev,
++ PCI_64BIT_BAR4));
++
++ if (dpp_map)
++ phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
++ }
++
++ return phba->sli4_hba.dpp_regs_memmap_wc_p;
++}
++
+ /**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+@@ -16879,9 +16905,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ uint8_t dpp_barset;
+ uint32_t dpp_offset;
+ uint8_t wq_create_version;
+-#ifdef CONFIG_X86
+- unsigned long pg_addr;
+-#endif
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+@@ -17067,14 +17090,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+
+ #ifdef CONFIG_X86
+ /* Enable combined writes for DPP aperture */
+- pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
+- rc = set_memory_wc(pg_addr, 1);
+- if (rc) {
++ bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
++ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3272 Cannot setup Combined "
+ "Write on WQ[%d] - disable DPP\n",
+ wq->queue_id);
+ phba->cfg_enable_dpp = 0;
++ } else {
++ wq->dpp_regaddr = bar_memmap_p + dpp_offset;
+ }
+ #else
+ phba->cfg_enable_dpp = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index c1e9ec0243bac..9caada8cbe58f 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -783,6 +783,9 @@ struct lpfc_sli4_hba {
+ void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
+ * dpp registers
+ */
++ void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
++ * dpp registers with write combining
++ */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+--
+2.51.0
+
--- /dev/null
+From 519ebda34c8fbee55a883cc8b7aa632da72e527a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 19:28:06 +0000
+Subject: scsi: pm8001: Fix use-after-free in pm8001_queue_command()
+
+From: Salomon Dushimirimana <salomondush@google.com>
+
+[ Upstream commit 38353c26db28efd984f51d426eac2396d299cca7 ]
+
+Commit e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()") refactors
+pm8001_queue_command(), however it introduces a potential cause of a double
+free scenario when it changes the function to return -ENODEV in case of phy
+down/device gone state.
+
+In this path, pm8001_queue_command() updates task status and calls
+task_done to indicate to upper layer that the task has been handled.
+However, this also frees the underlying SAS task. A -ENODEV is then
+returned to the caller. When libsas sas_ata_qc_issue() receives this error
+value, it assumes the task wasn't handled/queued by LLDD and proceeds to
+clean up and free the task again, resulting in a double free.
+
+Since pm8001_queue_command() handles the SAS task in this case, it should
+return 0 to the caller indicating that the task has been handled.
+
+Fixes: e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()")
+Signed-off-by: Salomon Dushimirimana <salomondush@google.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://patch.msgid.link/20260213192806.439432-1-salomondush@google.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/pm8001/pm8001_sas.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 4daab8b6d6752..0f911228cb2f1 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -476,8 +476,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
+ } else {
+ task->task_done(task);
+ }
+- rc = -ENODEV;
+- goto err_out;
++ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n");
++ return 0;
+ }
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+--
+2.51.0
+
--- /dev/null
+From 203ab5085c8814cefecf85ac748edf938b9e968c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 18:37:57 +0800
+Subject: scsi: ufs: core: Move link recovery for hibern8 exit failure to
+ wl_resume
+
+From: Peter Wang <peter.wang@mediatek.com>
+
+[ Upstream commit 62c015373e1cdb1cdca824bd2dbce2dac0819467 ]
+
+Move the link recovery trigger from ufshcd_uic_pwr_ctrl() to
+__ufshcd_wl_resume(). Ensure link recovery is only attempted when hibern8
+exit fails during resume, not during hibern8 enter in suspend. Improve
+error handling and prevent unnecessary link recovery attempts.
+
+Fixes: 35dabf4503b9 ("scsi: ufs: core: Use link recovery when h8 exit fails during runtime resume")
+Signed-off-by: Peter Wang <peter.wang@mediatek.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223103906.2533654-1-peter.wang@mediatek.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index ba0cc2a051ff3..ad5866149e240 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4348,14 +4348,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+- /*
+- * If the h8 exit fails during the runtime resume process, it becomes
+- * stuck and cannot be recovered through the error handler. To fix
+- * this, use link recovery instead of the error handler.
+- */
+- if (ret && hba->pm_op_in_progress)
+- ret = ufshcd_link_recovery(hba);
+-
+ return ret;
+ }
+
+@@ -9947,7 +9939,15 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+- goto vendor_suspend;
++ /*
++ * If the h8 exit fails during the runtime resume
++ * process, it becomes stuck and cannot be recovered
++ * through the error handler. To fix this, use link
++ * recovery instead of the error handler.
++ */
++ ret = ufshcd_link_recovery(hba);
++ if (ret)
++ goto vendor_suspend;
+ }
+ } else if (ufshcd_is_link_off(hba)) {
+ /*
+--
+2.51.0
+
--- /dev/null
+drm-vmwgfx-fix-invalid-kref_put-callback-in-vmw_bo_d.patch
+drm-vmwgfx-return-the-correct-value-in-vmw_translate.patch
+drm-logicvc-fix-device-node-reference-leak-in-logicv.patch
+kvm-arm64-advertise-support-for-feat_sctlr2.patch
+kvm-arm64-hide-s1poe-from-guests-when-not-supported-.patch
+irqchip-sifive-plic-fix-frozen-interrupt-due-to-affi.patch
+scsi-lpfc-properly-set-wc-for-dpp-mapping.patch
+scsi-pm8001-fix-use-after-free-in-pm8001_queue_comma.patch
+alsa-scarlett2-fix-redeclaration-of-loop-variable.patch
+alsa-scarlett2-fix-dsp-filter-control-array-handling.patch
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch
+x86-fred-correct-speculative-safety-in-fred_extint.patch
+sched-fair-fix-eevdf-entity-placement-bug-causing-sc.patch
+sched-fair-fix-lag-clamp.patch
+rseq-clarify-rseq-registration-rseq_size-bound-check.patch
+cgroup-cpuset-fix-incorrect-use-of-cpuset_update_tas.patch
+scsi-ufs-core-move-link-recovery-for-hibern8-exit-fa.patch
+alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
+alsa-usb-audio-use-inclusive-terms.patch
+perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
+s390-idle-fix-cpu-idle-exit-cpu-time-accounting.patch
+s390-vtime-fix-virtual-timer-forwarding.patch
+pci-endpoint-introduce-pci_epc_function_is_valid.patch
+pci-endpoint-introduce-pci_epc_mem_map-unmap.patch
+pci-dwc-endpoint-implement-the-pci_epc_ops-align_add.patch
+pci-dwc-ep-use-align-addr-function-for-dw_pcie_ep_ra.patch
+pci-dwc-ep-flush-msi-x-write-before-unmapping-its-at.patch
+drm-amdgpu-unlock-a-mutex-before-destroying-it.patch
+drm-amdgpu-replace-kzalloc-copy_from_user-with-memdu.patch
+drm-amdgpu-fix-locking-bugs-in-error-paths.patch
+alsa-pci-hda-use-snd_kcontrol_chip.patch
+alsa-hda-cs35l56-fix-signedness-error-in-cs35l56_hda.patch
+btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
+btrfs-fix-objectid-value-in-error-message-in-check_e.patch
+btrfs-fix-warning-in-scrub_verify_one_metadata.patch
+btrfs-print-correct-subvol-num-if-active-swapfile-pr.patch
+btrfs-fix-compat-mask-in-error-messages-in-btrfs_che.patch
+bpf-arm64-force-8-byte-alignment-for-jit-buffer-to-p.patch
+bpf-fix-stack-out-of-bounds-write-in-devmap.patch
+pci-correct-pci_cap_exp_endpoint_sizeof_v2-value.patch
--- /dev/null
+From 526a074d8d44e6b4c7e85cef2266c95c53c35bf3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 13:15:04 +0000
+Subject: x86/fred: Correct speculative safety in fred_extint()
+
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+
+[ Upstream commit aa280a08e7d8fae58557acc345b36b3dc329d595 ]
+
+array_index_nospec() is no use if the result gets spilled to the stack, as
+it makes the believed safe-under-speculation value subject to memory
+predictions.
+
+For all practical purposes, this means array_index_nospec() must be used in
+the expression that accesses the array.
+
+As the code currently stands, it's the wrong side of irqentry_enter(), and
+'index' is put into %ebp across the function call.
+
+Remove the index variable and reposition array_index_nospec(), so it's
+calculated immediately before the array access.
+
+Fixes: 14619d912b65 ("x86/fred: FRED entry/exit and dispatch code")
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260106131504.679932-1-andrew.cooper3@citrix.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/entry_fred.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/entry/entry_fred.c b/arch/x86/entry/entry_fred.c
+index f004a4dc74c2d..563e439b743f2 100644
+--- a/arch/x86/entry/entry_fred.c
++++ b/arch/x86/entry/entry_fred.c
+@@ -159,8 +159,6 @@ void __init fred_complete_exception_setup(void)
+ static noinstr void fred_extint(struct pt_regs *regs)
+ {
+ unsigned int vector = regs->fred_ss.vector;
+- unsigned int index = array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
+- NR_SYSTEM_VECTORS);
+
+ if (WARN_ON_ONCE(vector < FIRST_EXTERNAL_VECTOR))
+ return;
+@@ -169,7 +167,8 @@ static noinstr void fred_extint(struct pt_regs *regs)
+ irqentry_state_t state = irqentry_enter(regs);
+
+ instrumentation_begin();
+- sysvec_table[index](regs);
++ sysvec_table[array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
++ NR_SYSTEM_VECTORS)](regs);
+ instrumentation_end();
+ irqentry_exit(regs, state);
+ } else {
+--
+2.51.0
+
--- /dev/null
+From 7ad4c5df8987580e5b0cb9d158d684c74a796bab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 11:28:15 -0800
+Subject: accel/amdxdna: Prevent ubuf size overflow
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 03808abb1d868aed7478a11a82e5bb4b3f1ca6d6 ]
+
+The ubuf size calculation may overflow, resulting in an undersized
+allocation and possible memory corruption.
+
+Use check_add_overflow() helpers to validate the size calculation before
+allocation.
+
+Fixes: bd72d4acda10 ("accel/amdxdna: Support user space allocated buffer")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260217192815.1784689-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/amdxdna_ubuf.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
+index 9e3b3b055caa8..62a478f6b45fb 100644
+--- a/drivers/accel/amdxdna/amdxdna_ubuf.c
++++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
+@@ -7,6 +7,7 @@
+ #include <drm/drm_device.h>
+ #include <drm/drm_print.h>
+ #include <linux/dma-buf.h>
++#include <linux/overflow.h>
+ #include <linux/pagemap.h>
+ #include <linux/vmalloc.h>
+
+@@ -176,7 +177,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ goto free_ent;
+ }
+
+- exp_info.size += va_ent[i].len;
++ if (check_add_overflow(exp_info.size, va_ent[i].len, &exp_info.size)) {
++ ret = -EINVAL;
++ goto free_ent;
++ }
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+--
+2.51.0
+
--- /dev/null
+From ada69b3850a61ec0880bd69f50f3668bc119fdff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 22:02:37 -0800
+Subject: accel/amdxdna: Remove buffer size check when creating command BO
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 08fe1b5166fdc81b010d7bf39cd6440620e7931e ]
+
+Large command buffers may be used, and they do not always need to be
+mapped or accessed by the driver. Performing a size check at command BO
+creation time unnecessarily rejects valid use cases.
+
+Remove the buffer size check from command BO creation, and defer vmap
+and size validation to the paths where the driver actually needs to map
+and access the command buffer.
+
+Fixes: ac49797c1815 ("accel/amdxdna: Add GEM buffer object management")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260206060237.4050492-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/amdxdna_gem.c | 38 ++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
+index 7f91863c3f24c..a1c9cc4a2b9d8 100644
+--- a/drivers/accel/amdxdna/amdxdna_gem.c
++++ b/drivers/accel/amdxdna/amdxdna_gem.c
+@@ -20,8 +20,6 @@
+ #include "amdxdna_pci_drv.h"
+ #include "amdxdna_ubuf.h"
+
+-#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+-
+ MODULE_IMPORT_NS("DMA_BUF");
+
+ static int
+@@ -745,12 +743,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ {
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+- int ret;
+-
+- if (args->size > XDNA_MAX_CMD_BO_SIZE) {
+- XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
+- return ERR_PTR(-EINVAL);
+- }
+
+ if (args->size < sizeof(struct amdxdna_cmd)) {
+ XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
+@@ -764,17 +756,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ abo->type = AMDXDNA_BO_CMD;
+ abo->client = filp->driver_priv;
+
+- ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
+- if (ret) {
+- XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
+- goto release_obj;
+- }
+-
+ return abo;
+-
+-release_obj:
+- drm_gem_object_put(to_gobj(abo));
+- return ERR_PTR(ret);
+ }
+
+ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+@@ -871,6 +853,7 @@ struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
++ int ret;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdl);
+ if (!gobj) {
+@@ -879,9 +862,26 @@ struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ }
+
+ abo = to_xdna_obj(gobj);
+- if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
++ if (bo_type != AMDXDNA_BO_INVALID && abo->type != bo_type)
++ goto put_obj;
++
++ if (bo_type != AMDXDNA_BO_CMD || abo->mem.kva)
+ return abo;
+
++ if (abo->mem.size > SZ_32K) {
++ XDNA_ERR(xdna, "Cmd bo is too big %ld", abo->mem.size);
++ goto put_obj;
++ }
++
++ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
++ if (ret) {
++ XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
++ goto put_obj;
++ }
++
++ return abo;
++
++put_obj:
+ drm_gem_object_put(gobj);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From 7d63d29417e39f5d32681dc9c55a59e6d23dcc30 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Feb 2026 13:19:46 -0800
+Subject: accel/amdxdna: Validate command buffer payload count
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 901ec3470994006bc8dd02399e16b675566c3416 ]
+
+The count field in the command header is used to determine the valid
+payload size. Verify that the valid payload does not exceed the remaining
+buffer space.
+
+Fixes: aac243092b70 ("accel/amdxdna: Add command execution")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260219211946.1920485-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/amdxdna_ctx.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
+index 856fb25086f12..cfee89681ff3c 100644
+--- a/drivers/accel/amdxdna/amdxdna_ctx.c
++++ b/drivers/accel/amdxdna/amdxdna_ctx.c
+@@ -104,7 +104,10 @@ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
+
+ if (size) {
+ count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
+- if (unlikely(count <= num_masks)) {
++ if (unlikely(count <= num_masks ||
++ count * sizeof(u32) +
++ offsetof(struct amdxdna_cmd, data[0]) >
++ abo->mem.size)) {
+ *size = 0;
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From a0305396b959e0504aaaa589d4837d946cb7aaad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 11:17:28 +0000
+Subject: ALSA: hda: cs35l56: Fix signedness error in cs35l56_hda_posture_put()
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 003ce8c9b2ca28fbb4860651e76fb1c9a91f2ea1 ]
+
+In cs35l56_hda_posture_put() assign ucontrol->value.integer.value[0] to
+a long instead of an unsigned long. ucontrol->value.integer.value[0] is
+a long.
+
+This fixes the sparse warning:
+
+sound/hda/codecs/side-codecs/cs35l56_hda.c:256:20: warning: unsigned value
+that used to be signed checked against zero?
+sound/hda/codecs/side-codecs/cs35l56_hda.c:252:29: signed value source
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Fixes: 73cfbfa9caea8 ("ALSA: hda/cs35l56: Add driver for Cirrus Logic CS35L56 amplifier")
+Link: https://patch.msgid.link/20260226111728.1700431-1-rf@opensource.cirrus.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/hda/codecs/side-codecs/cs35l56_hda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/hda/codecs/side-codecs/cs35l56_hda.c b/sound/hda/codecs/side-codecs/cs35l56_hda.c
+index 5bb1c4ebeaf3c..acbacd0766064 100644
+--- a/sound/hda/codecs/side-codecs/cs35l56_hda.c
++++ b/sound/hda/codecs/side-codecs/cs35l56_hda.c
+@@ -249,7 +249,7 @@ static int cs35l56_hda_posture_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+- unsigned long pos = ucontrol->value.integer.value[0];
++ long pos = ucontrol->value.integer.value[0];
+ bool changed;
+ int ret;
+
+--
+2.51.0
+
--- /dev/null
+From 237bca4a34e4305cbec1e64218dcf2ab065d8782 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 21:58:48 +1030
+Subject: ALSA: scarlett2: Fix DSP filter control array handling
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 1d241483368f2fd87fbaba64d6aec6bad3a1e12e ]
+
+scarlett2_add_dsp_ctls() was incorrectly storing the precomp and PEQ
+filter coefficient control pointers into the precomp_flt_switch_ctls
+and peq_flt_switch_ctls arrays instead of the intended targets
+precomp_flt_ctls and peq_flt_ctls. Pass NULL instead, as the filter
+coefficient control pointers are not used, and remove the unused
+precomp_flt_ctls and peq_flt_ctls arrays from struct scarlett2_data.
+
+Additionally, scarlett2_update_filter_values() was reading
+dsp_input_count * peq_flt_count values for
+SCARLETT2_CONFIG_PEQ_FLT_SWITCH, but the peq_flt_switch array is
+indexed only by dsp_input_count (one switch per DSP input, not per
+filter). Fix the read count.
+
+Fixes: b64678eb4e70 ("ALSA: scarlett2: Add DSP controls")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Link: https://patch.msgid.link/86497b71db060677d97c38a6ce5f89bb3b25361b.1771581197.git.g@b4.vu
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/mixer_scarlett2.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+index bef8c9e544dd3..380beb7ed4cf8 100644
+--- a/sound/usb/mixer_scarlett2.c
++++ b/sound/usb/mixer_scarlett2.c
+@@ -1328,8 +1328,6 @@ struct scarlett2_data {
+ struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
+ struct snd_kcontrol *mix_ctls[SCARLETT2_MIX_MAX];
+ struct snd_kcontrol *compressor_ctls[SCARLETT2_COMPRESSOR_CTLS_MAX];
+- struct snd_kcontrol *precomp_flt_ctls[SCARLETT2_PRECOMP_FLT_CTLS_MAX];
+- struct snd_kcontrol *peq_flt_ctls[SCARLETT2_PEQ_FLT_CTLS_MAX];
+ struct snd_kcontrol *precomp_flt_switch_ctls[SCARLETT2_DSP_SWITCH_MAX];
+ struct snd_kcontrol *peq_flt_switch_ctls[SCARLETT2_DSP_SWITCH_MAX];
+ struct snd_kcontrol *direct_monitor_ctl;
+@@ -3447,7 +3445,6 @@ static int scarlett2_update_autogain(struct usb_mixer_interface *mixer)
+ private->autogain_status[i] =
+ private->num_autogain_status_texts - 1;
+
+-
+ for (i = 0; i < SCARLETT2_AG_TARGET_COUNT; i++)
+ if (scarlett2_has_config_item(private,
+ scarlett2_ag_target_configs[i])) {
+@@ -5372,8 +5369,7 @@ static int scarlett2_update_filter_values(struct usb_mixer_interface *mixer)
+
+ err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_PEQ_FLT_SWITCH,
+- info->dsp_input_count * info->peq_flt_count,
+- private->peq_flt_switch);
++ info->dsp_input_count, private->peq_flt_switch);
+ if (err < 0)
+ return err;
+
+@@ -6546,7 +6542,7 @@ static int scarlett2_add_dsp_ctls(struct usb_mixer_interface *mixer, int i)
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_precomp_flt_ctl,
+ i * info->precomp_flt_count + j,
+- 1, s, &private->precomp_flt_switch_ctls[j]);
++ 1, s, NULL);
+ if (err < 0)
+ return err;
+ }
+@@ -6556,7 +6552,7 @@ static int scarlett2_add_dsp_ctls(struct usb_mixer_interface *mixer, int i)
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_peq_flt_ctl,
+ i * info->peq_flt_count + j,
+- 1, s, &private->peq_flt_switch_ctls[j]);
++ 1, s, NULL);
+ if (err < 0)
+ return err;
+ }
+--
+2.51.0
+
--- /dev/null
+From 7da86288db41b266d837ff4b64f9c3d826a0f253 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:36:35 +1030
+Subject: ALSA: usb-audio: Add QUIRK_FLAG_SKIP_IFACE_SETUP
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 38c322068a26a01d7ff64da92179e68cdde9860b ]
+
+Add a quirk flag to skip the usb_set_interface(),
+snd_usb_init_pitch(), and snd_usb_init_sample_rate() calls in
+__snd_usb_parse_audio_interface(). These are redundant with
+snd_usb_endpoint_prepare() at stream-open time.
+
+Enable the quirk for Focusrite devices, as init_sample_rate(rate_max)
+sets 192kHz during probing, which disables the internal mixer and Air
+and Safe modes.
+
+Fixes: 16f1f838442d ("Revert "ALSA: usb-audio: Drop superfluous interface setup at parsing"")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/65a7909b15f9feb76c2a6f4f8814c240ddc50737.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 3 ++-
+ sound/usb/stream.c | 3 +++
+ sound/usb/usbaudio.h | 6 ++++++
+ 3 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index ea5de072c36a1..c411005cd4d87 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2421,7 +2421,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- 0),
++ QUIRK_FLAG_SKIP_IFACE_SETUP),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+@@ -2503,6 +2503,7 @@ static const char *const snd_usb_audio_quirk_flag_names[] = {
+ QUIRK_STRING_ENTRY(MIC_RES_384),
+ QUIRK_STRING_ENTRY(MIXER_PLAYBACK_MIN_MUTE),
+ QUIRK_STRING_ENTRY(MIXER_CAPTURE_MIN_MUTE),
++ QUIRK_STRING_ENTRY(SKIP_IFACE_SETUP),
+ NULL
+ };
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 5c235a5ba7e1b..3b2526964e4b4 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -1261,6 +1261,9 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ set_iface_first = true;
+
+ /* try to set the interface... */
++ if (chip->quirk_flags & QUIRK_FLAG_SKIP_IFACE_SETUP)
++ continue;
++
+ usb_set_interface(chip->dev, iface_no, 0);
+ if (set_iface_first)
+ usb_set_interface(chip->dev, iface_no, altno);
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 79978cae9799c..085530cf62d92 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -224,6 +224,10 @@ extern bool snd_usb_skip_validation;
+ * playback value represents muted state instead of minimum audible volume
+ * QUIRK_FLAG_MIXER_CAPTURE_MIN_MUTE
+ * Similar to QUIRK_FLAG_MIXER_PLAYBACK_MIN_MUTE, but for capture streams
++ * QUIRK_FLAG_SKIP_IFACE_SETUP
++ * Skip the probe-time interface setup (usb_set_interface,
++ * init_pitch, init_sample_rate); redundant with
++ * snd_usb_endpoint_prepare() at stream-open time
+ */
+
+ enum {
+@@ -253,6 +257,7 @@ enum {
+ QUIRK_TYPE_MIC_RES_384 = 23,
+ QUIRK_TYPE_MIXER_PLAYBACK_MIN_MUTE = 24,
+ QUIRK_TYPE_MIXER_CAPTURE_MIN_MUTE = 25,
++ QUIRK_TYPE_SKIP_IFACE_SETUP = 26,
+ /* Please also edit snd_usb_audio_quirk_flag_names */
+ };
+
+@@ -284,5 +289,6 @@ enum {
+ #define QUIRK_FLAG_MIC_RES_384 QUIRK_FLAG(MIC_RES_384)
+ #define QUIRK_FLAG_MIXER_PLAYBACK_MIN_MUTE QUIRK_FLAG(MIXER_PLAYBACK_MIN_MUTE)
+ #define QUIRK_FLAG_MIXER_CAPTURE_MIN_MUTE QUIRK_FLAG(MIXER_CAPTURE_MIN_MUTE)
++#define QUIRK_FLAG_SKIP_IFACE_SETUP QUIRK_FLAG(SKIP_IFACE_SETUP)
+
+ #endif /* __USBAUDIO_H */
+--
+2.51.0
+
--- /dev/null
+From 49dc431606ee212fcd8ac1b9bd82538a0340b592 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 +0100
+Subject: ALSA: usb-audio: Cap the packet size pre-calculations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7fe8dec3f628e9779f1631576f8e693370050348 ]
+
+We calculate the possible packet sizes beforehand for adaptive and
+synchronous endpoints, but we didn't take care of the max frame size
+for those pre-calculated values. When a device or a bus limits the
+packet size, a high sample rate or a high number of channels may lead
+to the packet sizes that are larger than the given limit, which
+results in an error from the USB core at submitting URBs.
+
+As a simple workaround, just add the sanity checks of pre-calculated
+packet sizes to have the upper boundary of ep->maxframesize.
+
+Fixes: f0bd62b64016 ("ALSA: usb-audio: Improve frames size computation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221076
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 3ac1fbec6327e..173edce027d7b 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1374,6 +1374,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ return -EINVAL;
+ }
+
++ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
++ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
++
+ /* calculate the frequency in 16.16 format */
+ ep->freqm = ep->freqn;
+ ep->freqshift = INT_MIN;
+--
+2.51.0
+
--- /dev/null
+From e8a20b22ccef3d929a85cbf4e5c29bfe5c947a07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) the Air and
+Safe modes get disabled.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 6860b5bd55f1e..ea5de072c36a1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2421,7 +2421,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.51.0
+
--- /dev/null
+From 9956b800bf7e35450d82067455acbb3763d481e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:31 +0100
+Subject: ALSA: usb-audio: Use inclusive terms
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 4e9113c533acee2ba1f72fd68ee6ecd36b64484e ]
+
+Replace the remaining with inclusive terms; it's only this function
+name we overlooked at the previous conversion.
+
+Fixes: 53837b4ac2bd ("ALSA: usb-audio: Replace slave/master terms")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 173edce027d7b..eff3329d86b7e 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -160,8 +160,8 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
+ * This won't be used for implicit feedback which takes the packet size
+ * returned from the sync source
+ */
+-static int slave_next_packet_size(struct snd_usb_endpoint *ep,
+- unsigned int avail)
++static int synced_next_packet_size(struct snd_usb_endpoint *ep,
++ unsigned int avail)
+ {
+ unsigned int phase;
+ int ret;
+@@ -227,7 +227,7 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ }
+
+ if (ep->sync_source)
+- return slave_next_packet_size(ep, avail);
++ return synced_next_packet_size(ep, avail);
+ else
+ return next_packet_size(ep, avail);
+ }
+--
+2.51.0
+
--- /dev/null
+From d6b99e9a8ed1365533cdda3cb4949c6511aec959 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:43:49 +0100
+Subject: ALSA: usb: qcom: Correct parameter comment for
+ uaudio_transfer_buffer_setup()
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1d6452a0ce78cd3f4e48943b5ba21d273a658298 ]
+
+At fixing the memory leak of xfer buffer, we forgot to update the
+corresponding comment, too. This resulted in a kernel-doc warning
+with W=1. Let's correct it.
+
+Fixes: 5c7ef5001292 ("ALSA: qc_audio_offload: avoid leaking xfer_buf allocation")
+Link: https://patch.msgid.link/20260226154414.1081568-4-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/qcom/qc_audio_offload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/qcom/qc_audio_offload.c b/sound/usb/qcom/qc_audio_offload.c
+index cfb30a195364a..297490f0f5874 100644
+--- a/sound/usb/qcom/qc_audio_offload.c
++++ b/sound/usb/qcom/qc_audio_offload.c
+@@ -1007,7 +1007,7 @@ static int enable_audio_stream(struct snd_usb_substream *subs,
+ /**
+ * uaudio_transfer_buffer_setup() - fetch and populate xfer buffer params
+ * @subs: usb substream
+- * @xfer_buf: xfer buf to be allocated
++ * @xfer_buf_cpu: xfer buf to be allocated
+ * @xfer_buf_len: size of allocation
+ * @mem_info: QMI response info
+ *
+--
+2.51.0
+
--- /dev/null
+From db4220cb029256c248894c2eac5d943f0c254a8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 22:10:11 +0000
+Subject: arm64: io: Extract user memory type in ioremap_prot()
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit 8f098037139b294050053123ab2bc0f819d08932 ]
+
+The only caller of ioremap_prot() outside of the generic ioremap()
+implementation is generic_access_phys(), which passes a 'pgprot_t' value
+determined from the user mapping of the target 'pfn' being accessed by
+the kernel. On arm64, the 'pgprot_t' contains all of the non-address
+bits from the pte, including the permission controls, and so we end up
+returning a new user mapping from ioremap_prot() which faults when
+accessed from the kernel on systems with PAN:
+
+ | Unable to handle kernel read from unreadable memory at virtual address ffff80008ea89000
+ | ...
+ | Call trace:
+ | __memcpy_fromio+0x80/0xf8
+ | generic_access_phys+0x20c/0x2b8
+ | __access_remote_vm+0x46c/0x5b8
+ | access_remote_vm+0x18/0x30
+ | environ_read+0x238/0x3e8
+ | vfs_read+0xe4/0x2b0
+ | ksys_read+0xcc/0x178
+ | __arm64_sys_read+0x4c/0x68
+
+Extract only the memory type from the user 'pgprot_t' in ioremap_prot()
+and assert that we're being passed a user mapping, to protect us against
+any changes in future that may require additional handling. To avoid
+falsely flagging users of ioremap(), provide our own ioremap() macro
+which simply wraps __ioremap_prot().
+
+Cc: Zeng Heng <zengheng4@huawei.com>
+Cc: Jinjiang Tu <tujinjiang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: 893dea9ccd08 ("arm64: Add HAVE_IOREMAP_PROT support")
+Reported-by: Jinjiang Tu <tujinjiang@huawei.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/io.h | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index cd2fddfe814ac..8cbd1e96fd50b 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -266,10 +266,23 @@ typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
+ int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
+ void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);
+
+-#define ioremap_prot __ioremap_prot
++static inline void __iomem *ioremap_prot(phys_addr_t phys, size_t size,
++ pgprot_t user_prot)
++{
++ pgprot_t prot;
++ ptdesc_t user_prot_val = pgprot_val(user_prot);
++
++ if (WARN_ON_ONCE(!(user_prot_val & PTE_USER)))
++ return NULL;
+
+-#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
++ prot = __pgprot_modify(PAGE_KERNEL, PTE_ATTRINDX_MASK,
++ user_prot_val & PTE_ATTRINDX_MASK);
++ return __ioremap_prot(phys, size, prot);
++}
++#define ioremap_prot ioremap_prot
+
++#define ioremap(addr, size) \
++ __ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+ #define ioremap_wc(addr, size) \
+ __ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
+ #define ioremap_np(addr, size) \
+--
+2.51.0
+
--- /dev/null
+From e8e08fa4ad603b38423ab708b7cc55ee09b68d6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 22:10:10 +0000
+Subject: arm64: io: Rename ioremap_prot() to __ioremap_prot()
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit f6bf47ab32e0863df50f5501d207dcdddb7fc507 ]
+
+Rename our ioremap_prot() implementation to __ioremap_prot() and convert
+all arch-internal callers over to the new function.
+
+ioremap_prot() remains as a #define to __ioremap_prot() for
+generic_access_phys() and will be subsequently extended to handle user
+permissions in 'prot'.
+
+Cc: Zeng Heng <zengheng4@huawei.com>
+Cc: Jinjiang Tu <tujinjiang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: 8f098037139b ("arm64: io: Extract user memory type in ioremap_prot()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/io.h | 11 ++++++-----
+ arch/arm64/kernel/acpi.c | 2 +-
+ arch/arm64/mm/ioremap.c | 6 +++---
+ 3 files changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index 83e03abbb2ca9..cd2fddfe814ac 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -264,19 +264,20 @@ __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+ typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
+ pgprot_t *prot);
+ int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
++void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);
+
+-#define ioremap_prot ioremap_prot
++#define ioremap_prot __ioremap_prot
+
+ #define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+
+ #define ioremap_wc(addr, size) \
+- ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
++ __ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
+ #define ioremap_np(addr, size) \
+- ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
++ __ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+
+
+ #define ioremap_encrypted(addr, size) \
+- ioremap_prot((addr), (size), PAGE_KERNEL)
++ __ioremap_prot((addr), (size), PAGE_KERNEL)
+
+ /*
+ * io{read,write}{16,32,64}be() macros
+@@ -297,7 +298,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
+ if (pfn_is_map_memory(__phys_to_pfn(addr)))
+ return (void __iomem *)__phys_to_virt(addr);
+
+- return ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
++ return __ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
+index f1cb2447afc9c..b285e174f4f51 100644
+--- a/arch/arm64/kernel/acpi.c
++++ b/arch/arm64/kernel/acpi.c
+@@ -377,7 +377,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+ prot = __acpi_get_writethrough_mem_attribute();
+ }
+ }
+- return ioremap_prot(phys, size, prot);
++ return __ioremap_prot(phys, size, prot);
+ }
+
+ /*
+diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
+index 10e246f112710..1e4794a2af7d6 100644
+--- a/arch/arm64/mm/ioremap.c
++++ b/arch/arm64/mm/ioremap.c
+@@ -14,8 +14,8 @@ int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook)
+ return 0;
+ }
+
+-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+- pgprot_t pgprot)
++void __iomem *__ioremap_prot(phys_addr_t phys_addr, size_t size,
++ pgprot_t pgprot)
+ {
+ unsigned long last_addr = phys_addr + size - 1;
+
+@@ -38,7 +38,7 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+
+ return generic_ioremap_prot(phys_addr, size, pgprot);
+ }
+-EXPORT_SYMBOL(ioremap_prot);
++EXPORT_SYMBOL(__ioremap_prot);
+
+ /*
+ * Must be called after early_fixmap_init
+--
+2.51.0
+
--- /dev/null
+From ac0d624403d70b847ddd1a78183e51f3f395410c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:47:52 +0100
+Subject: ASoC: SDCA: Fix comments for sdca_irq_request()
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 71c1978ab6d2c6d48c31311855f1a85377c152ae ]
+
+The kernel-doc comments for sdca_irq_request() contained some typos
+that lead to build warnings with W=1. Let's correct them.
+
+Fixes: b126394d9ec6 ("ASoC: SDCA: Generic interrupt support")
+Acked-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260226154753.1083320-1-tiwai@suse.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sdca/sdca_interrupts.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/sdca/sdca_interrupts.c b/sound/soc/sdca/sdca_interrupts.c
+index 79bf3042f57d4..f83413587da5a 100644
+--- a/sound/soc/sdca/sdca_interrupts.c
++++ b/sound/soc/sdca/sdca_interrupts.c
+@@ -246,9 +246,9 @@ static int sdca_irq_request_locked(struct device *dev,
+ }
+
+ /**
+- * sdca_request_irq - request an individual SDCA interrupt
++ * sdca_irq_request - request an individual SDCA interrupt
+ * @dev: Pointer to the struct device against which things should be allocated.
+- * @interrupt_info: Pointer to the interrupt information structure.
++ * @info: Pointer to the interrupt information structure.
+ * @sdca_irq: SDCA interrupt position.
+ * @name: Name to be given to the IRQ.
+ * @handler: A callback thread function to be called for the IRQ.
+--
+2.51.0
+
--- /dev/null
+From 3a71fcc99157276581d17ea938258b674b237be5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 19:15:02 +0800
+Subject: bpf: Add bitwise tracking for BPF_END
+
+From: Tianci Cao <ziye@zju.edu.cn>
+
+[ Upstream commit 9d21199842247ab05c675fb9b6c6ca393a5c0024 ]
+
+This patch implements bitwise tracking (tnum analysis) for BPF_END
+(byte swap) operation.
+
+Currently, the BPF verifier does not track value for BPF_END operation,
+treating the result as completely unknown. This limits the verifier's
+ability to prove safety of programs that perform endianness conversions,
+which are common in networking code.
+
+For example, the following code pattern for port number validation:
+
+int test(struct pt_regs *ctx) {
+ __u64 x = bpf_get_prandom_u32();
+ x &= 0x3f00; // Range: [0, 0x3f00], var_off: (0x0; 0x3f00)
+ x = bswap16(x); // Should swap to range [0, 0x3f], var_off: (0x0; 0x3f)
+ if (x > 0x3f) goto trap;
+ return 0;
+trap:
+ return *(u64 *)NULL; // Should be unreachable
+}
+
+Currently generates verifier output:
+
+1: (54) w0 &= 16128 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))
+2: (d7) r0 = bswap16 r0 ; R0=scalar()
+3: (25) if r0 > 0x3f goto pc+2 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=63,var_off=(0x0; 0x3f))
+
+Without this patch, even though the verifier knows `x` has certain bits
+set, after bswap16, it loses all tracking information and treats port
+as having a completely unknown value [0, 65535].
+
+According to the BPF instruction set[1], there are 3 kinds of BPF_END:
+
+1. `bswap(16|32|64)`: opcode=0xd7 (BPF_END | BPF_ALU64 | BPF_TO_LE)
+ - do unconditional swap
+2. `le(16|32|64)`: opcode=0xd4 (BPF_END | BPF_ALU | BPF_TO_LE)
+ - on big-endian: do swap
+ - on little-endian: truncation (16/32-bit) or no-op (64-bit)
+3. `be(16|32|64)`: opcode=0xdc (BPF_END | BPF_ALU | BPF_TO_BE)
+ - on little-endian: do swap
+ - on big-endian: truncation (16/32-bit) or no-op (64-bit)
+
+Since BPF_END operations are inherently bit-wise permutations, tnum
+(bitwise tracking) offers the most efficient and precise mechanism
+for value analysis. By implementing `tnum_bswap16`, `tnum_bswap32`,
+and `tnum_bswap64`, we can derive exact `var_off` values concisely,
+directly reflecting the bit-level changes.
+
+Here is the overview of changes:
+
+1. In `tnum_bswap(16|32|64)` (kernel/bpf/tnum.c):
+
+Call `swab(16|32|64)` function on the value and mask of `var_off`, and
+do truncation for 16/32-bit cases.
+
+2. In `adjust_scalar_min_max_vals` (kernel/bpf/verifier.c):
+
+Call helper function `scalar_byte_swap`.
+- Only do byte swap when
+ * alu64 (unconditional swap) OR
+ * switching between big-endian and little-endian machines.
+- If need do byte swap:
+ * Firstly call `tnum_bswap(16|32|64)` to update `var_off`.
+ * Then reset the bound since byte swap scrambles the range.
+- For 16/32-bit cases, truncate dst register to match the swapped size.
+
+This enables better verification of networking code that frequently uses
+byte swaps for protocol processing, reducing false positive rejections.
+
+[1] https://www.kernel.org/doc/Documentation/bpf/standardization/instruction-set.rst
+
+Co-developed-by: Shenghao Yuan <shenghaoyuan0928@163.com>
+Signed-off-by: Shenghao Yuan <shenghaoyuan0928@163.com>
+Co-developed-by: Yazhou Tang <tangyazhou518@outlook.com>
+Signed-off-by: Yazhou Tang <tangyazhou518@outlook.com>
+Signed-off-by: Tianci Cao <ziye@zju.edu.cn>
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/r/20260204111503.77871-2-ziye@zju.edu.cn
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: efc11a667878 ("bpf: Improve bounds when tnum has a single possible value")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/tnum.h | 5 ++++
+ kernel/bpf/tnum.c | 16 ++++++++++++
+ kernel/bpf/verifier.c | 60 ++++++++++++++++++++++++++++++++++++++++---
+ 3 files changed, 78 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/tnum.h b/include/linux/tnum.h
+index c52b862dad45b..fa4654ffb6217 100644
+--- a/include/linux/tnum.h
++++ b/include/linux/tnum.h
+@@ -63,6 +63,11 @@ struct tnum tnum_union(struct tnum t1, struct tnum t2);
+ /* Return @a with all but the lowest @size bytes cleared */
+ struct tnum tnum_cast(struct tnum a, u8 size);
+
++/* Swap the bytes of a tnum */
++struct tnum tnum_bswap16(struct tnum a);
++struct tnum tnum_bswap32(struct tnum a);
++struct tnum tnum_bswap64(struct tnum a);
++
+ /* Returns true if @a is a known constant */
+ static inline bool tnum_is_const(struct tnum a)
+ {
+diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
+index f8e70e9c3998d..26fbfbb017001 100644
+--- a/kernel/bpf/tnum.c
++++ b/kernel/bpf/tnum.c
+@@ -8,6 +8,7 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/tnum.h>
++#include <linux/swab.h>
+
+ #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
+ /* A completely unknown value */
+@@ -253,3 +254,18 @@ struct tnum tnum_const_subreg(struct tnum a, u32 value)
+ {
+ return tnum_with_subreg(a, tnum_const(value));
+ }
++
++struct tnum tnum_bswap16(struct tnum a)
++{
++ return TNUM(swab16(a.value & 0xFFFF), swab16(a.mask & 0xFFFF));
++}
++
++struct tnum tnum_bswap32(struct tnum a)
++{
++ return TNUM(swab32(a.value & 0xFFFFFFFF), swab32(a.mask & 0xFFFFFFFF));
++}
++
++struct tnum tnum_bswap64(struct tnum a)
++{
++ return TNUM(swab64(a.value), swab64(a.mask));
++}
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index dcbf21f61d2e6..449997aa77a06 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -15377,6 +15377,48 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
+ __update_reg_bounds(dst_reg);
+ }
+
++static void scalar_byte_swap(struct bpf_reg_state *dst_reg, struct bpf_insn *insn)
++{
++ /*
++ * Byte swap operation - update var_off using tnum_bswap.
++ * Three cases:
++ * 1. bswap(16|32|64): opcode=0xd7 (BPF_END | BPF_ALU64 | BPF_TO_LE)
++ * unconditional swap
++ * 2. to_le(16|32|64): opcode=0xd4 (BPF_END | BPF_ALU | BPF_TO_LE)
++ * swap on big-endian, truncation or no-op on little-endian
++ * 3. to_be(16|32|64): opcode=0xdc (BPF_END | BPF_ALU | BPF_TO_BE)
++ * swap on little-endian, truncation or no-op on big-endian
++ */
++
++ bool alu64 = BPF_CLASS(insn->code) == BPF_ALU64;
++ bool to_le = BPF_SRC(insn->code) == BPF_TO_LE;
++ bool is_big_endian;
++#ifdef CONFIG_CPU_BIG_ENDIAN
++ is_big_endian = true;
++#else
++ is_big_endian = false;
++#endif
++ /* Apply bswap if alu64 or switch between big-endian and little-endian machines */
++ bool need_bswap = alu64 || (to_le == is_big_endian);
++
++ if (need_bswap) {
++ if (insn->imm == 16)
++ dst_reg->var_off = tnum_bswap16(dst_reg->var_off);
++ else if (insn->imm == 32)
++ dst_reg->var_off = tnum_bswap32(dst_reg->var_off);
++ else if (insn->imm == 64)
++ dst_reg->var_off = tnum_bswap64(dst_reg->var_off);
++ /*
++ * Byteswap scrambles the range, so we must reset bounds.
++ * Bounds will be re-derived from the new tnum later.
++ */
++ __mark_reg_unbounded(dst_reg);
++ }
++ /* For bswap16/32, truncate dst register to match the swapped size */
++ if (insn->imm == 16 || insn->imm == 32)
++ coerce_reg_to_size(dst_reg, insn->imm / 8);
++}
++
+ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
+ const struct bpf_reg_state *src_reg)
+ {
+@@ -15403,6 +15445,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
+ case BPF_XOR:
+ case BPF_OR:
+ case BPF_MUL:
++ case BPF_END:
+ return true;
+
+ /* Shift operators range is only computable if shift dimension operand
+@@ -15551,12 +15594,23 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
+ else
+ scalar_min_max_arsh(dst_reg, &src_reg);
+ break;
++ case BPF_END:
++ scalar_byte_swap(dst_reg, insn);
++ break;
+ default:
+ break;
+ }
+
+- /* ALU32 ops are zero extended into 64bit register */
+- if (alu32)
++ /*
++ * ALU32 ops are zero extended into 64bit register.
++ *
++ * BPF_END is already handled inside the helper (truncation),
++ * so skip zext here to avoid unexpected zero extension.
++ * e.g., le64: opcode=(BPF_END|BPF_ALU|BPF_TO_LE), imm=0x40
++ * This is a 64bit byte swap operation with alu32==true,
++ * but we should not zero extend the result.
++ */
++ if (alu32 && opcode != BPF_END)
+ zext_32_to_64(dst_reg);
+ reg_bounds_sync(dst_reg);
+ return 0;
+@@ -15736,7 +15790,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ }
+
+ /* check dest operand */
+- if (opcode == BPF_NEG &&
++ if ((opcode == BPF_NEG || opcode == BPF_END) &&
+ regs[insn->dst_reg].type == SCALAR_VALUE) {
+ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
+ err = err ?: adjust_scalar_min_max_vals(env, insn,
+--
+2.51.0
+
--- /dev/null
+From 7b58cd868314f689c674b76c78af5201cb8d868b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 07:55:25 +0000
+Subject: bpf, arm64: Force 8-byte alignment for JIT buffer to prevent atomic
+ tearing
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit ef06fd16d48704eac868441d98d4ef083d8f3d07 ]
+
+struct bpf_plt contains a u64 target field. Currently, the BPF JIT
+allocator requests an alignment of 4 bytes (sizeof(u32)) for the JIT
+buffer.
+
+Because the base address of the JIT buffer can be 4-byte aligned (e.g.,
+ending in 0x4 or 0xc), the relative padding logic in build_plt() fails
+to ensure that target lands on an 8-byte boundary.
+
+This leads to two issues:
+1. UBSAN reports misaligned-access warnings when dereferencing the
+ structure.
+2. More critically, target is updated concurrently via WRITE_ONCE() in
+ bpf_arch_text_poke() while the JIT'd code executes ldr. On arm64,
+ 64-bit loads/stores are only guaranteed to be single-copy atomic if
+ they are 64-bit aligned. A misaligned target risks a torn read,
+ causing the JIT to jump to a corrupted address.
+
+Fix this by increasing the allocation alignment requirement to 8 bytes
+(sizeof(u64)) in bpf_jit_binary_pack_alloc(). This anchors the base of
+the JIT buffer to an 8-byte boundary, allowing the relative padding math
+in build_plt() to correctly align the target field.
+
+Fixes: b2ad54e1533e ("bpf, arm64: Implement bpf_arch_text_poke() for arm64")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Acked-by: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20260226075525.233321-1-tabba@google.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/net/bpf_jit_comp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 83a6ca613f9c2..107eb71b533a0 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -2122,7 +2122,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
+ image_size = extable_offset + extable_size;
+ ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
+- sizeof(u32), &header, &image_ptr,
++ sizeof(u64), &header, &image_ptr,
+ jit_fill_hole);
+ if (!ro_header) {
+ prog = orig_prog;
+--
+2.51.0
+
--- /dev/null
+From dcf84ef1f24618d22b2b4d71e4caed5e06803877 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:14:55 +0800
+Subject: bpf: Fix race in cpumap on PREEMPT_RT
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 869c63d5975d55e97f6b168e885452b3da20ea47 ]
+
+On PREEMPT_RT kernels, the per-CPU xdp_bulk_queue (bq) can be accessed
+concurrently by multiple preemptible tasks on the same CPU.
+
+The original code assumes bq_enqueue() and __cpu_map_flush() run
+atomically with respect to each other on the same CPU, relying on
+local_bh_disable() to prevent preemption. However, on PREEMPT_RT,
+local_bh_disable() only calls migrate_disable() (when
+PREEMPT_RT_NEEDS_BH_LOCK is not set) and does not disable
+preemption, which allows CFS scheduling to preempt a task during
+bq_flush_to_queue(), enabling another task on the same CPU to enter
+bq_enqueue() and operate on the same per-CPU bq concurrently.
+
+This leads to several races:
+
+1. Double __list_del_clearprev(): after bq->count is reset in
+ bq_flush_to_queue(), a preempting task can call bq_enqueue() ->
+ bq_flush_to_queue() on the same bq when bq->count reaches
+ CPU_MAP_BULK_SIZE. Both tasks then call __list_del_clearprev()
+ on the same bq->flush_node, the second call dereferences the
+ prev pointer that was already set to NULL by the first.
+
+2. bq->count and bq->q[] races: concurrent bq_enqueue() can corrupt
+ the packet queue while bq_flush_to_queue() is processing it.
+
+The race between task A (__cpu_map_flush -> bq_flush_to_queue) and
+task B (bq_enqueue -> bq_flush_to_queue) on the same CPU:
+
+ Task A (xdp_do_flush) Task B (cpu_map_enqueue)
+ ---------------------- ------------------------
+ bq_flush_to_queue(bq)
+ spin_lock(&q->producer_lock)
+ /* flush bq->q[] to ptr_ring */
+ bq->count = 0
+ spin_unlock(&q->producer_lock)
+ bq_enqueue(rcpu, xdpf)
+ <-- CFS preempts Task A --> bq->q[bq->count++] = xdpf
+ /* ... more enqueues until full ... */
+ bq_flush_to_queue(bq)
+ spin_lock(&q->producer_lock)
+ /* flush to ptr_ring */
+ spin_unlock(&q->producer_lock)
+ __list_del_clearprev(flush_node)
+ /* sets flush_node.prev = NULL */
+ <-- Task A resumes -->
+ __list_del_clearprev(flush_node)
+ flush_node.prev->next = ...
+ /* prev is NULL -> kernel oops */
+
+Fix this by adding a local_lock_t to xdp_bulk_queue and acquiring it
+in bq_enqueue() and __cpu_map_flush(). These paths already run under
+local_bh_disable(), so use local_lock_nested_bh() which on non-RT is
+a pure annotation with no overhead, and on PREEMPT_RT provides a
+per-CPU sleeping lock that serializes access to the bq.
+
+To reproduce, insert an mdelay(100) between bq->count = 0 and
+__list_del_clearprev() in bq_flush_to_queue(), then run reproducer
+provided by syzkaller.
+
+Fixes: 3253cb49cbad ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
+Reported-by: syzbot+2b3391f44313b3983e91@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/69369331.a70a0220.38f243.009d.GAE@google.com/T/
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
+Link: https://lore.kernel.org/r/20260225121459.183121-2-jiayuan.chen@linux.dev
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/cpumap.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index 703e5df1f4ef9..306bf98378041 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -29,6 +29,7 @@
+ #include <linux/sched.h>
+ #include <linux/workqueue.h>
+ #include <linux/kthread.h>
++#include <linux/local_lock.h>
+ #include <linux/completion.h>
+ #include <trace/events/xdp.h>
+ #include <linux/btf_ids.h>
+@@ -52,6 +53,7 @@ struct xdp_bulk_queue {
+ struct list_head flush_node;
+ struct bpf_cpu_map_entry *obj;
+ unsigned int count;
++ local_lock_t bq_lock;
+ };
+
+ /* Struct for every remote "destination" CPU in map */
+@@ -451,6 +453,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+ for_each_possible_cpu(i) {
+ bq = per_cpu_ptr(rcpu->bulkq, i);
+ bq->obj = rcpu;
++ local_lock_init(&bq->bq_lock);
+ }
+
+ /* Alloc queue */
+@@ -717,6 +720,8 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
+ struct ptr_ring *q;
+ int i;
+
++ lockdep_assert_held(&bq->bq_lock);
++
+ if (unlikely(!bq->count))
+ return;
+
+@@ -744,11 +749,15 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
+ }
+
+ /* Runs under RCU-read-side, plus in softirq under NAPI protection.
+- * Thus, safe percpu variable access.
++ * Thus, safe percpu variable access. PREEMPT_RT relies on
++ * local_lock_nested_bh() to serialise access to the per-CPU bq.
+ */
+ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+ {
+- struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
++ struct xdp_bulk_queue *bq;
++
++ local_lock_nested_bh(&rcpu->bulkq->bq_lock);
++ bq = this_cpu_ptr(rcpu->bulkq);
+
+ if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
+ bq_flush_to_queue(bq);
+@@ -769,6 +778,8 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+
+ list_add(&bq->flush_node, flush_list);
+ }
++
++ local_unlock_nested_bh(&rcpu->bulkq->bq_lock);
+ }
+
+ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+@@ -805,7 +816,9 @@ void __cpu_map_flush(struct list_head *flush_list)
+ struct xdp_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
++ local_lock_nested_bh(&bq->obj->bulkq->bq_lock);
+ bq_flush_to_queue(bq);
++ local_unlock_nested_bh(&bq->obj->bulkq->bq_lock);
+
+ /* If already running, costs spin_lock_irqsave + smb_mb */
+ wake_up_process(bq->obj->kthread);
+--
+2.51.0
+
--- /dev/null
+From cdc3d699e3a032d7baaef050dc04e20c72f512cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:14:56 +0800
+Subject: bpf: Fix race in devmap on PREEMPT_RT
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 1872e75375c40add4a35990de3be77b5741c252c ]
+
+On PREEMPT_RT kernels, the per-CPU xdp_dev_bulk_queue (bq) can be
+accessed concurrently by multiple preemptible tasks on the same CPU.
+
+The original code assumes bq_enqueue() and __dev_flush() run atomically
+with respect to each other on the same CPU, relying on
+local_bh_disable() to prevent preemption. However, on PREEMPT_RT,
+local_bh_disable() only calls migrate_disable() (when
+PREEMPT_RT_NEEDS_BH_LOCK is not set) and does not disable
+preemption, which allows CFS scheduling to preempt a task during
+bq_xmit_all(), enabling another task on the same CPU to enter
+bq_enqueue() and operate on the same per-CPU bq concurrently.
+
+This leads to several races:
+
+1. Double-free / use-after-free on bq->q[]: bq_xmit_all() snapshots
+ cnt = bq->count, then iterates bq->q[0..cnt-1] to transmit frames.
+ If preempted after the snapshot, a second task can call bq_enqueue()
+ -> bq_xmit_all() on the same bq, transmitting (and freeing) the
+ same frames. When the first task resumes, it operates on stale
+ pointers in bq->q[], causing use-after-free.
+
+2. bq->count and bq->q[] corruption: concurrent bq_enqueue() modifying
+ bq->count and bq->q[] while bq_xmit_all() is reading them.
+
+3. dev_rx/xdp_prog teardown race: __dev_flush() clears bq->dev_rx and
+ bq->xdp_prog after bq_xmit_all(). If preempted between
+ bq_xmit_all() return and bq->dev_rx = NULL, a preempting
+ bq_enqueue() sees dev_rx still set (non-NULL), skips adding bq to
+ the flush_list, and enqueues a frame. When __dev_flush() resumes,
+ it clears dev_rx and removes bq from the flush_list, orphaning the
+ newly enqueued frame.
+
+4. __list_del_clearprev() on flush_node: similar to the cpumap race,
+ both tasks can call __list_del_clearprev() on the same flush_node,
+ the second dereferences the prev pointer already set to NULL.
+
+The race between task A (__dev_flush -> bq_xmit_all) and task B
+(bq_enqueue -> bq_xmit_all) on the same CPU:
+
+ Task A (xdp_do_flush) Task B (ndo_xdp_xmit redirect)
+ ---------------------- --------------------------------
+ __dev_flush(flush_list)
+ bq_xmit_all(bq)
+ cnt = bq->count /* e.g. 16 */
+ /* start iterating bq->q[] */
+ <-- CFS preempts Task A -->
+ bq_enqueue(dev, xdpf)
+ bq->count == DEV_MAP_BULK_SIZE
+ bq_xmit_all(bq, 0)
+ cnt = bq->count /* same 16! */
+ ndo_xdp_xmit(bq->q[])
+ /* frames freed by driver */
+ bq->count = 0
+ <-- Task A resumes -->
+ ndo_xdp_xmit(bq->q[])
+ /* use-after-free: frames already freed! */
+
+Fix this by adding a local_lock_t to xdp_dev_bulk_queue and acquiring
+it in bq_enqueue() and __dev_flush(). These paths already run under
+local_bh_disable(), so use local_lock_nested_bh() which on non-RT is
+a pure annotation with no overhead, and on PREEMPT_RT provides a
+per-CPU sleeping lock that serializes access to the bq.
+
+Fixes: 3253cb49cbad ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
+Link: https://lore.kernel.org/r/20260225121459.183121-3-jiayuan.chen@linux.dev
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 2984e938f94dc..3d619d01088e3 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -45,6 +45,7 @@
+ * types of devmap; only the lookup and insertion is different.
+ */
+ #include <linux/bpf.h>
++#include <linux/local_lock.h>
+ #include <net/xdp.h>
+ #include <linux/filter.h>
+ #include <trace/events/xdp.h>
+@@ -60,6 +61,7 @@ struct xdp_dev_bulk_queue {
+ struct net_device *dev_rx;
+ struct bpf_prog *xdp_prog;
+ unsigned int count;
++ local_lock_t bq_lock;
+ };
+
+ struct bpf_dtab_netdev {
+@@ -381,6 +383,8 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
+ int to_send = cnt;
+ int i;
+
++ lockdep_assert_held(&bq->bq_lock);
++
+ if (unlikely(!cnt))
+ return;
+
+@@ -425,10 +429,12 @@ void __dev_flush(struct list_head *flush_list)
+ struct xdp_dev_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
++ local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
+ bq_xmit_all(bq, XDP_XMIT_FLUSH);
+ bq->dev_rx = NULL;
+ bq->xdp_prog = NULL;
+ __list_del_clearprev(&bq->flush_node);
++ local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
+ }
+ }
+
+@@ -451,12 +457,16 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
+
+ /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
+ * variable access, and map elements stick around. See comment above
+- * xdp_do_flush() in filter.c.
++ * xdp_do_flush() in filter.c. PREEMPT_RT relies on local_lock_nested_bh()
++ * to serialise access to the per-CPU bq.
+ */
+ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx, struct bpf_prog *xdp_prog)
+ {
+- struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
++ struct xdp_dev_bulk_queue *bq;
++
++ local_lock_nested_bh(&dev->xdp_bulkq->bq_lock);
++ bq = this_cpu_ptr(dev->xdp_bulkq);
+
+ if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
+ bq_xmit_all(bq, 0);
+@@ -477,6 +487,8 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ }
+
+ bq->q[bq->count++] = xdpf;
++
++ local_unlock_nested_bh(&dev->xdp_bulkq->bq_lock);
+ }
+
+ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+@@ -1127,8 +1139,13 @@ static int dev_map_notification(struct notifier_block *notifier,
+ if (!netdev->xdp_bulkq)
+ return NOTIFY_BAD;
+
+- for_each_possible_cpu(cpu)
+- per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
++ for_each_possible_cpu(cpu) {
++ struct xdp_dev_bulk_queue *bq;
++
++ bq = per_cpu_ptr(netdev->xdp_bulkq, cpu);
++ bq->dev = netdev;
++ local_lock_init(&bq->bq_lock);
++ }
+ break;
+ case NETDEV_UNREGISTER:
+ /* This rcu_read_lock/unlock pair is needed because
+--
+2.51.0
+
--- /dev/null
+From 9de704e00a504b1d62880ea90219047f90a680fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 05:34:44 +0000
+Subject: bpf: Fix stack-out-of-bounds write in devmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b7bf516c3ecd9a2aae2dc2635178ab87b734fef1 ]
+
+get_upper_ifindexes() iterates over all upper devices and writes their
+indices into an array without checking bounds.
+
+Also the callers assume that the max number of upper devices is
+MAX_NEST_DEV and allocate excluded_devices[1+MAX_NEST_DEV] on the stack,
+but that assumption is not correct and the number of upper devices could
+be larger than MAX_NEST_DEV (e.g., many macvlans), causing a
+stack-out-of-bounds write.
+
+Add a max parameter to get_upper_ifindexes() to avoid the issue.
+When there are too many upper devices, return -EOVERFLOW and abort the
+redirect.
+
+To reproduce, create more than MAX_NEST_DEV(8) macvlans on a device with
+an XDP program attached using BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS.
+Then send a packet to the device to trigger the XDP redirect path.
+
+Reported-by: syzbot+10cc7f13760b31bd2e61@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698c4ce3.050a0220.340abe.000b.GAE@google.com/T/
+Fixes: aeea1b86f936 ("bpf, devmap: Exclude XDP broadcast to master device")
+Reviewed-by: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Link: https://lore.kernel.org/r/20260225053506.4738-1-kohei@enjuk.jp
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 2625601de76e9..2984e938f94dc 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -588,18 +588,22 @@ static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifin
+ }
+
+ /* Get ifindex of each upper device. 'indexes' must be able to hold at
+- * least MAX_NEST_DEV elements.
+- * Returns the number of ifindexes added.
++ * least 'max' elements.
++ * Returns the number of ifindexes added, or -EOVERFLOW if there are too
++ * many upper devices.
+ */
+-static int get_upper_ifindexes(struct net_device *dev, int *indexes)
++static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
+ {
+ struct net_device *upper;
+ struct list_head *iter;
+ int n = 0;
+
+ netdev_for_each_upper_dev_rcu(dev, upper, iter) {
++ if (n >= max)
++ return -EOVERFLOW;
+ indexes[n++] = upper->ifindex;
+ }
++
+ return n;
+ }
+
+@@ -615,7 +619,11 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev_rx->ifindex;
+ }
+
+@@ -733,7 +741,11 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev->ifindex;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From c0f7a5ad77ccf2805ea52044aa463f7771be12c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 22:35:02 +0100
+Subject: bpf: Improve bounds when tnum has a single possible value
+
+From: Paul Chaignon <paul.chaignon@gmail.com>
+
+[ Upstream commit efc11a667878a1d655ff034a93a539debbfedb12 ]
+
+We're hitting an invariant violation in Cilium that sometimes leads to
+BPF programs being rejected and Cilium failing to start [1]. The
+following extract from verifier logs shows what's happening:
+
+ from 201 to 236: R1=0 R6=ctx() R7=1 R9=scalar(smin=umin=smin32=umin32=3584,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100)) R10=fp0
+ 236: R1=0 R6=ctx() R7=1 R9=scalar(smin=umin=smin32=umin32=3584,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100)) R10=fp0
+ ; if (magic == MARK_MAGIC_HOST || magic == MARK_MAGIC_OVERLAY || magic == MARK_MAGIC_ENCRYPT) @ bpf_host.c:1337
+ 236: (16) if w9 == 0xe00 goto pc+45 ; R9=scalar(smin=umin=smin32=umin32=3585,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100))
+ 237: (16) if w9 == 0xf00 goto pc+1
+ verifier bug: REG INVARIANTS VIOLATION (false_reg1): range bounds violation u64=[0xe01, 0xe00] s64=[0xe01, 0xe00] u32=[0xe01, 0xe00] s32=[0xe01, 0xe00] var_off=(0xe00, 0x0)
+
+We reach instruction 236 with two possible values for R9, 0xe00 and
+0xf00. This is perfectly reflected in the tnum, but of course the ranges
+are less accurate and cover [0xe00; 0xf00]. Taking the fallthrough path
+at instruction 236 allows the verifier to reduce the range to
+[0xe01; 0xf00]. The tnum is however not updated.
+
+With these ranges, at instruction 237, the verifier is not able to
+deduce that R9 is always equal to 0xf00. Hence the fallthrough pass is
+explored first, the verifier refines the bounds using the assumption
+that R9 != 0xf00, and ends up with an invariant violation.
+
+This pattern of impossible branch + bounds refinement is common to all
+invariant violations seen so far. The long-term solution is likely to
+rely on the refinement + invariant violation check to detect dead
+branches, as started by Eduard. To fix the current issue, we need
+something with less refactoring that we can backport.
+
+This patch uses the tnum_step helper introduced in the previous patch to
+detect the above situation. In particular, three cases are now detected
+in the bounds refinement:
+
+1. The u64 range and the tnum only overlap in umin.
+ u64: ---[xxxxxx]-----
+ tnum: --xx----------x-
+
+2. The u64 range and the tnum only overlap in the maximum value
+ represented by the tnum, called tmax.
+ u64: ---[xxxxxx]-----
+ tnum: xx-----x--------
+
+3. The u64 range and the tnum only overlap in between umin (excluded)
+ and umax.
+ u64: ---[xxxxxx]-----
+ tnum: xx----x-------x-
+
+To detect these three cases, we call tnum_step(tnum, umin), which
+returns the smallest member of the tnum greater than umin, called
+tnum_next here. We're in case (1) if umin is part of the tnum and
+tnum_next is greater than umax. We're in case (2) if umin is not part of
+the tnum and tnum_next is equal to tmax. Finally, we're in case (3) if
+umin is not part of the tnum, tnum_next is inferior or equal to umax,
+and calling tnum_step a second time gives us a value past umax.
+
+This change implements these three cases. With it, the above bytecode
+looks as follows:
+
+ 0: (85) call bpf_get_prandom_u32#7 ; R0=scalar()
+ 1: (47) r0 |= 3584 ; R0=scalar(smin=0x8000000000000e00,umin=umin32=3584,smin32=0x80000e00,var_off=(0xe00; 0xfffffffffffff1ff))
+ 2: (57) r0 &= 3840 ; R0=scalar(smin=umin=smin32=umin32=3584,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100))
+ 3: (15) if r0 == 0xe00 goto pc+2 ; R0=3840
+ 4: (15) if r0 == 0xf00 goto pc+1
+ 4: R0=3840
+ 6: (95) exit
+
+In addition to the new selftests, this change was also verified with
+Agni [3]. For the record, the raw SMT is available at [4]. The property
+it verifies is that: If a concrete value x is contained in all input
+abstract values, after __update_reg_bounds, it will continue to be
+contained in all output abstract values.
+
+Link: https://github.com/cilium/cilium/issues/44216 [1]
+Link: https://pchaigno.github.io/test-verifier-complexity.html [2]
+Link: https://github.com/bpfverif/agni [3]
+Link: https://pastebin.com/raw/naCfaqNx [4]
+Fixes: 0df1a55afa83 ("bpf: Warn on internal verifier errors")
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Tested-by: Marco Schirrmeister <mschirrmeister@gmail.com>
+Co-developed-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
+Link: https://lore.kernel.org/r/ef254c4f68be19bd393d450188946821c588565d.1772225741.git.paul.chaignon@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 449997aa77a06..e37ff28e3cd9d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2347,6 +2347,9 @@ static void __update_reg32_bounds(struct bpf_reg_state *reg)
+
+ static void __update_reg64_bounds(struct bpf_reg_state *reg)
+ {
++ u64 tnum_next, tmax;
++ bool umin_in_tnum;
++
+ /* min signed is max(sign bit) | min(other bits) */
+ reg->smin_value = max_t(s64, reg->smin_value,
+ reg->var_off.value | (reg->var_off.mask & S64_MIN));
+@@ -2356,6 +2359,33 @@ static void __update_reg64_bounds(struct bpf_reg_state *reg)
+ reg->umin_value = max(reg->umin_value, reg->var_off.value);
+ reg->umax_value = min(reg->umax_value,
+ reg->var_off.value | reg->var_off.mask);
++
++ /* Check if u64 and tnum overlap in a single value */
++ tnum_next = tnum_step(reg->var_off, reg->umin_value);
++ umin_in_tnum = (reg->umin_value & ~reg->var_off.mask) == reg->var_off.value;
++ tmax = reg->var_off.value | reg->var_off.mask;
++ if (umin_in_tnum && tnum_next > reg->umax_value) {
++ /* The u64 range and the tnum only overlap in umin.
++ * u64: ---[xxxxxx]-----
++ * tnum: --xx----------x-
++ */
++ ___mark_reg_known(reg, reg->umin_value);
++ } else if (!umin_in_tnum && tnum_next == tmax) {
++ /* The u64 range and the tnum only overlap in the maximum value
++ * represented by the tnum, called tmax.
++ * u64: ---[xxxxxx]-----
++ * tnum: xx-----x--------
++ */
++ ___mark_reg_known(reg, tmax);
++ } else if (!umin_in_tnum && tnum_next <= reg->umax_value &&
++ tnum_step(reg->var_off, tnum_next) > reg->umax_value) {
++ /* The u64 range and the tnum only overlap in between umin
++ * (excluded) and umax.
++ * u64: ---[xxxxxx]-----
++ * tnum: xx----x-------x-
++ */
++ ___mark_reg_known(reg, tnum_next);
++ }
+ }
+
+ static void __update_reg_bounds(struct bpf_reg_state *reg)
+--
+2.51.0
+
--- /dev/null
+From 1ec1d653f51f7899be21d2a98319b32fd566d01d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 22:32:21 +0100
+Subject: bpf: Introduce tnum_step to step through tnum's members
+
+From: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+
+[ Upstream commit 76e954155b45294c502e3d3a9e15757c858ca55e ]
+
+This commit introduces tnum_step(), a function that, when given t and a
+number z, returns the smallest member of t larger than z. The number z
+must be greater than or equal to the smallest member of t and less than
+the largest member of t.
+
+The first step is to compute j, a number that keeps all of t's known
+bits, and matches all unknown bits to z's bits. Since j is a member of
+the t, it is already a candidate for result. However, we want our result
+to be (minimally) greater than z.
+
+There are only two possible cases:
+
+(1) Case j <= z. In this case, we want to increase the value of j and
+make it > z.
+(2) Case j > z. In this case, we want to decrease the value of j while
+keeping it > z.
+
+(Case 1) j <= z
+
+t = xx11x0x0
+z = 10111101 (189)
+j = 10111000 (184)
+ ^
+ k
+
+(Case 1.1) Let's first consider the case where j < z. We will address j
+== z later.
+
+Since z > j, there had to be a bit position that was 1 in z and a 0 in
+j, beyond which all positions of higher significance are equal in j and
+z. Further, this position could not have been unknown in a, because the
+unknown positions of a match z. This position had to be a 1 in z and
+known 0 in t.
+
+Let k be position of the most significant 1-to-0 flip. In our example, k
+= 3 (starting the count at 1 at the least significant bit). Setting (to
+1) the unknown bits of t in positions of significance smaller than
+k will not produce a result > z. Hence, we must set/unset the unknown
+bits at positions of significance higher than k. Specifically, we look
+for the next larger combination of 1s and 0s to place in those
+positions, relative to the combination that exists in z. We can achieve
+this by concatenating bits at unknown positions of t into an integer,
+adding 1, and writing the bits of that result back into the
+corresponding bit positions previously extracted from z.
+
+>From our example, considering only positions of significance greater
+than k:
+
+t = xx..x
+z = 10..1
+ + 1
+ -----
+ 11..0
+
+This is the exact combination 1s and 0s we need at the unknown bits of t
+in positions of significance greater than k. Further, our result must
+only increase the value minimally above z. Hence, unknown bits in
+positions of significance smaller than k should remain 0. We finally
+have,
+
+result = 11110000 (240)
+
+(Case 1.2) Now consider the case when j = z, for example
+
+t = 1x1x0xxx
+z = 10110100 (180)
+j = 10110100 (180)
+
+Matching the unknown bits of t to the bits of z yielded exactly z.
+To produce a number greater than z, we must set/unset the unknown bits
+in t, and *all* the unknown bits of t candidates for being set/unset. We
+can do this similar to Case 1.1, by adding 1 to the bits extracted from
+the masked bit positions of z. Essentially, this case is equivalent to
+Case 1.1, with k = 0.
+
+t = 1x1x0xxx
+z = .0.1.100
+ + 1
+ ---------
+ .0.1.101
+
+This is the exact combination of bits needed in the unknown positions of
+t. After recalling the known positions of t, we get
+
+result = 10110101 (181)
+
+(Case 2) j > z
+
+t = x00010x1
+z = 10000010 (130)
+j = 10001011 (139)
+ ^
+ k
+
+Since j > z, there had to be a bit position which was 0 in z, and a 1 in
+j, beyond which all positions of higher significance are equal in j and
+z. This position had to be a 0 in z and known 1 in t. Let k be the
+position of the most significant 0-to-1 flip. In our example, k = 4.
+
+Because of the 0-to-1 flip at position k, a member of t can become
+greater than z if the bits in positions greater than k are themselves >=
+to z. To make that member *minimally* greater than z, the bits in
+positions greater than k must be exactly = z. Hence, we simply match all
+of t's unknown bits in positions more significant than k to z's bits. In
+positions less significant than k, we set all t's unknown bits to 0
+to retain minimality.
+
+In our example, in positions of greater significance than k (=4),
+t=x000. These positions are matched with z (1000) to produce 1000. In
+positions of lower significance than k, t=10x1. All unknown bits are set
+to 0 to produce 1001. The final result is:
+
+result = 10001001 (137)
+
+This concludes the computation for a result > z that is a member of t.
+
+The procedure for tnum_step() in this commit implements the idea
+described above. As a proof of correctness, we verified the algorithm
+against a logical specification of tnum_step. The specification asserts
+the following about the inputs t, z and output res that:
+
+1. res is a member of t, and
+2. res is strictly greater than z, and
+3. there does not exist another value res2 such that
+ 3a. res2 is also a member of t, and
+ 3b. res2 is greater than z
+ 3c. res2 is smaller than res
+
+We checked the implementation against this logical specification using
+an SMT solver. The verification formula in SMTLIB format is available
+at [1]. The verification returned an "unsat": indicating that no input
+assignment exists for which the implementation and the specification
+produce different outputs.
+
+In addition, we also automatically generated the logical encoding of the
+C implementation using Agni [2] and verified it against the same
+specification. This verification also returned an "unsat", confirming
+that the implementation is equivalent to the specification. The formula
+for this check is also available at [3].
+
+Link: https://pastebin.com/raw/2eRWbiit [1]
+Link: https://github.com/bpfverif/agni [2]
+Link: https://pastebin.com/raw/EztVbBJ2 [3]
+Co-developed-by: Srinivas Narayana <srinivas.narayana@rutgers.edu>
+Signed-off-by: Srinivas Narayana <srinivas.narayana@rutgers.edu>
+Co-developed-by: Santosh Nagarakatte <santosh.nagarakatte@rutgers.edu>
+Signed-off-by: Santosh Nagarakatte <santosh.nagarakatte@rutgers.edu>
+Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+Link: https://lore.kernel.org/r/93fdf71910411c0f19e282ba6d03b4c65f9c5d73.1772225741.git.paul.chaignon@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: efc11a667878 ("bpf: Improve bounds when tnum has a single possible value")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/tnum.h | 3 +++
+ kernel/bpf/tnum.c | 56 ++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 59 insertions(+)
+
+diff --git a/include/linux/tnum.h b/include/linux/tnum.h
+index fa4654ffb6217..ca2cfec8de08a 100644
+--- a/include/linux/tnum.h
++++ b/include/linux/tnum.h
+@@ -131,4 +131,7 @@ static inline bool tnum_subreg_is_const(struct tnum a)
+ return !(tnum_subreg(a)).mask;
+ }
+
++/* Returns the smallest member of t larger than z */
++u64 tnum_step(struct tnum t, u64 z);
++
+ #endif /* _LINUX_TNUM_H */
+diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
+index 26fbfbb017001..4abc359b3db01 100644
+--- a/kernel/bpf/tnum.c
++++ b/kernel/bpf/tnum.c
+@@ -269,3 +269,59 @@ struct tnum tnum_bswap64(struct tnum a)
+ {
+ return TNUM(swab64(a.value), swab64(a.mask));
+ }
++
++/* Given tnum t, and a number z such that tmin <= z < tmax, where tmin
++ * is the smallest member of the t (= t.value) and tmax is the largest
++ * member of t (= t.value | t.mask), returns the smallest member of t
++ * larger than z.
++ *
++ * For example,
++ * t = x11100x0
++ * z = 11110001 (241)
++ * result = 11110010 (242)
++ *
++ * Note: if this function is called with z >= tmax, it just returns
++ * early with tmax; if this function is called with z < tmin, the
++ * algorithm already returns tmin.
++ */
++u64 tnum_step(struct tnum t, u64 z)
++{
++ u64 tmax, j, p, q, r, s, v, u, w, res;
++ u8 k;
++
++ tmax = t.value | t.mask;
++
++ /* if z >= largest member of t, return largest member of t */
++ if (z >= tmax)
++ return tmax;
++
++ /* if z < smallest member of t, return smallest member of t */
++ if (z < t.value)
++ return t.value;
++
++ /* keep t's known bits, and match all unknown bits to z */
++ j = t.value | (z & t.mask);
++
++ if (j > z) {
++ p = ~z & t.value & ~t.mask;
++ k = fls64(p); /* k is the most-significant 0-to-1 flip */
++ q = U64_MAX << k;
++ r = q & z; /* positions > k matched to z */
++ s = ~q & t.value; /* positions <= k matched to t.value */
++ v = r | s;
++ res = v;
++ } else {
++ p = z & ~t.value & ~t.mask;
++ k = fls64(p); /* k is the most-significant 1-to-0 flip */
++ q = U64_MAX << k;
++ r = q & t.mask & z; /* unknown positions > k, matched to z */
++ s = q & ~t.mask; /* known positions > k, set to 1 */
++ v = r | s;
++ /* add 1 to unknown positions > k to make value greater than z */
++ u = v + (1ULL << k);
++ /* extract bits in unknown positions > k from u, rest from t.value */
++ w = (u & t.mask) | t.value;
++ res = w;
++ }
++ return res;
++}
+--
+2.51.0
+
--- /dev/null
+From 840424c30d19a7b99f7684d9184421d47edb8664 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:41 +0000
+Subject: btrfs: fix compat mask in error messages in btrfs_check_features()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 587bb33b10bda645a1028c1737ad3992b3d7cf61 ]
+
+Commit d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency
+checks") introduced a regression when it comes to handling unsupported
+incompat or compat_ro flags. Beforehand we only printed the flags that
+we didn't recognize, afterwards we printed them all, which is less
+useful. Fix the error handling so it behaves like it used to.
+
+Fixes: d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 3fd5d6a27d4c0..9c3a944cbc24a 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3160,7 +3160,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ btrfs_err(fs_info,
+ "cannot mount because of unknown incompat features (0x%llx)",
+- incompat);
++ incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP);
+ return -EINVAL;
+ }
+
+@@ -3192,7 +3192,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (compat_ro_unsupp && is_rw_mount) {
+ btrfs_err(fs_info,
+ "cannot mount read-write because of unknown compat_ro features (0x%llx)",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+@@ -3205,7 +3205,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
+ btrfs_err(fs_info,
+ "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 121b9e4de081f9b92c2cada60e46899ae14f959d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 18:25:42 +0000
+Subject: btrfs: fix error message order of parameters in
+ btrfs_delete_delayed_dir_index()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 3cf0f35779d364cf2003c617bb7f3f3e41023372 ]
+
+Fix the error message in btrfs_delete_delayed_dir_index() if
+__btrfs_add_delayed_item() fails: the message says root, inode, index,
+error, but we're actually passing index, root, inode, error.
+
+Fixes: adc1ef55dc04 ("btrfs: add details to error messages at btrfs_delete_delayed_dir_index()")
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/delayed-inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 59b489d7e4b58..ea48706a3d810 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1676,7 +1676,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+ if (unlikely(ret)) {
+ btrfs_err(trans->fs_info,
+ "failed to add delayed dir index item, root: %llu, inode: %llu, index: %llu, error: %d",
+- index, btrfs_root_id(node->root), node->inode_id, ret);
++ btrfs_root_id(node->root), node->inode_id, index, ret);
+ btrfs_delayed_item_release_metadata(dir->root, item);
+ btrfs_release_delayed_item(item);
+ }
+--
+2.51.0
+
--- /dev/null
+From 3a3765a1626693b9c958662b0a7571e13ad21c3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:21:44 +0000
+Subject: btrfs: fix incorrect key offset in error message in
+ check_dev_extent_item()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 511dc8912ae3e929c1a182f5e6b2326516fd42a0 ]
+
+Fix the error message in check_dev_extent_item(), when an overlapping
+stripe is encountered. For dev extents, objectid is the disk number and
+offset the physical address, so prev_key->objectid should actually be
+prev_key->offset.
+
+(I can't take any credit for this one - this was discovered by Chris and
+his friend Claude.)
+
+Reported-by: Chris Mason <clm@fb.com>
+Fixes: 008e2512dc56 ("btrfs: tree-checker: add dev extent item checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index c10b4c242acfc..7bc758ec64a11 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1894,7 +1894,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+- prev_key->objectid, prev_len, key->offset);
++ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 0f29fbc22012b7927ea76dddf3c87b557d8103e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 14:39:46 +0000
+Subject: btrfs: fix objectid value in error message in check_extent_data_ref()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit a10172780526c2002e062102ad4f2aabac495889 ]
+
+Fix a copy-paste error in check_extent_data_ref(): we're printing root
+as in the message above, we should be printing objectid.
+
+Fixes: f333a3c7e832 ("btrfs: tree-checker: validate dref root and objectid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 7bc758ec64a11..420c0f0e17c85 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1713,7 +1713,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref objectid value %llu",
+- root);
++ objectid);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+--
+2.51.0
+
--- /dev/null
+From 039eebb1dec1a84b0b37e35f632ff1cd6fc74e61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:13 +0000
+Subject: btrfs: fix warning in scrub_verify_one_metadata()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 44e2fda66427a0442d8d2c0e6443256fb458ab6b ]
+
+Commit b471965fdb2d ("btrfs: fix replace/scrub failure with
+metadata_uuid") fixed the comparison in scrub_verify_one_metadata() to
+use metadata_uuid rather than fsid, but left the warning as it was. Fix
+it so it matches what we're doing.
+
+Fixes: b471965fdb2d ("btrfs: fix replace/scrub failure with metadata_uuid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/scrub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 747e2c748376a..16936d17166ee 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -747,7 +747,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ btrfs_warn_rl(fs_info,
+ "scrub: tree block %llu mirror %u has bad fsid, has %pU want %pU",
+ logical, stripe->mirror_num,
+- header->fsid, fs_info->fs_devices->fsid);
++ header->fsid, fs_info->fs_devices->metadata_uuid);
+ return;
+ }
+ if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+--
+2.51.0
+
--- /dev/null
+From c2792a0c1e57bf3d4db9d893908c16e28625b8d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 22:12:15 +0100
+Subject: btrfs: free pages on error in btrfs_uring_read_extent()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Sabaté Solà <mssola@mssola.com>
+
+[ Upstream commit 3f501412f2079ca14bf68a18d80a2b7a823f1f64 ]
+
+In this function the 'pages' object is never freed in the hopes that it is
+picked up by btrfs_uring_read_finished() whenever that executes in the
+future. But that's just the happy path. Along the way previous
+allocations might have gone wrong, or we might not get -EIOCBQUEUED from
+btrfs_encoded_read_regular_fill_pages(). In all these cases, we go to a
+cleanup section that frees all memory allocated by this function without
+assuming any deferred execution, and this also needs to happen for the
+'pages' allocation.
+
+Fixes: 34310c442e17 ("btrfs: add io_uring command for encoded reads (ENCODED_READ ioctl)")
+Signed-off-by: Miquel Sabaté Solà <mssola@mssola.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ioctl.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 9a34d6530658e..736a1b3170700 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4725,7 +4725,7 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
+ {
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct extent_io_tree *io_tree = &inode->io_tree;
+- struct page **pages;
++ struct page **pages = NULL;
+ struct btrfs_uring_priv *priv = NULL;
+ unsigned long nr_pages;
+ int ret;
+@@ -4783,6 +4783,11 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ kfree(priv);
++ for (int i = 0; i < nr_pages; i++) {
++ if (pages[i])
++ __free_page(pages[i]);
++ }
++ kfree(pages);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1e1d72f59e3b2f249ab67f933abc4c1c38883293 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:32:39 +0000
+Subject: btrfs: print correct subvol num if active swapfile prevents deletion
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 1c7e9111f4e6d6d42bc47759c9af1ef91f03ac2c ]
+
+Fix the error message in btrfs_delete_subvolume() if we can't delete a
+subvolume because it has an active swapfile: we were printing the number
+of the parent rather than the target.
+
+Fixes: 60021bd754c6 ("btrfs: prevent subvol with swapfile from being deleted")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 47e762856521d..2799b10592d5a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4658,7 +4658,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ spin_unlock(&dest->root_item_lock);
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu with active swapfile",
+- btrfs_root_id(root));
++ btrfs_root_id(dest));
+ ret = -EPERM;
+ goto out_up_write;
+ }
+--
+2.51.0
+
--- /dev/null
+From 36313c7f613fa5a891fec48909f864d8ba48e888 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 13:54:12 -0500
+Subject: cgroup/cpuset: Fix incorrect use of cpuset_update_tasks_cpumask() in
+ update_cpumasks_hier()
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 68230aac8b9aad243626fbaf3ca170012c17fec5 ]
+
+Commit e2ffe502ba45 ("cgroup/cpuset: Add cpuset.cpus.exclusive for v2")
+incorrectly changed the 2nd parameter of cpuset_update_tasks_cpumask()
+from tmp->new_cpus to cp->effective_cpus. This second parameter is just
+a temporary cpumask for internal use. The cpuset_update_tasks_cpumask()
+function was originally called update_tasks_cpumask() before commit
+381b53c3b549 ("cgroup/cpuset: rename functions shared between v1
+and v2").
+
+This mistake can incorrectly change the effective_cpus of the
+cpuset when it is the top_cpuset or in arm64 architecture where
+task_cpu_possible_mask() may differ from cpu_possible_mask. So far
+top_cpuset hasn't been passed to update_cpumasks_hier() yet, but arm64
+arch can still be impacted. Fix it by reverting the incorrect change.
+
+Fixes: e2ffe502ba45 ("cgroup/cpuset: Add cpuset.cpus.exclusive for v2")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index abaa54037918a..08b0c264bd268 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2301,7 +2301,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ WARN_ON(!is_in_v2_mode() &&
+ !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+
+- cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
++ cpuset_update_tasks_cpumask(cp, tmp->new_cpus);
+
+ /*
+ * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
+--
+2.51.0
+
--- /dev/null
+From 4bba50999bf8b563804b92de5c5cdfd8c4507e1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 14:50:38 -0700
+Subject: cxl: Fix race of nvdimm_bus object when creating nvdimm objects
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+[ Upstream commit 96a1fd0d84b17360840f344826897fa71049870e ]
+
+Found issue during running of cxl-translate.sh unit test. Adding a 3s
+sleep right before the test seems to make the issue reproduce fairly
+consistently. The cxl_translate module has dependency on cxl_acpi and
+causes orphaned nvdimm objects to reprobe after cxl_acpi is removed.
+The nvdimm_bus object is registered by the cxl_nvb object when
+cxl_acpi_probe() is called. With the nvdimm_bus object missing,
+__nd_device_register() will trigger NULL pointer dereference when
+accessing the dev->parent that points to &nvdimm_bus->dev.
+
+[ 192.884510] BUG: kernel NULL pointer dereference, address: 000000000000006c
+[ 192.895383] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS edk2-20250812-19.fc42 08/12/2025
+[ 192.897721] Workqueue: cxl_port cxl_bus_rescan_queue [cxl_core]
+[ 192.899459] RIP: 0010:kobject_get+0xc/0x90
+[ 192.924871] Call Trace:
+[ 192.925959] <TASK>
+[ 192.926976] ? pm_runtime_init+0xb9/0xe0
+[ 192.929712] __nd_device_register.part.0+0x4d/0xc0 [libnvdimm]
+[ 192.933314] __nvdimm_create+0x206/0x290 [libnvdimm]
+[ 192.936662] cxl_nvdimm_probe+0x119/0x1d0 [cxl_pmem]
+[ 192.940245] cxl_bus_probe+0x1a/0x60 [cxl_core]
+[ 192.943349] really_probe+0xde/0x380
+
+This patch also relies on the previous change where
+devm_cxl_add_nvdimm_bridge() is called from drivers/cxl/pmem.c instead
+of drivers/cxl/core.c to ensure the dependency of cxl_acpi on cxl_pmem.
+
+1. Set probe_type of cxl_nvb to PROBE_FORCE_SYNCHRONOUS to ensure the
+ driver is probed synchronously when add_device() is called.
+2. Add a check in __devm_cxl_add_nvdimm_bridge() to ensure that the
+ cxl_nvb driver is attached during cxl_acpi_probe().
+3. Take the cxl_root uport_dev lock and the cxl_nvb->dev lock in
+ devm_cxl_add_nvdimm() before checking nvdimm_bus is valid.
+4. Set cxl_nvdimm flag to CXL_NVD_F_INVALIDATED so cxl_nvdimm_probe()
+ will exit with -EBUSY.
+
+The removal of cxl_nvdimm devices should prevent any orphaned devices
+from probing once the nvdimm_bus is gone.
+
+[ dj: Fixed 0-day reported kdoc issue. ]
+[ dj: Fix cxl_nvb reference leak on error. Gregory (kreview-0811365) ]
+
+Suggested-by: Dan Williams <dan.j.williams@intel.com>
+Fixes: 8fdcb1704f61 ("cxl/pmem: Add initial infrastructure for pmem support")
+Tested-by: Alison Schofield <alison.schofield@intel.com>
+Reviewed-by: Alison Schofield <alison.schofield@intel.com>
+Link: https://patch.msgid.link/20260205001633.1813643-3-dave.jiang@intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/pmem.c | 29 +++++++++++++++++++++++++++++
+ drivers/cxl/cxl.h | 5 +++++
+ drivers/cxl/pmem.c | 10 ++++++++--
+ 3 files changed, 42 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
+index e1325936183a6..e3a8b8d813333 100644
+--- a/drivers/cxl/core/pmem.c
++++ b/drivers/cxl/core/pmem.c
+@@ -115,6 +115,15 @@ static void unregister_nvb(void *_cxl_nvb)
+ device_unregister(&cxl_nvb->dev);
+ }
+
++static bool cxl_nvdimm_bridge_failed_attach(struct cxl_nvdimm_bridge *cxl_nvb)
++{
++ struct device *dev = &cxl_nvb->dev;
++
++ guard(device)(dev);
++ /* If the device has no driver, then it failed to attach. */
++ return dev->driver == NULL;
++}
++
+ struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port)
+ {
+@@ -138,6 +147,11 @@ struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
+ if (rc)
+ goto err;
+
++ if (cxl_nvdimm_bridge_failed_attach(cxl_nvb)) {
++ unregister_nvb(cxl_nvb);
++ return ERR_PTR(-ENODEV);
++ }
++
+ rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
+ if (rc)
+ return ERR_PTR(rc);
+@@ -247,6 +261,21 @@ int devm_cxl_add_nvdimm(struct cxl_port *parent_port,
+ if (!cxl_nvb)
+ return -ENODEV;
+
++ /*
++ * Take the uport_dev lock to guard against race of nvdimm_bus object.
++ * cxl_acpi_probe() registers the nvdimm_bus and is done under the
++ * root port uport_dev lock.
++ *
++ * Take the cxl_nvb device lock to ensure that cxl_nvb driver is in a
++ * consistent state. And the driver registers nvdimm_bus.
++ */
++ guard(device)(cxl_nvb->port->uport_dev);
++ guard(device)(&cxl_nvb->dev);
++ if (!cxl_nvb->nvdimm_bus) {
++ rc = -ENODEV;
++ goto err_alloc;
++ }
++
+ cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
+ if (IS_ERR(cxl_nvd)) {
+ rc = PTR_ERR(cxl_nvd);
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 443296da74db1..3a794278cc7fe 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -555,11 +555,16 @@ struct cxl_nvdimm_bridge {
+
+ #define CXL_DEV_ID_LEN 19
+
++enum {
++ CXL_NVD_F_INVALIDATED = 0,
++};
++
+ struct cxl_nvdimm {
+ struct device dev;
+ struct cxl_memdev *cxlmd;
+ u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
+ u64 dirty_shutdowns;
++ unsigned long flags;
+ };
+
+ struct cxl_pmem_region_mapping {
+diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
+index 714beaf1704be..c00b84b960761 100644
+--- a/drivers/cxl/pmem.c
++++ b/drivers/cxl/pmem.c
+@@ -14,7 +14,7 @@
+ static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+
+ /**
+- * __devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
++ * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+ * @host: platform firmware root device
+ * @port: CXL port at the root of a CXL topology
+ *
+@@ -143,6 +143,9 @@ static int cxl_nvdimm_probe(struct device *dev)
+ struct nvdimm *nvdimm;
+ int rc;
+
++ if (test_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags))
++ return -EBUSY;
++
+ set_exclusive_cxl_commands(mds, exclusive_cmds);
+ rc = devm_add_action_or_reset(dev, clear_exclusive, mds);
+ if (rc)
+@@ -323,8 +326,10 @@ static int detach_nvdimm(struct device *dev, void *data)
+ scoped_guard(device, dev) {
+ if (dev->driver) {
+ cxl_nvd = to_cxl_nvdimm(dev);
+- if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
++ if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) {
+ release = true;
++ set_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags);
++ }
+ }
+ }
+ if (release)
+@@ -367,6 +372,7 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
+ .probe = cxl_nvdimm_bridge_probe,
+ .id = CXL_DEVICE_NVDIMM_BRIDGE,
+ .drv = {
++ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
+ },
+ };
+--
+2.51.0
+
--- /dev/null
+From fffa4bed2b520675577ab99251896a9c68319ccc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 17:31:23 -0700
+Subject: cxl: Move devm_cxl_add_nvdimm_bridge() to cxl_pmem.ko
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+[ Upstream commit e7e222ad73d93fe54d6e6e3a15253a0ecf081a1b ]
+
+Moving the symbol devm_cxl_add_nvdimm_bridge() to
+drivers/cxl/cxl_pmem.c, so that cxl_pmem can export a symbol that gives
+cxl_acpi a depedency on cxl_pmem kernel module. This is a prepatory patch
+to resolve the issue of a race for nvdimm_bus object that is created
+during cxl_acpi_probe().
+
+No functional changes besides moving code.
+
+Suggested-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Ira Weiny <ira.weiny@intel.com>
+Tested-by: Alison Schofield <alison.schofield@intel.com>
+Reviewed-by: Alison Schofield <alison.schofield@intel.com>
+Link: https://patch.msgid.link/20260205001633.1813643-2-dave.jiang@intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Stable-dep-of: 96a1fd0d84b1 ("cxl: Fix race of nvdimm_bus object when creating nvdimm objects")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/pmem.c | 13 +++----------
+ drivers/cxl/cxl.h | 2 ++
+ drivers/cxl/pmem.c | 14 ++++++++++++++
+ 3 files changed, 19 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
+index 8853415c106a9..e1325936183a6 100644
+--- a/drivers/cxl/core/pmem.c
++++ b/drivers/cxl/core/pmem.c
+@@ -115,15 +115,8 @@ static void unregister_nvb(void *_cxl_nvb)
+ device_unregister(&cxl_nvb->dev);
+ }
+
+-/**
+- * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+- * @host: platform firmware root device
+- * @port: CXL port at the root of a CXL topology
+- *
+- * Return: bridge device that can host cxl_nvdimm objects
+- */
+-struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+- struct cxl_port *port)
++struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
++ struct cxl_port *port)
+ {
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+@@ -155,7 +148,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ put_device(dev);
+ return ERR_PTR(rc);
+ }
+-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
++EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_nvdimm_bridge, "cxl_pmem");
+
+ static void cxl_nvdimm_release(struct device *dev)
+ {
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 231ddccf89773..443296da74db1 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -866,6 +866,8 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
+ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
+ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port);
++struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
++ struct cxl_port *port);
+ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
+ bool is_cxl_nvdimm(struct device *dev);
+ int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
+diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
+index e197883690efc..714beaf1704be 100644
+--- a/drivers/cxl/pmem.c
++++ b/drivers/cxl/pmem.c
+@@ -13,6 +13,20 @@
+
+ static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+
++/**
++ * __devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
++ * @host: platform firmware root device
++ * @port: CXL port at the root of a CXL topology
++ *
++ * Return: bridge device that can host cxl_nvdimm objects
++ */
++struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
++ struct cxl_port *port)
++{
++ return __devm_cxl_add_nvdimm_bridge(host, port);
++}
++EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
++
+ static void clear_exclusive(void *mds)
+ {
+ clear_exclusive_cxl_commands(mds, exclusive_cmds);
+--
+2.51.0
+
--- /dev/null
+From b6917955c5564d11f75628c80f1fc1e38b44ab4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 14:27:05 +0100
+Subject: debugobject: Make it work with deferred page initialization - again
+
+From: Thomas Gleixner <tglx@kernel.org>
+
+[ Upstream commit fd3634312a04f336dcbfb481060219f0cd320738 ]
+
+debugobjects uses __GFP_HIGH for allocations as it might be invoked
+within locked regions. That worked perfectly fine until v6.18. It still
+works correctly when deferred page initialization is disabled and works
+by chance when no page allocation is required before deferred page
+initialization has completed.
+
+Since v6.18 allocations w/o a reclaim flag cause new_slab() to end up in
+alloc_frozen_pages_nolock_noprof(), which returns early when deferred
+page initialization has not yet completed. As the deferred page
+initialization takes quite a while the debugobject pool is depleted and
+debugobjects are disabled.
+
+This can be worked around when PREEMPT_COUNT is enabled as that allows
+debugobjects to add __GFP_KSWAPD_RECLAIM to the GFP flags when the context
+is preemptible. When PREEMPT_COUNT is disabled the context is unknown and
+the reclaim bit can't be set because the caller might hold locks which
+might deadlock in the allocator.
+
+In preemptible context the reclaim bit is harmless and not a performance
+issue as that's usually invoked from slow path initialization context.
+
+That makes debugobjects depend on PREEMPT_COUNT || !DEFERRED_STRUCT_PAGE_INIT.
+
+Fixes: af92793e52c3 ("slab: Introduce kmalloc_nolock() and kfree_nolock().")
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Link: https://patch.msgid.link/87pl6gznti.ffs@tglx
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/Kconfig.debug | 1 +
+ lib/debugobjects.c | 19 ++++++++++++++++++-
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 88fa610e83840..21cd68084e468 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -721,6 +721,7 @@ source "mm/Kconfig.debug"
+
+ config DEBUG_OBJECTS
+ bool "Debug object operations"
++ depends on PREEMPT_COUNT || !DEFERRED_STRUCT_PAGE_INIT
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, additional code will be inserted into the
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 7f50c4480a4e3..b3151679d0d30 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -398,9 +398,26 @@ static void fill_pool(void)
+
+ atomic_inc(&cpus_allocating);
+ while (pool_should_refill(&pool_global)) {
++ gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+ HLIST_HEAD(head);
+
+- if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
++ /*
++ * Allow reclaim only in preemptible context and during
++ * early boot. If not preemptible, the caller might hold
++ * locks causing a deadlock in the allocator.
++ *
++ * If the reclaim flag is not set during early boot then
++ * allocations, which happen before deferred page
++ * initialization has completed, will fail.
++ *
++ * In preemptible context the flag is harmless and not a
++ * performance issue as that's usually invoked from slow
++ * path initialization context.
++ */
++ if (preemptible() || system_state < SYSTEM_SCHEDULING)
++ gfp |= __GFP_KSWAPD_RECLAIM;
++
++ if (!kmem_alloc_batch(&head, obj_cache, gfp))
+ break;
+
+ guard(raw_spinlock_irqsave)(&pool_lock);
+--
+2.51.0
+
--- /dev/null
+From 750f3d673ad5fae56e89f342ea516df32d0e3b07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 10:18:51 +0530
+Subject: drm/amdgpu: Fix error handling in slot reset
+
+From: Lijo Lazar <lijo.lazar@amd.com>
+
+[ Upstream commit b57c4ec98c17789136a4db948aec6daadceb5024 ]
+
+If the device has not recovered after slot reset is called, it goes to
+out label for error handling. There it could make decision based on
+uninitialized hive pointer and could result in accessing an uninitialized
+list.
+
+Initialize the list and hive properly so that it handles the error
+situation and also releases the reset domain lock which is acquired
+during error_detected callback.
+
+Fixes: 732c6cefc1ec ("drm/amdgpu: Replace tmp_adev with hive in amdgpu_pci_slot_reset")
+Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
+Reviewed-by: Ce Sun <cesun102@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit bb71362182e59caa227e4192da5a612b09349696)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b28ebb44c695d..fb096bf551ef2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -7062,6 +7062,15 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
+
+ memset(&reset_context, 0, sizeof(reset_context));
++ INIT_LIST_HEAD(&device_list);
++ hive = amdgpu_get_xgmi_hive(adev);
++ if (hive) {
++ mutex_lock(&hive->hive_lock);
++ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
++ list_add_tail(&tmp_adev->reset_list, &device_list);
++ } else {
++ list_add_tail(&adev->reset_list, &device_list);
++ }
+
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+@@ -7102,19 +7111,13 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+- INIT_LIST_HEAD(&device_list);
+
+- hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+- mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
++ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+- list_add_tail(&tmp_adev->reset_list, &device_list);
+- }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+- list_add_tail(&adev->reset_list, &device_list);
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
+--
+2.51.0
+
--- /dev/null
+From d84d677bb30a9d6becbb75cef828003f302689a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 13:50:23 -0800
+Subject: drm/amdgpu: Fix locking bugs in error paths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 480ad5f6ead4a47b969aab6618573cd6822bb6a4 ]
+
+Do not unlock psp->ras_context.mutex if it has not been locked. This has
+been detected by the Clang thread-safety analyzer.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: YiPeng Chai <YiPeng.Chai@amd.com>
+Cc: Hawking Zhang <Hawking.Zhang@amd.com>
+Cc: amd-gfx@lists.freedesktop.org
+Fixes: b3fb79cda568 ("drm/amdgpu: add mutex to protect ras shared memory")
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 6fa01b4335978051d2cd80841728fd63cc597970)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 6e8aad91bcd30..0d3c18f04ac36 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -332,13 +332,13 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ if (!context || !context->initialized) {
+ dev_err(adev->dev, "TA is not initialized\n");
+ ret = -EINVAL;
+- goto err_free_shared_buf;
++ goto free_shared_buf;
+ }
+
+ if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_invoke) {
+ dev_err(adev->dev, "Unsupported function to invoke TA\n");
+ ret = -EOPNOTSUPP;
+- goto err_free_shared_buf;
++ goto free_shared_buf;
+ }
+
+ context->session_id = ta_id;
+@@ -346,7 +346,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ mutex_lock(&psp->ras_context.mutex);
+ ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
+ if (ret)
+- goto err_free_shared_buf;
++ goto unlock;
+
+ ret = psp_fn_ta_invoke(psp, cmd_id);
+ if (ret || context->resp_status) {
+@@ -354,15 +354,17 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ ret, context->resp_status);
+ if (!ret) {
+ ret = -EINVAL;
+- goto err_free_shared_buf;
++ goto unlock;
+ }
+ }
+
+ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
+ ret = -EFAULT;
+
+-err_free_shared_buf:
++unlock:
+ mutex_unlock(&psp->ras_context.mutex);
++
++free_shared_buf:
+ kfree(shared_buf);
+
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 1badad436f224d71e157a74583abdb24f91c7f73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:07 -0800
+Subject: drm/amdgpu: Unlock a mutex before destroying it
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 5e0bcc7b88bcd081aaae6f481b10d9ab294fcb69 ]
+
+Mutexes must be unlocked before these are destroyed. This has been detected
+by the Clang thread-safety analyzer.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Yang Wang <kevinyang.wang@amd.com>
+Cc: Hawking Zhang <Hawking.Zhang@amd.com>
+Cc: amd-gfx@lists.freedesktop.org
+Fixes: f5e4cc8461c4 ("drm/amdgpu: implement RAS ACA driver framework")
+Reviewed-by: Yang Wang <kevinyang.wang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 270258ba320beb99648dceffb67e86ac76786e55)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+index 9b31804491500..3f9b094e93a29 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+@@ -641,6 +641,7 @@ static void aca_error_fini(struct aca_error *aerr)
+ aca_bank_error_remove(aerr, bank_error);
+
+ out_unlock:
++ mutex_unlock(&aerr->lock);
+ mutex_destroy(&aerr->lock);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From d37875072555d7c3024256633c82bcbb0964a3a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 12:41:31 +0000
+Subject: drm/amdgpu/userq: Do not allow userspace to trivially trigger kernel
+ warnings
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+[ Upstream commit 7b7d7693a55d606d700beb9549c9f7f0e5d9c24f ]
+
+Userspace can either deliberately pass in the too small num_fences, or the
+required number can legitimately grow between the two calls to the userq
+wait ioctl. In both cases we do not want the emit the kernel warning
+backtrace since nothing is wrong with the kernel and userspace will simply
+get an errno reported back. So lets simply drop the WARN_ONs.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: a292fdecd728 ("drm/amdgpu: Implement userqueue signal/wait IOCTL")
+Cc: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 2c333ea579de6cc20ea7bc50e9595ef72863e65c)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+index 5c181ac75d548..ead1538974454 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+@@ -829,7 +829,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+@@ -846,7 +846,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+@@ -870,7 +870,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+@@ -894,7 +894,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ if (r)
+ goto free_fences;
+
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+--
+2.51.0
+
--- /dev/null
+From 3c09abcd98e31e79db31928d2622134c71f40658 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 22:12:28 +0000
+Subject: drm/client: Do not destroy NULL modes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Cavitt <jonathan.cavitt@intel.com>
+
+[ Upstream commit c601fd5414315fc515f746b499110e46272e7243 ]
+
+'modes' in drm_client_modeset_probe may fail to kcalloc. If this
+occurs, we jump to 'out', calling modes_destroy on it, which
+dereferences it. This may result in a NULL pointer dereference in the
+error case. Prevent that.
+
+Fixes: 3039cc0c0653 ("drm/client: Make copies of modes")
+Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Cc: Ville SyrjÀlÀ <ville.syrjala@linux.intel.com>
+Signed-off-by: Ville SyrjÀlÀ <ville.syrjala@linux.intel.com>
+Link: https://patch.msgid.link/20260224221227.69126-2-jonathan.cavitt@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_client_modeset.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 9c2c3b0c8c470..eaf71c9ecf136 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -930,7 +930,8 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ mutex_unlock(&client->modeset_mutex);
+ out:
+ kfree(crtcs);
+- modes_destroy(dev, modes, connector_count);
++ if (modes)
++ modes_destroy(dev, modes, connector_count);
+ kfree(modes);
+ kfree(offsets);
+ kfree(enabled);
+--
+2.51.0
+
--- /dev/null
+From b93d682dd89a73f446378a66dfb8a034940de39a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Feb 2026 22:47:26 +0000
+Subject: drm/fourcc: fix plane order for 10/12/16-bit YCbCr formats
+
+From: Simon Ser <contact@emersion.fr>
+
+[ Upstream commit e9e0b48cd15b46dcb2bbc165f6b0fee698b855d6 ]
+
+The short comments had the correct order, but the long comments
+had the planes reversed.
+
+Fixes: 2271e0a20ef7 ("drm: drm_fourcc: add 10/12/16bit software decoder YCbCr formats")
+Signed-off-by: Simon Ser <contact@emersion.fr>
+Reviewed-by: Daniel Stone <daniels@collabora.com>
+Reviewed-by: Robert Mader <robert.mader@collabora.com>
+Link: https://patch.msgid.link/20260208224718.57199-1-contact@emersion.fr
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/drm/drm_fourcc.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index e527b24bd824b..c89aede3cb120 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -401,8 +401,8 @@ extern "C" {
+ * implementation can multiply the values by 2^6=64. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [6:10] little endian
+- * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian
+- * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian
++ * index 1 = Cb plane, [15:0] z:Cb [6:10] little endian
++ * index 2 = Cr plane, [15:0] z:Cr [6:10] little endian
+ */
+ #define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+ #define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+@@ -414,8 +414,8 @@ extern "C" {
+ * implementation can multiply the values by 2^4=16. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [4:12] little endian
+- * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian
+- * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian
++ * index 1 = Cb plane, [15:0] z:Cb [4:12] little endian
++ * index 2 = Cr plane, [15:0] z:Cr [4:12] little endian
+ */
+ #define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+ #define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+@@ -424,8 +424,8 @@ extern "C" {
+ /*
+ * 3 plane YCbCr
+ * index 0 = Y plane, [15:0] Y little endian
+- * index 1 = Cr plane, [15:0] Cr little endian
+- * index 2 = Cb plane, [15:0] Cb little endian
++ * index 1 = Cb plane, [15:0] Cb little endian
++ * index 2 = Cr plane, [15:0] Cr little endian
+ */
+ #define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+ #define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+--
+2.51.0
+
--- /dev/null
+From aa9cdad030e162ecbc0017ceef40da3d60819f31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 17:06:29 +0800
+Subject: drm/imx: parallel-display: check return value of
+ devm_drm_bridge_add() in imx_pd_probe()
+
+From: Chen Ni <nichen@iscas.ac.cn>
+
+[ Upstream commit c5f8658f97ec392eeaf355d4e9775ae1f23ca1d3 ]
+
+Return the value of devm_drm_bridge_add() in order to propagate the
+error properly, if it fails due to resource allocation failure or bridge
+registration failure.
+
+This ensures that the probe function fails safely rather than proceeding
+with a potentially incomplete bridge setup.
+
+Fixes: bf7e97910b9f ("drm/imx: parallel-display: add the bridge before attaching it")
+Signed-off-by: Chen Ni <nichen@iscas.ac.cn>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Link: https://patch.msgid.link/20260204090629.2209542-1-nichen@iscas.ac.cn
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/ipuv3/parallel-display.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+index 7fc6af7033078..d5f2ee41c03fe 100644
+--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
++++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+@@ -256,7 +256,9 @@ static int imx_pd_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, imxpd);
+
+- devm_drm_bridge_add(dev, &imxpd->bridge);
++ ret = devm_drm_bridge_add(dev, &imxpd->bridge);
++ if (ret)
++ return ret;
+
+ return component_add(dev, &imx_pd_ops);
+ }
+--
+2.51.0
+
--- /dev/null
+From b1ae2bd583657a72c50fbead0df28c15f4e9535d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 00:21:19 +0800
+Subject: drm/logicvc: Fix device node reference leak in
+ logicvc_drm_config_parse()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fef0e649f8b42bdffe4a916dd46e1b1e9ad2f207 ]
+
+The logicvc_drm_config_parse() function calls of_get_child_by_name() to
+find the "layers" node but fails to release the reference, leading to a
+device node reference leak.
+
+Fix this by using the __free(device_node) cleanup attribute to automatically
+release the reference when the variable goes out of scope.
+
+Fixes: efeeaefe9be5 ("drm: Add support for the LogiCVC display controller")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Reviewed-by: Kory Maincent <kory.maincent@bootlin.com>
+Link: https://patch.msgid.link/20260130-logicvc_drm-v1-1-04366463750c@gmail.com
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/logicvc/logicvc_drm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
+index 204b0fee55d0b..bbebf4fc7f51a 100644
+--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
++++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
+@@ -92,7 +92,6 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_drm_config *config = &logicvc->config;
+- struct device_node *layers_node;
+ int ret;
+
+ logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
+@@ -128,7 +127,8 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ if (ret)
+ return ret;
+
+- layers_node = of_get_child_by_name(of_node, "layers");
++ struct device_node *layers_node __free(device_node) =
++ of_get_child_by_name(of_node, "layers");
+ if (!layers_node) {
+ drm_err(drm_dev, "Missing non-optional layers node\n");
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From ec57208fb9237ad568bb40ca4c616d5537aed843 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 Feb 2026 22:04:38 -0600
+Subject: drm/tiny: sharp-memory: fix pointer error dereference
+
+From: Ethan Tidmore <ethantidmore06@gmail.com>
+
+[ Upstream commit 46120745bb4e7e1f09959624716b4c5d6e2c2e9e ]
+
+The function devm_drm_dev_alloc() returns a pointer error upon failure
+not NULL. Change null check to pointer error check.
+
+Detected by Smatch:
+drivers/gpu/drm/tiny/sharp-memory.c:549 sharp_memory_probe() error:
+'smd' dereferencing possible ERR_PTR()
+
+Fixes: b8f9f21716fec ("drm/tiny: Add driver for Sharp Memory LCD")
+Signed-off-by: Ethan Tidmore <ethantidmore06@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patch.msgid.link/20260216040438.43702-1-ethantidmore06@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tiny/sharp-memory.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
+index 64272cd0f6e22..cbf69460ebf32 100644
+--- a/drivers/gpu/drm/tiny/sharp-memory.c
++++ b/drivers/gpu/drm/tiny/sharp-memory.c
+@@ -541,8 +541,8 @@ static int sharp_memory_probe(struct spi_device *spi)
+
+ smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver,
+ struct sharp_memory_device, drm);
+- if (!smd)
+- return -ENOMEM;
++ if (IS_ERR(smd))
++ return PTR_ERR(smd);
+
+ spi_set_drvdata(spi, smd);
+
+--
+2.51.0
+
--- /dev/null
+From afbb15ec46f45c9999fd53a70989958f40efccc7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jan 2026 12:12:36 -0500
+Subject: drm/vmwgfx: Fix invalid kref_put callback in vmw_bo_dirty_release
+
+From: Brad Spengler <brad.spengler@opensrcsec.com>
+
+[ Upstream commit 211ecfaaef186ee5230a77d054cdec7fbfc6724a ]
+
+The kref_put() call uses (void *)kvfree as the release callback, which
+is incorrect. kref_put() expects a function with signature
+void (*release)(struct kref *), but kvfree has signature
+void (*)(const void *). Calling through an incompatible function pointer
+is undefined behavior.
+
+The code only worked by accident because ref_count is the first member
+of vmw_bo_dirty, making the kref pointer equal to the struct pointer.
+
+Fix this by adding a proper release callback that uses container_of()
+to retrieve the containing structure before freeing.
+
+Fixes: c1962742ffff ("drm/vmwgfx: Use kref in vmw_bo_dirty")
+Signed-off-by: Brad Spengler <brad.spengler@opensrcsec.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Cc: Ian Forbes <ian.forbes@broadcom.com>
+Link: https://patch.msgid.link/20260107171236.3573118-1-zack.rusin@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+index fd4e76486f2d1..45561bc1c9eff 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+@@ -260,6 +260,13 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
+ return ret;
+ }
+
++static void vmw_bo_dirty_free(struct kref *kref)
++{
++ struct vmw_bo_dirty *dirty = container_of(kref, struct vmw_bo_dirty, ref_count);
++
++ kvfree(dirty);
++}
++
+ /**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+@@ -274,7 +281,7 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
+ {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+- if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
++ if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free))
+ vbo->dirty = NULL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From cdb1ae96de09ae6b6e3cb90eedba004fb40c5db2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jan 2026 11:53:57 -0600
+Subject: drm/vmwgfx: Return the correct value in vmw_translate_ptr functions
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit 5023ca80f9589295cb60735016e39fc5cc714243 ]
+
+Before the referenced fixes these functions used a lookup function that
+returned a pointer. This was changed to another lookup function that
+returned an error code with the pointer becoming an out parameter.
+
+The error path when the lookup failed was not changed to reflect this
+change and the code continued to return the PTR_ERR of the now
+uninitialized pointer. This could cause the vmw_translate_ptr functions
+to return success when they actually failed causing further uninitialized
+and OOB accesses.
+
+Reported-by: Kuzey Arda Bulut <kuzeyardabulut@gmail.com>
+Fixes: a309c7194e8a ("drm/vmwgfx: Remove rcu locks from user resources")
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patch.msgid.link/20260113175357.129285-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 3057f8baa7d25..e1f18020170ab 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1143,7 +1143,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+@@ -1199,7 +1199,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+--
+2.51.0
+
--- /dev/null
+From 273514dea14b76e0b90742482b4a2a9a2bd62cb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Feb 2026 14:30:59 -0800
+Subject: drm/xe/wa: Steer RMW of MCR registers while building default LRC
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+[ Upstream commit 43d37df67f7770d8d261fdcb64ecc8c314e91303 ]
+
+When generating the default LRC, if a register is not masked, we apply
+any save-restore programming necessary via a read-modify-write sequence
+that will ensure we only update the relevant bits/fields without
+clobbering the rest of the register. However some of the registers that
+need to be updated might be MCR registers which require steering to a
+non-terminated instance to ensure we can read back a valid, non-zero
+value. The steering of reads originating from a command streamer is
+controlled by register CS_MMIO_GROUP_INSTANCE_SELECT. Emit additional
+MI_LRI commands to update the steering before any RMW of an MCR register
+to ensure the reads are performed properly.
+
+Note that needing to perform a RMW of an MCR register while building the
+default LRC is pretty rare. Most of the MCR registers that are part of
+an engine's LRCs are also masked registers, so no MCR is necessary.
+
+Fixes: f2f90989ccff ("drm/xe: Avoid reading RMW registers in emit_wa_job")
+Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
+Reviewed-by: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+Link: https://patch.msgid.link/20260206223058.387014-2-matthew.d.roper@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 6c2e331c915ba9e774aa847921262805feb00863)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/regs/xe_engine_regs.h | 6 +++
+ drivers/gpu/drm/xe/xe_gt.c | 66 +++++++++++++++++++-----
+ 2 files changed, 60 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+index f4c3e1187a00a..27fba92301c4e 100644
+--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+@@ -96,6 +96,12 @@
+ #define ENABLE_SEMAPHORE_POLL_BIT REG_BIT(13)
+
+ #define RING_CMD_CCTL(base) XE_REG((base) + 0xc4, XE_REG_OPTION_MASKED)
++
++#define CS_MMIO_GROUP_INSTANCE_SELECT(base) XE_REG((base) + 0xcc)
++#define SELECTIVE_READ_ADDRESSING REG_BIT(30)
++#define SELECTIVE_READ_GROUP REG_GENMASK(29, 23)
++#define SELECTIVE_READ_INSTANCE REG_GENMASK(22, 16)
++
+ /*
+ * CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
+ * The lsb of each can be considered a separate enabling bit for encryption.
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 61bed3b04dedd..1099335298471 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -187,11 +187,15 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ return ret;
+ }
+
++/* Dwords required to emit a RMW of a register */
++#define EMIT_RMW_DW 20
++
+ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ {
+- struct xe_reg_sr *sr = &q->hwe->reg_lrc;
++ struct xe_hw_engine *hwe = q->hwe;
++ struct xe_reg_sr *sr = &hwe->reg_lrc;
+ struct xe_reg_sr_entry *entry;
+- int count_rmw = 0, count = 0, ret;
++ int count_rmw = 0, count_rmw_mcr = 0, count = 0, ret;
+ unsigned long idx;
+ struct xe_bb *bb;
+ size_t bb_len = 0;
+@@ -201,6 +205,8 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ ++count;
++ else if (entry->reg.mcr)
++ ++count_rmw_mcr;
+ else
+ ++count_rmw;
+ }
+@@ -208,17 +214,35 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ if (count)
+ bb_len += count * 2 + 1;
+
+- if (count_rmw)
+- bb_len += count_rmw * 20 + 7;
++ /*
++ * RMW of MCR registers is the same as a normal RMW, except an
++ * additional LRI (3 dwords) is required per register to steer the read
++ * to a non-terminated instance.
++ *
++ * We could probably shorten the batch slightly by eliding the
++ * steering for consecutive MCR registers that have the same
++ * group/instance target, but it's not worth the extra complexity to do
++ * so.
++ */
++ bb_len += count_rmw * EMIT_RMW_DW;
++ bb_len += count_rmw_mcr * (EMIT_RMW_DW + 3);
++
++ /*
++ * After doing all RMW, we need 7 trailing dwords to clean up,
++ * plus an additional 3 dwords to reset steering if any of the
++ * registers were MCR.
++ */
++ if (count_rmw || count_rmw_mcr)
++ bb_len += 7 + (count_rmw_mcr ? 3 : 0);
+
+- if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
++ if (hwe->class == XE_ENGINE_CLASS_RENDER)
+ /*
+ * Big enough to emit all of the context's 3DSTATE via
+ * xe_lrc_emit_hwe_state_instructions()
+ */
+- bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32);
++ bb_len += xe_gt_lrc_size(gt, hwe->class) / sizeof(u32);
+
+- xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len);
++ xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", hwe->name, bb_len);
+
+ bb = xe_bb_new(gt, bb_len, false);
+ if (IS_ERR(bb))
+@@ -253,13 +277,23 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ }
+ }
+
+- if (count_rmw) {
+- /* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */
+-
++ if (count_rmw || count_rmw_mcr) {
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ continue;
+
++ if (entry->reg.mcr) {
++ struct xe_reg_mcr reg = { .__reg.raw = entry->reg.raw };
++ u8 group, instance;
++
++ xe_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance);
++ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
++ *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(hwe->mmio_base).addr;
++ *cs++ = SELECTIVE_READ_ADDRESSING |
++ REG_FIELD_PREP(SELECTIVE_READ_GROUP, group) |
++ REG_FIELD_PREP(SELECTIVE_READ_INSTANCE, instance);
++ }
++
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ *cs++ = entry->reg.addr;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+@@ -285,8 +319,9 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = entry->reg.addr;
+
+- xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
+- entry->reg.addr, entry->clr_bits, entry->set_bits);
++ xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x%s\n",
++ entry->reg.addr, entry->clr_bits, entry->set_bits,
++ entry->reg.mcr ? " (MCR)" : "");
+ }
+
+ /* reset used GPR */
+@@ -298,6 +333,13 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = 0;
++
++ /* reset steering */
++ if (count_rmw_mcr) {
++ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
++ *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(q->hwe->mmio_base).addr;
++ *cs++ = 0;
++ }
+ }
+
+ cs = xe_lrc_emit_hwe_state_instructions(q, cs);
+--
+2.51.0
+
--- /dev/null
+From e3be1797b90a17c3af1dbaf2fdd25c6b3767811e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 12:41:25 +0100
+Subject: irqchip/sifive-plic: Fix frozen interrupt due to affinity setting
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit 1072020685f4b81f6efad3b412cdae0bd62bb043 ]
+
+PLIC ignores interrupt completion message for disabled interrupt, explained
+by the specification:
+
+ The PLIC signals it has completed executing an interrupt handler by
+ writing the interrupt ID it received from the claim to the
+ claim/complete register. The PLIC does not check whether the completion
+ ID is the same as the last claim ID for that target. If the completion
+ ID does not match an interrupt source that is currently enabled for
+ the target, the completion is silently ignored.
+
+This caused problems in the past, because an interrupt can be disabled
+while still being handled and plic_irq_eoi() had no effect. That was fixed
+by checking if the interrupt is disabled, and if so enable it, before
+sending the completion message. That check is done with irqd_irq_disabled().
+
+However, that is not sufficient because the enable bit for the handling
+hart can be zero despite irqd_irq_disabled(d) being false. This can happen
+when affinity setting is changed while a hart is still handling the
+interrupt.
+
+This problem is easily reproducible by dumping a large file to uart (which
+generates lots of interrupts) and at the same time keep changing the uart
+interrupt's affinity setting. The uart port becomes frozen almost
+instantaneously.
+
+Fix this by checking PLIC's enable bit instead of irqd_irq_disabled().
+
+Fixes: cc9f04f9a84f ("irqchip/sifive-plic: Implement irq_set_affinity() for SMP host")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260212114125.3148067-1-namcao@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-sifive-plic.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index cbd7697bc1481..0799c15c745d4 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -154,8 +154,13 @@ static void plic_irq_disable(struct irq_data *d)
+ static void plic_irq_eoi(struct irq_data *d)
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
++ u32 __iomem *reg;
++ bool enabled;
++
++ reg = handler->enable_base + (d->hwirq / 32) * sizeof(u32);
++ enabled = readl(reg) & BIT(d->hwirq % 32);
+
+- if (unlikely(irqd_irq_disabled(d))) {
++ if (unlikely(!enabled)) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+--
+2.51.0
+
--- /dev/null
+From 2bebd8384b04abcbb0ca2a2b06d3695c258dfd0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 14:38:14 +0000
+Subject: KVM: arm64: Fix ID register initialization for non-protected pKVM
+ guests
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit 7e7c2cf0024d89443a7af52e09e47b1fe634ab17 ]
+
+In protected mode, the hypervisor maintains a separate instance of
+the `kvm` structure for each VM. For non-protected VMs, this structure is
+initialized from the host's `kvm` state.
+
+Currently, `pkvm_init_features_from_host()` copies the
+`KVM_ARCH_FLAG_ID_REGS_INITIALIZED` flag from the host without the
+underlying `id_regs` data being initialized. This results in the
+hypervisor seeing the flag as set while the ID registers remain zeroed.
+
+Consequently, `kvm_has_feat()` checks at EL2 fail (return 0) for
+non-protected VMs. This breaks logic that relies on feature detection,
+such as `ctxt_has_tcrx()` for TCR2_EL1 support. As a result, certain
+system registers (e.g., TCR2_EL1, PIR_EL1, POR_EL1) are not
+saved/restored during the world switch, which could lead to state
+corruption.
+
+Fix this by explicitly copying the ID registers from the host `kvm` to
+the hypervisor `kvm` for non-protected VMs during initialization, since
+we trust the host with its non-protected guests' features. Also ensure
+`KVM_ARCH_FLAG_ID_REGS_INITIALIZED` is cleared initially in
+`pkvm_init_features_from_host` so that `vm_copy_id_regs` can properly
+initialize them and set the flag once done.
+
+Fixes: 41d6028e28bd ("KVM: arm64: Convert the SVE guest vcpu flag to a vm flag")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://patch.msgid.link/20260213143815.1732675-4-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/hyp/nvhe/pkvm.c | 35 ++++++++++++++++++++++++++++++++--
+ 1 file changed, 33 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+index 43bde061b65de..d866f6ba19b5f 100644
+--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
++++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+@@ -343,6 +343,7 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
+ /* No restrictions for non-protected VMs. */
+ if (!kvm_vm_is_protected(kvm)) {
+ hyp_vm->kvm.arch.flags = host_arch_flags;
++ hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);
+
+ bitmap_copy(kvm->arch.vcpu_features,
+ host_kvm->arch.vcpu_features,
+@@ -469,6 +470,35 @@ static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *h
+ return ret;
+ }
+
++static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
++{
++ struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
++ const struct kvm *host_kvm = hyp_vm->host_kvm;
++ struct kvm *kvm = &hyp_vm->kvm;
++
++ if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
++ return -EINVAL;
++
++ if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
++ return 0;
++
++ memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
++
++ return 0;
++}
++
++static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
++{
++ int ret = 0;
++
++ if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
++ kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
++ else
++ ret = vm_copy_id_regs(hyp_vcpu);
++
++ return ret;
++}
++
+ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
+ struct pkvm_hyp_vm *hyp_vm,
+ struct kvm_vcpu *host_vcpu)
+@@ -488,8 +518,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
+ hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
+ hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+
+- if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+- kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
++ ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
++ if (ret)
++ goto done;
+
+ ret = pkvm_vcpu_init_traps(hyp_vcpu);
+ if (ret)
+--
+2.51.0
+
--- /dev/null
+From 30f3726cbee39506f539e0c4b257707c0ad2dc7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 14:38:12 +0000
+Subject: KVM: arm64: Hide S1POE from guests when not supported by the host
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit f66857bafd4f151c5cc6856e47be2e12c1721e43 ]
+
+When CONFIG_ARM64_POE is disabled, KVM does not save/restore POR_EL1.
+However, ID_AA64MMFR3_EL1 sanitisation currently exposes the feature to
+guests whenever the hardware supports it, ignoring the host kernel
+configuration.
+
+If a guest detects this feature and attempts to use it, the host will
+fail to context-switch POR_EL1, potentially leading to state corruption.
+
+Fix this by masking ID_AA64MMFR3_EL1.S1POE in the sanitised system
+registers, preventing KVM from advertising the feature when the host
+does not support it (i.e. system_supports_poe() is false).
+
+Fixes: 70ed7238297f ("KVM: arm64: Sanitise ID_AA64MMFR3_EL1")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://patch.msgid.link/20260213143815.1732675-2-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/sys_regs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index ec3fbe0b8d525..7b7f3c932dcd5 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1801,6 +1801,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
+ ID_AA64MMFR3_EL1_SCTLRX |
+ ID_AA64MMFR3_EL1_S1POE |
+ ID_AA64MMFR3_EL1_S1PIE;
++
++ if (!system_supports_poe())
++ val &= ~ID_AA64MMFR3_EL1_S1POE;
+ break;
+ case SYS_ID_MMFR4_EL1:
+ val &= ~ID_MMFR4_EL1_CCIDX;
+--
+2.51.0
+
--- /dev/null
+From 231d24116f7a5dcd5f50be581119d92607cfeb3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 06:10:08 -0600
+Subject: PCI: Correct PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit 39195990e4c093c9eecf88f29811c6de29265214 ]
+
+fb82437fdd8c ("PCI: Change capability register offsets to hex") incorrectly
+converted the PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value from decimal 52 to hex
+0x32:
+
+ -#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
+ +#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+
+This broke PCI capabilities in a VMM because subsequent ones weren't
+DWORD-aligned.
+
+Change PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 to the correct value of 0x34.
+
+fb82437fdd8c was from Baruch Siach <baruch@tkos.co.il>, but this was not
+Baruch's fault; it's a mistake I made when applying the patch.
+
+Fixes: fb82437fdd8c ("PCI: Change capability register offsets to hex")
+Reported-by: David Woodhouse <dwmw2@infradead.org>
+Closes: https://lore.kernel.org/all/3ae392a0158e9d9ab09a1d42150429dd8ca42791.camel@infradead.org
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/pci_regs.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index 07e06aafec502..1172bda87abff 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -706,7 +706,7 @@
+ #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
+ #define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+ #define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
+-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
++#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x34 /* end of v2 EPs w/ link */
+ #define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
+ #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
+ #define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+--
+2.51.0
+
--- /dev/null
+From 0d52a1b64a42ca4789a81623b6695ee732125f05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 18:55:41 +0100
+Subject: PCI: dwc: ep: Flush MSI-X write before unmapping its ATU entry
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit c22533c66ccae10511ad6a7afc34bb26c47577e3 ]
+
+Endpoint drivers use dw_pcie_ep_raise_msix_irq() to raise an MSI-X
+interrupt to the host using a writel(), which generates a PCI posted write
+transaction. There's no completion for posted writes, so the writel() may
+return before the PCI write completes. dw_pcie_ep_raise_msix_irq() also
+unmaps the outbound ATU entry used for the PCI write, so the write races
+with the unmap.
+
+If the PCI write loses the race with the ATU unmap, the write may corrupt
+host memory or cause IOMMU errors, e.g., these when running fio with a
+larger queue depth against nvmet-pci-epf:
+
+ arm-smmu-v3 fc900000.iommu: 0x0000010000000010
+ arm-smmu-v3 fc900000.iommu: 0x0000020000000000
+ arm-smmu-v3 fc900000.iommu: 0x000000090000f040
+ arm-smmu-v3 fc900000.iommu: 0x0000000000000000
+ arm-smmu-v3 fc900000.iommu: event: F_TRANSLATION client: 0000:01:00.0 sid: 0x100 ssid: 0x0 iova: 0x90000f040 ipa: 0x0
+ arm-smmu-v3 fc900000.iommu: unpriv data write s1 "Input address caused fault" stag: 0x0
+
+Flush the write by performing a readl() of the same address to ensure that
+the write has reached the destination before the ATU entry is unmapped.
+
+The same problem was solved for dw_pcie_ep_raise_msi_irq() in commit
+8719c64e76bf ("PCI: dwc: ep: Cache MSI outbound iATU mapping"), but there
+it was solved by dedicating an outbound iATU only for MSI. We can't do the
+same for MSI-X because each vector can have a different msg_addr and the
+msg_addr may be changed while the vector is masked.
+
+Fixes: beb4641a787d ("PCI: dwc: Add MSI-X callbacks handler")
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20260211175540.105677-2-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware-ep.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 7f2112c2fb215..6d3beec92b54e 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -793,6 +793,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+
+ writel(msg_data, ep->msi_mem + offset);
+
++ /* flush posted write before unmap */
++ readl(ep->msi_mem + offset);
++
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 5dc6959042e9eb048ae995e34e7034172df1b8e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Jun 2025 21:51:05 -0700
+Subject: perf/core: Fix invalid wait context in ctx_sched_in()
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+[ Upstream commit 486ff5ad49bc50315bcaf6d45f04a33ef0a45ced ]
+
+Lockdep found a bug in the event scheduling when a pinned event was
+failed and wakes up the threads in the ring buffer like below.
+
+It seems it should not grab a wait-queue lock under perf-context lock.
+Let's do it with irq_work.
+
+ [ 39.913691] =============================
+ [ 39.914157] [ BUG: Invalid wait context ]
+ [ 39.914623] 6.15.0-next-20250530-next-2025053 #1 Not tainted
+ [ 39.915271] -----------------------------
+ [ 39.915731] repro/837 is trying to lock:
+ [ 39.916191] ffff88801acfabd8 (&event->waitq){....}-{3:3}, at: __wake_up+0x26/0x60
+ [ 39.917182] other info that might help us debug this:
+ [ 39.917761] context-{5:5}
+ [ 39.918079] 4 locks held by repro/837:
+ [ 39.918530] #0: ffffffff8725cd00 (rcu_read_lock){....}-{1:3}, at: __perf_event_task_sched_in+0xd1/0xbc0
+ [ 39.919612] #1: ffff88806ca3c6f8 (&cpuctx_lock){....}-{2:2}, at: __perf_event_task_sched_in+0x1a7/0xbc0
+ [ 39.920748] #2: ffff88800d91fc18 (&ctx->lock){....}-{2:2}, at: __perf_event_task_sched_in+0x1f9/0xbc0
+ [ 39.921819] #3: ffffffff8725cd00 (rcu_read_lock){....}-{1:3}, at: perf_event_wakeup+0x6c/0x470
+
+Fixes: f4b07fd62d4d ("perf/core: Use POLLHUP for a pinned event in error")
+Closes: https://lore.kernel.org/lkml/aD2w50VDvGIH95Pf@ly-workstation
+Reported-by: "Lai, Yi" <yi1.lai@linux.intel.com>
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: "Lai, Yi" <yi1.lai@linux.intel.com>
+Link: https://patch.msgid.link/20250603045105.1731451-1-namhyung@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6889a6bd8a395..0255795191cc8 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4016,7 +4016,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ if (*perf_event_fasync(event))
+ event->pending_kill = POLL_ERR;
+
+- perf_event_wakeup(event);
++ event->pending_wakeup = 1;
++ irq_work_queue(&event->pending_irq);
+ } else {
+ struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu);
+
+--
+2.51.0
+
--- /dev/null
+From 6d023a2559b8b1c2010e0524bd74cbaaa05ec0a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:29:09 +0100
+Subject: perf: Fix __perf_event_overflow() vs perf_remove_from_context() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c9bc1753b3cc41d0e01fbca7f035258b5f4db0ae ]
+
+Make sure that __perf_event_overflow() runs with IRQs disabled for all
+possible callchains. Specifically the software events can end up running
+it with only preemption disabled.
+
+This opens up a race vs perf_event_exit_event() and friends that will go
+and free various things the overflow path expects to be present, like
+the BPF program.
+
+Fixes: 592903cdcbf6 ("perf_counter: add an event_list")
+Reported-by: Simond Hu <cmdhh1767@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Simond Hu <cmdhh1767@gmail.com>
+Link: https://patch.msgid.link/20260224122909.GV1395416@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 42 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0255795191cc8..b7e73ac3e512f 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10427,6 +10427,13 @@ int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+ {
++ /*
++ * Entry point from hardware PMI, interrupts should be disabled here.
++ * This serializes us against perf_event_remove_from_context() in
++ * things like perf_event_release_kernel().
++ */
++ lockdep_assert_irqs_disabled();
++
+ return __perf_event_overflow(event, 1, data, regs);
+ }
+
+@@ -10503,6 +10510,19 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ {
+ struct hw_perf_event *hwc = &event->hw;
+
++ /*
++ * This is:
++ * - software preempt
++ * - tracepoint preempt
++ * - tp_target_task irq (ctx->lock)
++ * - uprobes preempt/irq
++ * - kprobes preempt/irq
++ * - hw_breakpoint irq
++ *
++ * Any of these are sufficient to hold off RCU and thus ensure @event
++ * exists.
++ */
++ lockdep_assert_preemption_disabled();
+ local64_add(nr, &event->count);
+
+ if (!regs)
+@@ -10511,6 +10531,16 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ if (!is_sampling_event(event))
+ return;
+
++ /*
++ * Serialize against event_function_call() IPIs like normal overflow
++ * event handling. Specifically, must not allow
++ * perf_event_release_kernel() -> perf_remove_from_context() to make
++ * progress and 'release' the event from under us.
++ */
++ guard(irqsave)();
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+@@ -11009,6 +11039,11 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ struct perf_sample_data data;
+ struct perf_event *event;
+
++ /*
++ * Per being a tracepoint, this runs with preemption disabled.
++ */
++ lockdep_assert_preemption_disabled();
++
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = entry_size,
+@@ -11341,6 +11376,11 @@ void perf_bp_event(struct perf_event *bp, void *data)
+ struct perf_sample_data sample;
+ struct pt_regs *regs = data;
+
++ /*
++ * Exception context, will have interrupts disabled.
++ */
++ lockdep_assert_irqs_disabled();
++
+ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+@@ -11805,7 +11845,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
+- if (__perf_event_overflow(event, 1, &data, regs))
++ if (perf_event_overflow(event, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 52529655a9d3d94afe09535c444d8f18b42b20a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 19:19:03 +0800
+Subject: regulator: bq257xx: Fix device node reference leak in
+ bq257xx_reg_dt_parse_gpio()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 4baaddaa44af01cd4ce239493060738fd0881835 ]
+
+In bq257xx_reg_dt_parse_gpio(), if fails to get subchild, it returns
+without calling of_node_put(child), causing the device node reference
+leak.
+
+Fixes: 981dd162b635 ("regulator: bq257xx: Add bq257xx boost regulator driver")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Link: https://patch.msgid.link/20260224-bq257-v1-1-8ebbc731c1c3@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/bq257xx-regulator.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/regulator/bq257xx-regulator.c b/drivers/regulator/bq257xx-regulator.c
+index fc1ccede44688..dab8f1ab44503 100644
+--- a/drivers/regulator/bq257xx-regulator.c
++++ b/drivers/regulator/bq257xx-regulator.c
+@@ -115,11 +115,10 @@ static void bq257xx_reg_dt_parse_gpio(struct platform_device *pdev)
+ return;
+
+ subchild = of_get_child_by_name(child, pdata->desc.of_match);
++ of_node_put(child);
+ if (!subchild)
+ return;
+
+- of_node_put(child);
+-
+ pdata->otg_en_gpio = devm_fwnode_gpiod_get_index(&pdev->dev,
+ of_fwnode_handle(subchild),
+ "enable", 0,
+--
+2.51.0
+
--- /dev/null
+From 17acccfd01329459ab6a374a05e363b2d16f0a3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 15:06:40 -0500
+Subject: rseq: Clarify rseq registration rseq_size bound check comment
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+[ Upstream commit 26d43a90be81fc90e26688a51d3ec83188602731 ]
+
+The rseq registration validates that the rseq_size argument is greater
+or equal to 32 (the original rseq size), but the comment associated with
+this check does not clearly state this.
+
+Clarify the comment to that effect.
+
+Fixes: ee3e3ac05c26 ("rseq: Introduce extensible rseq ABI")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260220200642.1317826-2-mathieu.desnoyers@efficios.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/rseq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 2452b7366b00e..07b0b46ec640f 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -519,8 +519,9 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
+ * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
+ * size, the required alignment is the original struct rseq alignment.
+ *
+- * In order to be valid, rseq_len is either the original rseq size, or
+- * large enough to contain all supported fields, as communicated to
++ * The rseq_len is required to be greater or equal to the original rseq
++ * size. In order to be valid, rseq_len is either the original rseq size,
++ * or large enough to contain all supported fields, as communicated to
+ * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
+ */
+ if (rseq_len < ORIG_RSEQ_SIZE ||
+--
+2.51.0
+
--- /dev/null
+From 80916cec9c07cbb2ed6a22c75b171152175ae24c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 15:20:04 +0100
+Subject: s390/idle: Fix cpu idle exit cpu time accounting
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 0d785e2c324c90662baa4fe07a0d02233ff92824 ]
+
+With the conversion to generic entry [1] cpu idle exit cpu time accounting
+was converted from assembly to C. This introduced a reversed order of cpu
+time accounting.
+
+On cpu idle exit the current accounting happens with the following call
+chain:
+
+-> do_io_irq()/do_ext_irq()
+ -> irq_enter_rcu()
+ -> account_hardirq_enter()
+ -> vtime_account_irq()
+ -> vtime_account_kernel()
+
+vtime_account_kernel() accounts the passed cpu time since last_update_timer
+as system time, and updates last_update_timer to the current cpu timer
+value.
+
+However the subsequent call of
+
+ -> account_idle_time_irq()
+
+will incorrectly subtract passed cpu time from timer_idle_enter to the
+updated last_update_timer value from system_timer. Then last_update_timer
+is updated to a sys_enter_timer, which means that last_update_timer goes
+back in time.
+
+Subsequently account_hardirq_exit() will account too much cpu time as
+hardirq time. The sum of all accounted cpu times is still correct, however
+some cpu time which was previously accounted as system time is now
+accounted as hardirq time, plus there is the oddity that last_update_timer
+goes back in time.
+
+Restore previous behavior by extracting cpu time accounting code from
+account_idle_time_irq() into a new update_timer_idle() function and call it
+before irq_enter_rcu().
+
+Fixes: 56e62a737028 ("s390: convert to generic entry") [1]
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/idle.h | 1 +
+ arch/s390/kernel/idle.c | 13 +++++++++----
+ arch/s390/kernel/irq.c | 10 ++++++++--
+ 3 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
+index 09f763b9eb40a..133059d9a949c 100644
+--- a/arch/s390/include/asm/idle.h
++++ b/arch/s390/include/asm/idle.h
+@@ -23,5 +23,6 @@ extern struct device_attribute dev_attr_idle_count;
+ extern struct device_attribute dev_attr_idle_time_us;
+
+ void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
++void update_timer_idle(void);
+
+ #endif /* _S390_IDLE_H */
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index 39cb8d0ae3480..0f9e53f0a0686 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -21,11 +21,10 @@
+
+ static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
+-void account_idle_time_irq(void)
++void update_timer_idle(void)
+ {
+ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+ struct lowcore *lc = get_lowcore();
+- unsigned long idle_time;
+ u64 cycles_new[8];
+ int i;
+
+@@ -35,13 +34,19 @@ void account_idle_time_irq(void)
+ this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
+ }
+
+- idle_time = lc->int_clock - idle->clock_idle_enter;
+-
+ lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
+ lc->last_update_clock = lc->int_clock;
+
+ lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
+ lc->last_update_timer = lc->sys_enter_timer;
++}
++
++void account_idle_time_irq(void)
++{
++ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
++ unsigned long idle_time;
++
++ idle_time = get_lowcore()->int_clock - idle->clock_idle_enter;
+
+ /* Account time spent with enabled wait psw loaded as idle time. */
+ WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
+diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
+index bdf9c7cb5685b..080e9285b3379 100644
+--- a/arch/s390/kernel/irq.c
++++ b/arch/s390/kernel/irq.c
+@@ -146,6 +146,10 @@ void noinstr do_io_irq(struct pt_regs *regs)
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ bool from_idle;
+
++ from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
++ if (from_idle)
++ update_timer_idle();
++
+ irq_enter_rcu();
+
+ if (user_mode(regs)) {
+@@ -154,7 +158,6 @@ void noinstr do_io_irq(struct pt_regs *regs)
+ current->thread.last_break = regs->last_break;
+ }
+
+- from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+ if (from_idle)
+ account_idle_time_irq();
+
+@@ -182,6 +185,10 @@ void noinstr do_ext_irq(struct pt_regs *regs)
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ bool from_idle;
+
++ from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
++ if (from_idle)
++ update_timer_idle();
++
+ irq_enter_rcu();
+
+ if (user_mode(regs)) {
+@@ -194,7 +201,6 @@ void noinstr do_ext_irq(struct pt_regs *regs)
+ regs->int_parm = get_lowcore()->ext_params;
+ regs->int_parm_long = get_lowcore()->ext_params2;
+
+- from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+ if (from_idle)
+ account_idle_time_irq();
+
+--
+2.51.0
+
--- /dev/null
+From dda2d0f575e601a044602d26ee3beff762c9587d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 15:20:05 +0100
+Subject: s390/vtime: Fix virtual timer forwarding
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit dbc0fb35679ed5d0adecf7d02137ac2c77244b3b ]
+
+Since delayed accounting of system time [1] the virtual timer is
+forwarded by do_account_vtime() but also vtime_account_kernel(),
+vtime_account_softirq(), and vtime_account_hardirq(). This leads
+to double accounting of system, guest, softirq, and hardirq time.
+
+Remove accounting from the vtime_account*() family to restore old behavior.
+
+There is only one user of the vtimer interface, which might explain
+why nobody noticed this so far.
+
+Fixes: b7394a5f4ce9 ("sched/cputime, s390: Implement delayed accounting of system time") [1]
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/vtime.c | 18 ++----------------
+ 1 file changed, 2 insertions(+), 16 deletions(-)
+
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 234a0ba305108..122d30b104401 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -225,10 +225,6 @@ static u64 vtime_delta(void)
+ return timer - lc->last_update_timer;
+ }
+
+-/*
+- * Update process times based on virtual cpu times stored by entry.S
+- * to the lowcore fields user_timer, system_timer & steal_clock.
+- */
+ void vtime_account_kernel(struct task_struct *tsk)
+ {
+ struct lowcore *lc = get_lowcore();
+@@ -238,27 +234,17 @@ void vtime_account_kernel(struct task_struct *tsk)
+ lc->guest_timer += delta;
+ else
+ lc->system_timer += delta;
+-
+- virt_timer_forward(delta);
+ }
+ EXPORT_SYMBOL_GPL(vtime_account_kernel);
+
+ void vtime_account_softirq(struct task_struct *tsk)
+ {
+- u64 delta = vtime_delta();
+-
+- get_lowcore()->softirq_timer += delta;
+-
+- virt_timer_forward(delta);
++ get_lowcore()->softirq_timer += vtime_delta();
+ }
+
+ void vtime_account_hardirq(struct task_struct *tsk)
+ {
+- u64 delta = vtime_delta();
+-
+- get_lowcore()->hardirq_timer += delta;
+-
+- virt_timer_forward(delta);
++ get_lowcore()->hardirq_timer += vtime_delta();
+ }
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From c4b60a807dcf492b56309c835177a0b8f75880ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jan 2026 12:31:13 +0000
+Subject: sched/eevdf: Update se->vprot in reweight_entity()
+
+From: Wang Tao <wangtao554@huawei.com>
+
+[ Upstream commit ff38424030f98976150e42ca35f4b00e6ab8fa23 ]
+
+In the EEVDF framework with Run-to-Parity protection, `se->vprot` is an
+independent variable defining the virtual protection timestamp.
+
+When `reweight_entity()` is called (e.g., via nice/renice), it performs
+the following actions to preserve Lag consistency:
+ 1. Scales `se->vlag` based on the new weight.
+ 2. Calls `place_entity()`, which recalculates `se->vruntime` based on
+ the new weight and scaled lag.
+
+However, the current implementation fails to update `se->vprot`, leading
+to mismatches between the task's actual runtime and its expected duration.
+
+Fixes: 63304558ba5d ("sched/eevdf: Curb wakeup-preemption")
+Suggested-by: Zhang Qiao <zhangqiao22@huawei.com>
+Signed-off-by: Wang Tao <wangtao554@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20260120123113.3518950-1-wangtao554@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 1644ad90acdca..8587218ee9073 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3805,6 +3805,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long weight)
+ {
+ bool curr = cfs_rq->curr == se;
++ bool rel_vprot = false;
++ u64 vprot;
+
+ if (se->on_rq) {
+ /* commit outstanding execution time */
+@@ -3812,6 +3814,11 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ update_entity_lag(cfs_rq, se);
+ se->deadline -= se->vruntime;
+ se->rel_deadline = 1;
++ if (curr && protect_slice(se)) {
++ vprot = se->vprot - se->vruntime;
++ rel_vprot = true;
++ }
++
+ cfs_rq->nr_queued--;
+ if (!curr)
+ __dequeue_entity(cfs_rq, se);
+@@ -3827,6 +3834,9 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ if (se->rel_deadline)
+ se->deadline = div_s64(se->deadline * se->load.weight, weight);
+
++ if (rel_vprot)
++ vprot = div_s64(vprot * se->load.weight, weight);
++
+ update_load_set(&se->load, weight);
+
+ do {
+@@ -3838,6 +3848,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ enqueue_load_avg(cfs_rq, se);
+ if (se->on_rq) {
+ place_entity(cfs_rq, se, 0);
++ if (rel_vprot)
++ se->vprot = se->vruntime + vprot;
+ update_load_add(&cfs_rq->load, se->load.weight);
+ if (!curr)
+ __enqueue_entity(cfs_rq, se);
+--
+2.51.0
+
--- /dev/null
+From 73a3008fd99d8c25429cd85589a92de7893bd20e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 12:16:28 +0200
+Subject: sched/fair: Fix lag clamp
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 6e3c0a4e1ad1e0455b7880fad02b3ee179f56c09 ]
+
+Vincent reported that he was seeing undue lag clamping in a mixed
+slice workload. Implement the max_slice tracking as per the todo
+comment.
+
+Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
+Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20250422101628.GA33555@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sched.h | 1 +
+ kernel/sched/fair.c | 39 +++++++++++++++++++++++++++++++++++----
+ 2 files changed, 36 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 6ad294330c0b6..3e2005e9e2f0b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -574,6 +574,7 @@ struct sched_entity {
+ u64 deadline;
+ u64 min_vruntime;
+ u64 min_slice;
++ u64 max_slice;
+
+ struct list_head group_node;
+ unsigned char on_rq;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 8587218ee9073..292141f4aaa54 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -748,6 +748,8 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ return cfs_rq->zero_vruntime;
+ }
+
++static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq);
++
+ /*
+ * lag_i = S - s_i = w_i * (V - v_i)
+ *
+@@ -761,17 +763,16 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ * EEVDF gives the following limit for a steady state system:
+ *
+ * -r_max < lag < max(r_max, q)
+- *
+- * XXX could add max_slice to the augmented data to track this.
+ */
+ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
++ u64 max_slice = cfs_rq_max_slice(cfs_rq) + TICK_NSEC;
+ s64 vlag, limit;
+
+ WARN_ON_ONCE(!se->on_rq);
+
+ vlag = avg_vruntime(cfs_rq) - se->vruntime;
+- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
++ limit = calc_delta_fair(max_slice, se);
+
+ se->vlag = clamp(vlag, -limit, limit);
+ }
+@@ -829,6 +830,21 @@ static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+ return min_slice;
+ }
+
++static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq)
++{
++ struct sched_entity *root = __pick_root_entity(cfs_rq);
++ struct sched_entity *curr = cfs_rq->curr;
++ u64 max_slice = 0ULL;
++
++ if (curr && curr->on_rq)
++ max_slice = curr->slice;
++
++ if (root)
++ max_slice = max(max_slice, root->max_slice);
++
++ return max_slice;
++}
++
+ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+ {
+ return entity_before(__node_2_se(a), __node_2_se(b));
+@@ -853,6 +869,15 @@ static inline void __min_slice_update(struct sched_entity *se, struct rb_node *n
+ }
+ }
+
++static inline void __max_slice_update(struct sched_entity *se, struct rb_node *node)
++{
++ if (node) {
++ struct sched_entity *rse = __node_2_se(node);
++ if (rse->max_slice > se->max_slice)
++ se->max_slice = rse->max_slice;
++ }
++}
++
+ /*
+ * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
+ */
+@@ -860,6 +885,7 @@ static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
+ {
+ u64 old_min_vruntime = se->min_vruntime;
+ u64 old_min_slice = se->min_slice;
++ u64 old_max_slice = se->max_slice;
+ struct rb_node *node = &se->run_node;
+
+ se->min_vruntime = se->vruntime;
+@@ -870,8 +896,13 @@ static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
+ __min_slice_update(se, node->rb_right);
+ __min_slice_update(se, node->rb_left);
+
++ se->max_slice = se->slice;
++ __max_slice_update(se, node->rb_right);
++ __max_slice_update(se, node->rb_left);
++
+ return se->min_vruntime == old_min_vruntime &&
+- se->min_slice == old_min_slice;
++ se->min_slice == old_min_slice &&
++ se->max_slice == old_max_slice;
+ }
+
+ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+--
+2.51.0
+
--- /dev/null
+From f47905d2e66d051c7fec17590248541eab966e06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:28:16 +0100
+Subject: sched/fair: Fix zero_vruntime tracking
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit b3d99f43c72b56cf7a104a364e7fb34b0702828b ]
+
+It turns out that zero_vruntime tracking is broken when there is but a single
+task running. Current update paths are through __{en,de}queue_entity(), and
+when there is but a single task, pick_next_task() will always return that one
+task, and put_prev_set_next_task() will end up in neither function.
+
+This can cause entity_key() to grow indefinitely large and cause overflows,
+leading to much pain and suffering.
+
+Furthermore, doing update_zero_vruntime() from __{de,en}queue_entity(), which
+are called from {set_next,put_prev}_entity() has problems because:
+
+ - set_next_entity() calls __dequeue_entity() before it does cfs_rq->curr = se.
+ This means the avg_vruntime() will see the removal but not current, missing
+ the entity for accounting.
+
+ - put_prev_entity() calls __enqueue_entity() before it does cfs_rq->curr =
+ NULL. This means the avg_vruntime() will see the addition *and* current,
+ leading to double accounting.
+
+Both cases are incorrect/inconsistent.
+
+Noting that avg_vruntime is already called on each {en,de}queue, remove the
+explicit avg_vruntime() calls (which removes an extra 64bit division for each
+{en,de}queue) and have avg_vruntime() update zero_vruntime itself.
+
+Additionally, have the tick call avg_vruntime() -- discarding the result, but
+for the side-effect of updating zero_vruntime.
+
+While there, optimize avg_vruntime() by noting that the average of one value is
+rather trivial to compute.
+
+Test case:
+ # taskset -c -p 1 $$
+ # taskset -c 2 bash -c 'while :; do :; done&'
+ # cat /sys/kernel/debug/sched/debug | awk '/^cpu#/ {P=0} /^cpu#2,/ {P=1} {if (P) print $0}' | grep -e zero_vruntime -e "^>"
+
+PRE:
+ .zero_vruntime : 31316.407903
+ >R bash 487 50787.345112 E 50789.145972 2.800000 50780.298364 16 120 0.000000 0.000000 0.000000 /
+ .zero_vruntime : 382548.253179
+ >R bash 487 427275.204288 E 427276.003584 2.800000 427268.157540 23 120 0.000000 0.000000 0.000000 /
+
+POST:
+ .zero_vruntime : 17259.709467
+ >R bash 526 17259.709467 E 17262.509467 2.800000 16915.031624 9 120 0.000000 0.000000 0.000000 /
+ .zero_vruntime : 18702.723356
+ >R bash 526 18702.723356 E 18705.523356 2.800000 18358.045513 9 120 0.000000 0.000000 0.000000 /
+
+Fixes: 79f3f9bedd14 ("sched/eevdf: Fix min_vruntime vs avg_vruntime")
+Reported-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20260219080624.438854780%40infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 84 ++++++++++++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 27 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f669c84c7c0e9..c3735197c6e7c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -589,6 +589,21 @@ static inline bool entity_before(const struct sched_entity *a,
+ return vruntime_cmp(a->deadline, "<", b->deadline);
+ }
+
++/*
++ * Per avg_vruntime() below, cfs_rq::zero_vruntime is only slightly stale
++ * and this value should be no more than two lag bounds. Which puts it in the
++ * general order of:
++ *
++ * (slice + TICK_NSEC) << NICE_0_LOAD_SHIFT
++ *
++ * which is around 44 bits in size (on 64bit); that is 20 for
++ * NICE_0_LOAD_SHIFT, another 20 for NSEC_PER_MSEC and then a handful for
++ * however many msec the actual slice+tick ends up being.
++ *
++ * (disregarding the actual divide-by-weight part makes for the worst case
++ * weight of 2, which nicely cancels vs the fuzz in zero_vruntime not actually
++ * being the zero-lag point).
++ */
+ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ return vruntime_op(se->vruntime, "-", cfs_rq->zero_vruntime);
+@@ -676,39 +691,61 @@ sum_w_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ }
+
+ static inline
+-void sum_w_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
++void update_zero_vruntime(struct cfs_rq *cfs_rq, s64 delta)
+ {
+ /*
+- * v' = v + d ==> sum_w_vruntime' = sum_runtime - d*sum_weight
++ * v' = v + d ==> sum_w_vruntime' = sum_w_vruntime - d*sum_weight
+ */
+ cfs_rq->sum_w_vruntime -= cfs_rq->sum_weight * delta;
++ cfs_rq->zero_vruntime += delta;
+ }
+
+ /*
+- * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
++ * Specifically: avg_vruntime() + 0 must result in entity_eligible() := true
+ * For this to be so, the result of this function must have a left bias.
++ *
++ * Called in:
++ * - place_entity() -- before enqueue
++ * - update_entity_lag() -- before dequeue
++ * - entity_tick()
++ *
++ * This means it is one entry 'behind' but that puts it close enough to where
++ * the bound on entity_key() is at most two lag bounds.
+ */
+ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+- s64 avg = cfs_rq->sum_w_vruntime;
+- long load = cfs_rq->sum_weight;
++ long weight = cfs_rq->sum_weight;
++ s64 delta = 0;
+
+- if (curr && curr->on_rq) {
+- unsigned long weight = scale_load_down(curr->load.weight);
++ if (curr && !curr->on_rq)
++ curr = NULL;
+
+- avg += entity_key(cfs_rq, curr) * weight;
+- load += weight;
+- }
++ if (weight) {
++ s64 runtime = cfs_rq->sum_w_vruntime;
++
++ if (curr) {
++ unsigned long w = scale_load_down(curr->load.weight);
++
++ runtime += entity_key(cfs_rq, curr) * w;
++ weight += w;
++ }
+
+- if (load) {
+ /* sign flips effective floor / ceiling */
+- if (avg < 0)
+- avg -= (load - 1);
+- avg = div_s64(avg, load);
++ if (runtime < 0)
++ runtime -= (weight - 1);
++
++ delta = div_s64(runtime, weight);
++ } else if (curr) {
++ /*
++ * When there is but one element, it is the average.
++ */
++ delta = curr->vruntime - cfs_rq->zero_vruntime;
+ }
+
+- return cfs_rq->zero_vruntime + avg;
++ update_zero_vruntime(cfs_rq, delta);
++
++ return cfs_rq->zero_vruntime;
+ }
+
+ /*
+@@ -777,16 +814,6 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ return vruntime_eligible(cfs_rq, se->vruntime);
+ }
+
+-static void update_zero_vruntime(struct cfs_rq *cfs_rq)
+-{
+- u64 vruntime = avg_vruntime(cfs_rq);
+- s64 delta = vruntime_op(vruntime, "-", cfs_rq->zero_vruntime);
+-
+- sum_w_vruntime_update(cfs_rq, delta);
+-
+- cfs_rq->zero_vruntime = vruntime;
+-}
+-
+ static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *root = __pick_root_entity(cfs_rq);
+@@ -856,7 +883,6 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ sum_w_vruntime_add(cfs_rq, se);
+- update_zero_vruntime(cfs_rq);
+ se->min_vruntime = se->vruntime;
+ se->min_slice = se->slice;
+ rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+@@ -868,7 +894,6 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ &min_vruntime_cb);
+ sum_w_vruntime_sub(cfs_rq, se);
+- update_zero_vruntime(cfs_rq);
+ }
+
+ struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
+@@ -5566,6 +5591,11 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+ update_load_avg(cfs_rq, curr, UPDATE_TG);
+ update_cfs_group(curr);
+
++ /*
++ * Pulls along cfs_rq::zero_vruntime.
++ */
++ avg_vruntime(cfs_rq);
++
+ #ifdef CONFIG_SCHED_HRTICK
+ /*
+ * queued ticks are scheduled to match the slice, so don't bother
+--
+2.51.0
+
--- /dev/null
+From 08257aa2c33d58f127883c6324fdc652928964d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Dec 2025 16:10:32 +0100
+Subject: sched/fair: Introduce and use the vruntime_cmp() and vruntime_op()
+ wrappers for wrapped-signed arithmetics
+
+From: Ingo Molnar <mingo@kernel.org>
+
+[ Upstream commit 5758e48eefaf111d7764d8f1c8b666140fe5fa27 ]
+
+We have to be careful with vruntime comparisons and subtraction,
+due to the possibility of wrapping, so we have macros like:
+
+ #define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
+
+Which is used like this:
+
+ if (vruntime_gt(min_vruntime, se, rse))
+ se->min_vruntime = rse->min_vruntime;
+
+Replace this with an easier to read pattern that uses the regular
+arithmetics operators:
+
+ if (vruntime_cmp(se->min_vruntime, ">", rse->min_vruntime))
+ se->min_vruntime = rse->min_vruntime;
+
+Also replace vruntime subtractions with vruntime_op():
+
+ - delta = (s64)(sea->vruntime - seb->vruntime) +
+ - (s64)(cfs_rqb->zero_vruntime_fi - cfs_rqa->zero_vruntime_fi);
+ + delta = vruntime_op(sea->vruntime, "-", seb->vruntime) +
+ + vruntime_op(cfs_rqb->zero_vruntime_fi, "-", cfs_rqa->zero_vruntime_fi);
+
+In the vruntime_cmp() and vruntime_op() macros, use __builtin_strcmp(),
+because __HAVE_ARCH_STRCMP might turn off the compiler optimizations
+we rely on here to catch usage bugs.
+
+No change in functionality.
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Stable-dep-of: b3d99f43c72b ("sched/fair: Fix zero_vruntime tracking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 66 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 51 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index a5f698ed15032..f669c84c7c0e9 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -524,10 +524,48 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+ * Scheduling class tree data structure manipulation methods:
+ */
+
++extern void __BUILD_BUG_vruntime_cmp(void);
++
++/* Use __builtin_strcmp() because of __HAVE_ARCH_STRCMP: */
++
++#define vruntime_cmp(A, CMP_STR, B) ({ \
++ int __res = 0; \
++ \
++ if (!__builtin_strcmp(CMP_STR, "<")) { \
++ __res = ((s64)((A)-(B)) < 0); \
++ } else if (!__builtin_strcmp(CMP_STR, "<=")) { \
++ __res = ((s64)((A)-(B)) <= 0); \
++ } else if (!__builtin_strcmp(CMP_STR, ">")) { \
++ __res = ((s64)((A)-(B)) > 0); \
++ } else if (!__builtin_strcmp(CMP_STR, ">=")) { \
++ __res = ((s64)((A)-(B)) >= 0); \
++ } else { \
++ /* Unknown operator throws linker error: */ \
++ __BUILD_BUG_vruntime_cmp(); \
++ } \
++ \
++ __res; \
++})
++
++extern void __BUILD_BUG_vruntime_op(void);
++
++#define vruntime_op(A, OP_STR, B) ({ \
++ s64 __res = 0; \
++ \
++ if (!__builtin_strcmp(OP_STR, "-")) { \
++ __res = (s64)((A)-(B)); \
++ } else { \
++ /* Unknown operator throws linker error: */ \
++ __BUILD_BUG_vruntime_op(); \
++ } \
++ \
++ __res; \
++})
++
++
+ static inline __maybe_unused u64 max_vruntime(u64 max_vruntime, u64 vruntime)
+ {
+- s64 delta = (s64)(vruntime - max_vruntime);
+- if (delta > 0)
++ if (vruntime_cmp(vruntime, ">", max_vruntime))
+ max_vruntime = vruntime;
+
+ return max_vruntime;
+@@ -535,8 +573,7 @@ static inline __maybe_unused u64 max_vruntime(u64 max_vruntime, u64 vruntime)
+
+ static inline __maybe_unused u64 min_vruntime(u64 min_vruntime, u64 vruntime)
+ {
+- s64 delta = (s64)(vruntime - min_vruntime);
+- if (delta < 0)
++ if (vruntime_cmp(vruntime, "<", min_vruntime))
+ min_vruntime = vruntime;
+
+ return min_vruntime;
+@@ -549,12 +586,12 @@ static inline bool entity_before(const struct sched_entity *a,
+ * Tiebreak on vruntime seems unnecessary since it can
+ * hardly happen.
+ */
+- return (s64)(a->deadline - b->deadline) < 0;
++ return vruntime_cmp(a->deadline, "<", b->deadline);
+ }
+
+ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- return (s64)(se->vruntime - cfs_rq->zero_vruntime);
++ return vruntime_op(se->vruntime, "-", cfs_rq->zero_vruntime);
+ }
+
+ #define __node_2_se(node) \
+@@ -732,7 +769,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
+ load += weight;
+ }
+
+- return avg >= (s64)(vruntime - cfs_rq->zero_vruntime) * load;
++ return avg >= vruntime_op(vruntime, "-", cfs_rq->zero_vruntime) * load;
+ }
+
+ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -743,7 +780,7 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ static void update_zero_vruntime(struct cfs_rq *cfs_rq)
+ {
+ u64 vruntime = avg_vruntime(cfs_rq);
+- s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
++ s64 delta = vruntime_op(vruntime, "-", cfs_rq->zero_vruntime);
+
+ sum_w_vruntime_update(cfs_rq, delta);
+
+@@ -770,13 +807,12 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+ return entity_before(__node_2_se(a), __node_2_se(b));
+ }
+
+-#define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
+-
+ static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node)
+ {
+ if (node) {
+ struct sched_entity *rse = __node_2_se(node);
+- if (vruntime_gt(min_vruntime, se, rse))
++
++ if (vruntime_cmp(se->min_vruntime, ">", rse->min_vruntime))
+ se->min_vruntime = rse->min_vruntime;
+ }
+ }
+@@ -887,7 +923,7 @@ static inline void update_protect_slice(struct cfs_rq *cfs_rq, struct sched_enti
+
+ static inline bool protect_slice(struct sched_entity *se)
+ {
+- return ((s64)(se->vprot - se->vruntime) > 0);
++ return vruntime_cmp(se->vruntime, "<", se->vprot);
+ }
+
+ static inline void cancel_protect_slice(struct sched_entity *se)
+@@ -1014,7 +1050,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
+ */
+ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- if ((s64)(se->vruntime - se->deadline) < 0)
++ if (vruntime_cmp(se->vruntime, "<", se->deadline))
+ return false;
+
+ /*
+@@ -13238,8 +13274,8 @@ bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ * zero_vruntime_fi, which would have been updated in prior calls
+ * to se_fi_update().
+ */
+- delta = (s64)(sea->vruntime - seb->vruntime) +
+- (s64)(cfs_rqb->zero_vruntime_fi - cfs_rqa->zero_vruntime_fi);
++ delta = vruntime_op(sea->vruntime, "-", seb->vruntime) +
++ vruntime_op(cfs_rqb->zero_vruntime_fi, "-", cfs_rqa->zero_vruntime_fi);
+
+ return delta > 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From e35af67c91f6c2293c91d99d8161fb2374772dce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jan 2026 16:49:09 +0100
+Subject: sched/fair: Only set slice protection at pick time
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit bcd74b2ffdd0a2233adbf26b65c62fc69a809c8e ]
+
+We should not (re)set slice protection in the sched_change pattern
+which calls put_prev_task() / set_next_task().
+
+Fixes: 63304558ba5d ("sched/eevdf: Curb wakeup-preemption")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20260219080624.561421378%40infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c3735197c6e7c..1644ad90acdca 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5477,7 +5477,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ }
+
+ static void
+-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
++set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool first)
+ {
+ clear_buddies(cfs_rq, se);
+
+@@ -5492,7 +5492,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ __dequeue_entity(cfs_rq, se);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+
+- set_protect_slice(cfs_rq, se);
++ if (first)
++ set_protect_slice(cfs_rq, se);
+ }
+
+ update_stats_curr_start(cfs_rq, se);
+@@ -8932,13 +8933,13 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ pse = parent_entity(pse);
+ }
+ if (se_depth >= pse_depth) {
+- set_next_entity(cfs_rq_of(se), se);
++ set_next_entity(cfs_rq_of(se), se, true);
+ se = parent_entity(se);
+ }
+ }
+
+ put_prev_entity(cfs_rq, pse);
+- set_next_entity(cfs_rq, se);
++ set_next_entity(cfs_rq, se, true);
+
+ __set_next_task_fair(rq, p, true);
+ }
+@@ -13530,7 +13531,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+- set_next_entity(cfs_rq, se);
++ set_next_entity(cfs_rq, se, first);
+ /* ensure bandwidth has been allocated on our new cfs_rq */
+ account_cfs_rq_runtime(cfs_rq, 0);
+ }
+--
+2.51.0
+
--- /dev/null
+From 90328d9eefa5545b634ecb96581a228a7d77472f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Nov 2025 12:09:16 +0100
+Subject: sched/fair: Rename cfs_rq::avg_load to cfs_rq::sum_weight
+
+From: Ingo Molnar <mingo@kernel.org>
+
+[ Upstream commit 4ff674fa986c27ec8a0542479258c92d361a2566 ]
+
+The ::avg_load field is a long-standing misnomer: it says it's an
+'average load', but in reality it's the momentary sum of the load
+of all currently runnable tasks. We'd have to also perform a
+division by nr_running (or use time-decay) to arrive at any sort
+of average value.
+
+This is clear from comments about the math of fair scheduling:
+
+ * \Sum w_i := cfs_rq->avg_load
+
+The sum of all weights is ... the sum of all weights, not
+the average of all weights.
+
+To make it doubly confusing, there's also an ::avg_load
+in the load-balancing struct sg_lb_stats, which *is* a
+true average.
+
+The second part of the field's name is a minor misnomer
+as well: it says 'load', and it is indeed a load_weight
+structure as it shares code with the load-balancer - but
+it's only in an SMP load-balancing context where
+load = weight, in the fair scheduling context the primary
+purpose is the weighting of different nice levels.
+
+So rename the field to ::sum_weight instead, which makes
+the terminology of the EEVDF math match up with our
+implementation of it:
+
+ * \Sum w_i := cfs_rq->sum_weight
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://patch.msgid.link/20251201064647.1851919-6-mingo@kernel.org
+Stable-dep-of: b3d99f43c72b ("sched/fair: Fix zero_vruntime tracking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 16 ++++++++--------
+ kernel/sched/sched.h | 2 +-
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 82038166d7b0c..e68e894b57559 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -608,7 +608,7 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ *
+ * v0 := cfs_rq->zero_vruntime
+ * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
+- * \Sum w_i := cfs_rq->avg_load
++ * \Sum w_i := cfs_rq->sum_weight
+ *
+ * Since zero_vruntime closely tracks the per-task service, these
+ * deltas: (v_i - v), will be in the order of the maximal (virtual) lag
+@@ -625,7 +625,7 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime += key * weight;
+- cfs_rq->avg_load += weight;
++ cfs_rq->sum_weight += weight;
+ }
+
+ static void
+@@ -635,16 +635,16 @@ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime -= key * weight;
+- cfs_rq->avg_load -= weight;
++ cfs_rq->sum_weight -= weight;
+ }
+
+ static inline
+ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ {
+ /*
+- * v' = v + d ==> avg_vruntime' = avg_runtime - d*avg_load
++ * v' = v + d ==> avg_vruntime' = avg_runtime - d*sum_weight
+ */
+- cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
++ cfs_rq->avg_vruntime -= cfs_rq->sum_weight * delta;
+ }
+
+ /*
+@@ -655,7 +655,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+- long load = cfs_rq->avg_load;
++ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+ unsigned long weight = scale_load_down(curr->load.weight);
+@@ -723,7 +723,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+- long load = cfs_rq->avg_load;
++ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+ unsigned long weight = scale_load_down(curr->load.weight);
+@@ -5164,7 +5164,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ *
+ * vl_i = (W + w_i)*vl'_i / W
+ */
+- load = cfs_rq->avg_load;
++ load = cfs_rq->sum_weight;
+ if (curr && curr->on_rq)
+ load += scale_load_down(curr->load.weight);
+
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 2f8b06b12a98f..20b2b7746c3c7 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -681,7 +681,7 @@ struct cfs_rq {
+ unsigned int h_nr_idle; /* SCHED_IDLE */
+
+ s64 avg_vruntime;
+- u64 avg_load;
++ u64 sum_weight;
+
+ u64 zero_vruntime;
+ #ifdef CONFIG_SCHED_CORE
+--
+2.51.0
+
--- /dev/null
+From 7600b4e28449680a381268c419258232763f4d8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Dec 2025 16:09:23 +0100
+Subject: sched/fair: Rename cfs_rq::avg_vruntime to ::sum_w_vruntime, and
+ helper functions
+
+From: Ingo Molnar <mingo@kernel.org>
+
+[ Upstream commit dcbc9d3f0e594223275a18f7016001889ad35eff ]
+
+The ::avg_vruntime field is a misnomer: it says it's an
+'average vruntime', but in reality it's the momentary sum
+of the weighted vruntimes of all queued tasks, which is
+at least a division away from being an average.
+
+This is clear from comments about the math of fair scheduling:
+
+ * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
+
+This confusion is increased by the cfs_avg_vruntime() function,
+which does perform the division and returns a true average.
+
+The sum of all weighted vruntimes should be named thusly,
+so rename the field to ::sum_w_vruntime. (As arguably
+::sum_weighted_vruntime would be a bit of a mouthful.)
+
+Understanding the scheduler is hard enough already, without
+extra layers of obfuscated naming. ;-)
+
+Also rename related helper functions:
+
+ sum_vruntime_add() => sum_w_vruntime_add()
+ sum_vruntime_sub() => sum_w_vruntime_sub()
+ sum_vruntime_update() => sum_w_vruntime_update()
+
+With the notable exception of cfs_avg_vruntime(), which
+was named accurately.
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://patch.msgid.link/20251201064647.1851919-7-mingo@kernel.org
+Stable-dep-of: b3d99f43c72b ("sched/fair: Fix zero_vruntime tracking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 26 +++++++++++++-------------
+ kernel/sched/sched.h | 2 +-
+ 2 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index e68e894b57559..a5f698ed15032 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -607,7 +607,7 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ * Which we track using:
+ *
+ * v0 := cfs_rq->zero_vruntime
+- * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
++ * \Sum (v_i - v0) * w_i := cfs_rq->sum_w_vruntime
+ * \Sum w_i := cfs_rq->sum_weight
+ *
+ * Since zero_vruntime closely tracks the per-task service, these
+@@ -619,32 +619,32 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ * As measured, the max (key * weight) value was ~44 bits for a kernel build.
+ */
+ static void
+-avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
++sum_w_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ unsigned long weight = scale_load_down(se->load.weight);
+ s64 key = entity_key(cfs_rq, se);
+
+- cfs_rq->avg_vruntime += key * weight;
++ cfs_rq->sum_w_vruntime += key * weight;
+ cfs_rq->sum_weight += weight;
+ }
+
+ static void
+-avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
++sum_w_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ unsigned long weight = scale_load_down(se->load.weight);
+ s64 key = entity_key(cfs_rq, se);
+
+- cfs_rq->avg_vruntime -= key * weight;
++ cfs_rq->sum_w_vruntime -= key * weight;
+ cfs_rq->sum_weight -= weight;
+ }
+
+ static inline
+-void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
++void sum_w_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ {
+ /*
+- * v' = v + d ==> avg_vruntime' = avg_runtime - d*sum_weight
++ * v' = v + d ==> sum_w_vruntime' = sum_runtime - d*sum_weight
+ */
+- cfs_rq->avg_vruntime -= cfs_rq->sum_weight * delta;
++ cfs_rq->sum_w_vruntime -= cfs_rq->sum_weight * delta;
+ }
+
+ /*
+@@ -654,7 +654,7 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+- s64 avg = cfs_rq->avg_vruntime;
++ s64 avg = cfs_rq->sum_w_vruntime;
+ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+@@ -722,7 +722,7 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+- s64 avg = cfs_rq->avg_vruntime;
++ s64 avg = cfs_rq->sum_w_vruntime;
+ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+@@ -745,7 +745,7 @@ static void update_zero_vruntime(struct cfs_rq *cfs_rq)
+ u64 vruntime = avg_vruntime(cfs_rq);
+ s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
+
+- avg_vruntime_update(cfs_rq, delta);
++ sum_w_vruntime_update(cfs_rq, delta);
+
+ cfs_rq->zero_vruntime = vruntime;
+ }
+@@ -819,7 +819,7 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+ */
+ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- avg_vruntime_add(cfs_rq, se);
++ sum_w_vruntime_add(cfs_rq, se);
+ update_zero_vruntime(cfs_rq);
+ se->min_vruntime = se->vruntime;
+ se->min_slice = se->slice;
+@@ -831,7 +831,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ &min_vruntime_cb);
+- avg_vruntime_sub(cfs_rq, se);
++ sum_w_vruntime_sub(cfs_rq, se);
+ update_zero_vruntime(cfs_rq);
+ }
+
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 20b2b7746c3c7..ed37ab9209e59 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -680,7 +680,7 @@ struct cfs_rq {
+ unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
+ unsigned int h_nr_idle; /* SCHED_IDLE */
+
+- s64 avg_vruntime;
++ s64 sum_w_vruntime;
+ u64 sum_weight;
+
+ u64 zero_vruntime;
+--
+2.51.0
+
--- /dev/null
+From 7e4cf09038cc1d3a97f04a2608b18e9dd55d2814 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 12:45:17 +0000
+Subject: sched_ext: Fix SCX_EFLAG_INITIALIZED being a no-op flag
+
+From: David Carlier <devnexen@gmail.com>
+
+[ Upstream commit 749989b2d90ddc7dd253ad3b11a77cf882721acf ]
+
+SCX_EFLAG_INITIALIZED is the sole member of enum scx_exit_flags with no
+explicit value, so the compiler assigns it 0. This makes the bitwise OR
+in scx_ops_init() a no-op:
+
+ sch->exit_info->flags |= SCX_EFLAG_INITIALIZED; /* |= 0 */
+
+As a result, BPF schedulers cannot distinguish whether ops.init()
+completed successfully by inspecting exit_info->flags.
+
+Assign the value 1LLU << 0 so the flag is actually set.
+
+Fixes: f3aec2adce8d ("sched_ext: Add SCX_EFLAG_INITIALIZED to indicate successful ops.init()")
+Signed-off-by: David Carlier <devnexen@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/ext_internal.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
+index 601cfae8cc765..8039a750490f8 100644
+--- a/kernel/sched/ext_internal.h
++++ b/kernel/sched/ext_internal.h
+@@ -69,7 +69,7 @@ enum scx_exit_flags {
+ * info communication. The following flag indicates whether ops.init()
+ * finished successfully.
+ */
+- SCX_EFLAG_INITIALIZED,
++ SCX_EFLAG_INITIALIZED = 1LLU << 0,
+ };
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From aba09778974558e8afe4a85015beca07ef62c59f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 11:23:27 -0800
+Subject: scsi: lpfc: Properly set WC for DPP mapping
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit bffda93a51b40afd67c11bf558dc5aae83ca0943 ]
+
+Using set_memory_wc() to enable write-combining for the DPP portion of
+the MMIO mapping is wrong as set_memory_*() is meant to operate on RAM
+only, not MMIO mappings. In fact, as used currently triggers a BUG_ON()
+with enabled CONFIG_DEBUG_VIRTUAL.
+
+Simply map the DPP region separately and in addition to the already
+existing mappings, avoiding any possible negative side effects for
+these.
+
+Fixes: 1351e69fc6db ("scsi: lpfc: Add push-to-adapter support to sli4")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Reviewed-by: Mathias Krause <minipli@grsecurity.net>
+Link: https://patch.msgid.link/20260212192327.141104-1-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 ++
+ drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++++++++++++++++++++------
+ drivers/scsi/lpfc/lpfc_sli4.h | 3 +++
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 065eb91de9c0f..adc0beaf5468f 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -12040,6 +12040,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
++ if (phba->sli4_hba.dpp_regs_memmap_wc_p)
++ iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ break;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 7ea7c4245c691..7b765719f4f6b 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -15911,6 +15911,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+ return NULL;
+ }
+
++static __maybe_unused void __iomem *
++lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
++{
++
++ /* DPP region is supposed to cover 64-bit BAR2 */
++ if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
++ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
++ "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
++ dpp_barset);
++ return NULL;
++ }
++
++ if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
++ void __iomem *dpp_map;
++
++ dpp_map = ioremap_wc(phba->pci_bar2_map,
++ pci_resource_len(phba->pcidev,
++ PCI_64BIT_BAR4));
++
++ if (dpp_map)
++ phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
++ }
++
++ return phba->sli4_hba.dpp_regs_memmap_wc_p;
++}
++
+ /**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+@@ -16874,9 +16900,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ uint8_t dpp_barset;
+ uint32_t dpp_offset;
+ uint8_t wq_create_version;
+-#ifdef CONFIG_X86
+- unsigned long pg_addr;
+-#endif
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+@@ -17062,14 +17085,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+
+ #ifdef CONFIG_X86
+ /* Enable combined writes for DPP aperture */
+- pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
+- rc = set_memory_wc(pg_addr, 1);
+- if (rc) {
++ bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
++ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3272 Cannot setup Combined "
+ "Write on WQ[%d] - disable DPP\n",
+ wq->queue_id);
+ phba->cfg_enable_dpp = 0;
++ } else {
++ wq->dpp_regaddr = bar_memmap_p + dpp_offset;
+ }
+ #else
+ phba->cfg_enable_dpp = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index fd6dab1578872..40f313e2769fc 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -785,6 +785,9 @@ struct lpfc_sli4_hba {
+ void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
+ * dpp registers
+ */
++ void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
++ * dpp registers with write combining
++ */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+--
+2.51.0
+
--- /dev/null
+From 92ce8002708908c5da6597126f62c3c58998840d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 19:28:06 +0000
+Subject: scsi: pm8001: Fix use-after-free in pm8001_queue_command()
+
+From: Salomon Dushimirimana <salomondush@google.com>
+
+[ Upstream commit 38353c26db28efd984f51d426eac2396d299cca7 ]
+
+Commit e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()") refactors
+pm8001_queue_command(), however it introduces a potential cause of a double
+free scenario when it changes the function to return -ENODEV in case of phy
+down/device gone state.
+
+In this path, pm8001_queue_command() updates task status and calls
+task_done to indicate to upper layer that the task has been handled.
+However, this also frees the underlying SAS task. A -ENODEV is then
+returned to the caller. When libsas sas_ata_qc_issue() receives this error
+value, it assumes the task wasn't handled/queued by LLDD and proceeds to
+clean up and free the task again, resulting in a double free.
+
+Since pm8001_queue_command() handles the SAS task in this case, it should
+return 0 to the caller indicating that the task has been handled.
+
+Fixes: e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()")
+Signed-off-by: Salomon Dushimirimana <salomondush@google.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://patch.msgid.link/20260213192806.439432-1-salomondush@google.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/pm8001/pm8001_sas.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 6a8d35aea93a5..645524f3fe2d0 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -525,8 +525,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
+ } else {
+ task->task_done(task);
+ }
+- rc = -ENODEV;
+- goto err_out;
++ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n");
++ return 0;
+ }
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+--
+2.51.0
+
--- /dev/null
+From 8131c0233022b82d0f30c4b2d521ff278b883184 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 18:37:57 +0800
+Subject: scsi: ufs: core: Move link recovery for hibern8 exit failure to
+ wl_resume
+
+From: Peter Wang <peter.wang@mediatek.com>
+
+[ Upstream commit 62c015373e1cdb1cdca824bd2dbce2dac0819467 ]
+
+Move the link recovery trigger from ufshcd_uic_pwr_ctrl() to
+__ufshcd_wl_resume(). Ensure link recovery is only attempted when hibern8
+exit fails during resume, not during hibern8 enter in suspend. Improve
+error handling and prevent unnecessary link recovery attempts.
+
+Fixes: 35dabf4503b9 ("scsi: ufs: core: Use link recovery when h8 exit fails during runtime resume")
+Signed-off-by: Peter Wang <peter.wang@mediatek.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223103906.2533654-1-peter.wang@mediatek.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 755aa9c0017df..dae23ec4fcea8 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4401,14 +4401,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+- /*
+- * If the h8 exit fails during the runtime resume process, it becomes
+- * stuck and cannot be recovered through the error handler. To fix
+- * this, use link recovery instead of the error handler.
+- */
+- if (ret && hba->pm_op_in_progress)
+- ret = ufshcd_link_recovery(hba);
+-
+ return ret;
+ }
+
+@@ -10058,7 +10050,15 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+- goto vendor_suspend;
++ /*
++ * If the h8 exit fails during the runtime resume
++ * process, it becomes stuck and cannot be recovered
++ * through the error handler. To fix this, use link
++ * recovery instead of the error handler.
++ */
++ ret = ufshcd_link_recovery(hba);
++ if (ret)
++ goto vendor_suspend;
+ }
+ } else if (ufshcd_is_link_off(hba)) {
+ /*
+--
+2.51.0
+
--- /dev/null
+From 93aec239d1231fd1ab7a9102383e3603a18e89cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 16:33:48 -0800
+Subject: selftests/bpf: Fix OOB read in dmabuf_collector
+
+From: T.J. Mercier <tjmercier@google.com>
+
+[ Upstream commit 6881af27f9ea0f5ca8f606f573ef5cc25ca31fe4 ]
+
+Dmabuf name allocations can be less than DMA_BUF_NAME_LEN characters,
+but bpf_probe_read_kernel always tries to read exactly that many bytes.
+If a name is less than DMA_BUF_NAME_LEN characters,
+bpf_probe_read_kernel will read past the end. bpf_probe_read_kernel_str
+stops at the first NUL terminator so use it instead, like
+iter_dmabuf_for_each already does.
+
+Fixes: ae5d2c59ecd7 ("selftests/bpf: Add test for dmabuf_iter")
+Reported-by: Jerome Lee <jaewookl@quicinc.com>
+Signed-off-by: T.J. Mercier <tjmercier@google.com>
+Link: https://lore.kernel.org/r/20260225003349.113746-1-tjmercier@google.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/progs/dmabuf_iter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
+index 13cdb11fdeb2b..9cbb7442646e5 100644
+--- a/tools/testing/selftests/bpf/progs/dmabuf_iter.c
++++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
+@@ -48,7 +48,7 @@ int dmabuf_collector(struct bpf_iter__dmabuf *ctx)
+
+ /* Buffers are not required to be named */
+ if (pname) {
+- if (bpf_probe_read_kernel(name, sizeof(name), pname))
++ if (bpf_probe_read_kernel_str(name, sizeof(name), pname) < 0)
+ return 1;
+
+ /* Name strings can be provided by userspace */
+--
+2.51.0
+
perf-core-fix-refcount-bug-and-potential-uaf-in-perf_mmap.patch
+drm-vmwgfx-fix-invalid-kref_put-callback-in-vmw_bo_d.patch
+drm-vmwgfx-return-the-correct-value-in-vmw_translate.patch
+debugobject-make-it-work-with-deferred-page-initiali.patch
+drm-logicvc-fix-device-node-reference-leak-in-logicv.patch
+kvm-arm64-hide-s1poe-from-guests-when-not-supported-.patch
+kvm-arm64-fix-id-register-initialization-for-non-pro.patch
+drm-fourcc-fix-plane-order-for-10-12-16-bit-ycbcr-fo.patch
+drm-tiny-sharp-memory-fix-pointer-error-dereference.patch
+irqchip-sifive-plic-fix-frozen-interrupt-due-to-affi.patch
+scsi-lpfc-properly-set-wc-for-dpp-mapping.patch
+scsi-pm8001-fix-use-after-free-in-pm8001_queue_comma.patch
+drm-imx-parallel-display-check-return-value-of-devm_.patch
+alsa-scarlett2-fix-dsp-filter-control-array-handling.patch
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch
+alsa-usb-audio-add-quirk_flag_skip_iface_setup.patch
+x86-fred-correct-speculative-safety-in-fred_extint.patch
+x86-cfi-fix-cfi-rewrite-for-odd-alignments.patch
+sched-fair-rename-cfs_rq-avg_load-to-cfs_rq-sum_weig.patch
+sched-fair-rename-cfs_rq-avg_vruntime-to-sum_w_vrunt.patch
+sched-fair-introduce-and-use-the-vruntime_cmp-and-vr.patch
+sched-fair-fix-zero_vruntime-tracking.patch
+sched-fair-only-set-slice-protection-at-pick-time.patch
+sched-eevdf-update-se-vprot-in-reweight_entity.patch
+sched-fair-fix-lag-clamp.patch
+rseq-clarify-rseq-registration-rseq_size-bound-check.patch
+perf-core-fix-invalid-wait-context-in-ctx_sched_in.patch
+accel-amdxdna-remove-buffer-size-check-when-creating.patch
+accel-amdxdna-prevent-ubuf-size-overflow.patch
+accel-amdxdna-validate-command-buffer-payload-count.patch
+drm-xe-wa-steer-rmw-of-mcr-registers-while-building-.patch
+cgroup-cpuset-fix-incorrect-use-of-cpuset_update_tas.patch
+cxl-move-devm_cxl_add_nvdimm_bridge-to-cxl_pmem.ko.patch
+cxl-fix-race-of-nvdimm_bus-object-when-creating-nvdi.patch
+scsi-ufs-core-move-link-recovery-for-hibern8-exit-fa.patch
+regulator-bq257xx-fix-device-node-reference-leak-in-.patch
+zloop-advertise-a-volatile-write-cache.patch
+zloop-check-for-spurious-options-passed-to-remove.patch
+drm-client-do-not-destroy-null-modes.patch
+alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
+alsa-usb-audio-use-inclusive-terms.patch
+perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
+s390-idle-fix-cpu-idle-exit-cpu-time-accounting.patch
+s390-vtime-fix-virtual-timer-forwarding.patch
+arm64-io-rename-ioremap_prot-to-__ioremap_prot.patch
+arm64-io-extract-user-memory-type-in-ioremap_prot.patch
+pci-dwc-ep-flush-msi-x-write-before-unmapping-its-at.patch
+drm-amdgpu-userq-do-not-allow-userspace-to-trivially.patch
+drm-amdgpu-unlock-a-mutex-before-destroying-it.patch
+drm-amdgpu-fix-locking-bugs-in-error-paths.patch
+drm-amdgpu-fix-error-handling-in-slot-reset.patch
+alsa-hda-cs35l56-fix-signedness-error-in-cs35l56_hda.patch
+btrfs-free-pages-on-error-in-btrfs_uring_read_extent.patch
+btrfs-fix-error-message-order-of-parameters-in-btrfs.patch
+btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
+btrfs-fix-objectid-value-in-error-message-in-check_e.patch
+btrfs-fix-warning-in-scrub_verify_one_metadata.patch
+btrfs-print-correct-subvol-num-if-active-swapfile-pr.patch
+btrfs-fix-compat-mask-in-error-messages-in-btrfs_che.patch
+alsa-usb-qcom-correct-parameter-comment-for-uaudio_t.patch
+asoc-sdca-fix-comments-for-sdca_irq_request.patch
+bpf-arm64-force-8-byte-alignment-for-jit-buffer-to-p.patch
+bpf-fix-stack-out-of-bounds-write-in-devmap.patch
+selftests-bpf-fix-oob-read-in-dmabuf_collector.patch
+sched_ext-fix-scx_eflag_initialized-being-a-no-op-fl.patch
+spi-stm32-fix-missing-pointer-assignment-in-case-of-.patch
+pci-correct-pci_cap_exp_endpoint_sizeof_v2-value.patch
+bpf-fix-race-in-cpumap-on-preempt_rt.patch
+bpf-fix-race-in-devmap-on-preempt_rt.patch
+bpf-add-bitwise-tracking-for-bpf_end.patch
+bpf-introduce-tnum_step-to-step-through-tnum-s-membe.patch
+bpf-improve-bounds-when-tnum-has-a-single-possible-v.patch
--- /dev/null
+From b3d5193b9a68f1fa81c48206d268e94c4bac80bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 16:09:22 +0100
+Subject: spi: stm32: fix missing pointer assignment in case of dma chaining
+
+From: Alain Volmat <alain.volmat@foss.st.com>
+
+[ Upstream commit e96493229a6399e902062213c6381162464cdd50 ]
+
+Commit c4f2c05ab029 ("spi: stm32: fix pointer-to-pointer variables usage")
+introduced a regression since dma descriptors generated as part of the
+stm32_spi_prepare_rx_dma_mdma_chaining function are not well propagated
+to the caller function, leading to mdma-dma chaining being no more
+functional.
+
+Fixes: c4f2c05ab029 ("spi: stm32: fix pointer-to-pointer variables usage")
+Signed-off-by: Alain Volmat <alain.volmat@foss.st.com>
+Acked-by: Antonio Quartulli <antonio@mandelbit.com>
+Link: https://patch.msgid.link/20260224-spi-stm32-chaining-fix-v1-1-5da7a4851b66@foss.st.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-stm32.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 80986bd251d29..7a6ee93be9bd4 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1570,6 +1570,9 @@ static int stm32_spi_prepare_rx_dma_mdma_chaining(struct stm32_spi *spi,
+ return -EINVAL;
+ }
+
++ *rx_mdma_desc = _mdma_desc;
++ *rx_dma_desc = _dma_desc;
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From b1501fbae6f640c54b6090c4b57e0c2a33338a55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 13:59:43 +0100
+Subject: x86/cfi: Fix CFI rewrite for odd alignments
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 24c8147abb39618d74fcc36e325765e8fe7bdd7a ]
+
+Rustam reported his clang builds did not boot properly; turns out his
+.config has: CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y set.
+
+Fix up the FineIBT code to deal with this unusual alignment.
+
+Fixes: 931ab63664f0 ("x86/ibt: Implement FineIBT")
+Reported-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/cfi.h | 12 ++++++++----
+ arch/x86/include/asm/linkage.h | 4 ++--
+ arch/x86/kernel/alternative.c | 29 ++++++++++++++++++++++-------
+ arch/x86/net/bpf_jit_comp.c | 13 ++-----------
+ 4 files changed, 34 insertions(+), 24 deletions(-)
+
+diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h
+index c40b9ebc1fb40..ab3fbbd947ed9 100644
+--- a/arch/x86/include/asm/cfi.h
++++ b/arch/x86/include/asm/cfi.h
+@@ -111,6 +111,12 @@ extern bhi_thunk __bhi_args_end[];
+
+ struct pt_regs;
+
++#ifdef CONFIG_CALL_PADDING
++#define CFI_OFFSET (CONFIG_FUNCTION_PADDING_CFI+5)
++#else
++#define CFI_OFFSET 5
++#endif
++
+ #ifdef CONFIG_CFI
+ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+ #define __bpfcall
+@@ -119,11 +125,9 @@ static inline int cfi_get_offset(void)
+ {
+ switch (cfi_mode) {
+ case CFI_FINEIBT:
+- return 16;
++ return /* fineibt_prefix_size */ 16;
+ case CFI_KCFI:
+- if (IS_ENABLED(CONFIG_CALL_PADDING))
+- return 16;
+- return 5;
++ return CFI_OFFSET;
+ default:
+ return 0;
+ }
+diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
+index 9d38ae744a2e4..a7294656ad908 100644
+--- a/arch/x86/include/asm/linkage.h
++++ b/arch/x86/include/asm/linkage.h
+@@ -68,7 +68,7 @@
+ * Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_PADDING) the
+ * CFI symbol layout changes.
+ *
+- * Without CALL_THUNKS:
++ * Without CALL_PADDING:
+ *
+ * .align FUNCTION_ALIGNMENT
+ * __cfi_##name:
+@@ -77,7 +77,7 @@
+ * .long __kcfi_typeid_##name
+ * name:
+ *
+- * With CALL_THUNKS:
++ * With CALL_PADDING:
+ *
+ * .align FUNCTION_ALIGNMENT
+ * __cfi_##name:
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 8ee5ff547357a..bd16e9f40d51a 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -1168,7 +1168,7 @@ void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
+
+ poison_endbr(addr);
+ if (IS_ENABLED(CONFIG_FINEIBT))
+- poison_cfi(addr - 16);
++ poison_cfi(addr - CFI_OFFSET);
+ }
+ }
+
+@@ -1375,6 +1375,8 @@ extern u8 fineibt_preamble_end[];
+ #define fineibt_preamble_ud 0x13
+ #define fineibt_preamble_hash 5
+
++#define fineibt_prefix_size (fineibt_preamble_size - ENDBR_INSN_SIZE)
++
+ /*
+ * <fineibt_caller_start>:
+ * 0: b8 78 56 34 12 mov $0x12345678, %eax
+@@ -1620,7 +1622,7 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
+ * have determined there are no indirect calls to it and we
+ * don't need no CFI either.
+ */
+- if (!is_endbr(addr + 16))
++ if (!is_endbr(addr + CFI_OFFSET))
+ continue;
+
+ hash = decode_preamble_hash(addr, &arity);
+@@ -1628,6 +1630,15 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
+ addr, addr, 5, addr))
+ return -EINVAL;
+
++ /*
++ * FineIBT relies on being at func-16, so if the preamble is
++ * actually larger than that, place it the tail end.
++ *
++ * NOTE: this is possible with things like DEBUG_CALL_THUNKS
++ * and DEBUG_FORCE_FUNCTION_ALIGN_64B.
++ */
++ addr += CFI_OFFSET - fineibt_prefix_size;
++
+ text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
+ WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
+ text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
+@@ -1650,10 +1661,10 @@ static void cfi_rewrite_endbr(s32 *start, s32 *end)
+ for (s = start; s < end; s++) {
+ void *addr = (void *)s + *s;
+
+- if (!exact_endbr(addr + 16))
++ if (!exact_endbr(addr + CFI_OFFSET))
+ continue;
+
+- poison_endbr(addr + 16);
++ poison_endbr(addr + CFI_OFFSET);
+ }
+ }
+
+@@ -1758,7 +1769,8 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
+ if (FINEIBT_WARN(fineibt_preamble_size, 20) ||
+ FINEIBT_WARN(fineibt_preamble_bhi + fineibt_bhi1_size, 20) ||
+ FINEIBT_WARN(fineibt_caller_size, 14) ||
+- FINEIBT_WARN(fineibt_paranoid_size, 20))
++ FINEIBT_WARN(fineibt_paranoid_size, 20) ||
++ WARN_ON_ONCE(CFI_OFFSET < fineibt_prefix_size))
+ return;
+
+ if (cfi_mode == CFI_AUTO) {
+@@ -1871,6 +1883,11 @@ static void poison_cfi(void *addr)
+ */
+ switch (cfi_mode) {
+ case CFI_FINEIBT:
++ /*
++ * FineIBT preamble is at func-16.
++ */
++ addr += CFI_OFFSET - fineibt_prefix_size;
++
+ /*
+ * FineIBT prefix should start with an ENDBR.
+ */
+@@ -1909,8 +1926,6 @@ static void poison_cfi(void *addr)
+ }
+ }
+
+-#define fineibt_prefix_size (fineibt_preamble_size - ENDBR_INSN_SIZE)
+-
+ /*
+ * When regs->ip points to a 0xD6 byte in the FineIBT preamble,
+ * return true and fill out target and type.
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index de5083cb1d374..788671a32d8ee 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -438,17 +438,8 @@ static void emit_kcfi(u8 **pprog, u32 hash)
+
+ EMIT1_off32(0xb8, hash); /* movl $hash, %eax */
+ #ifdef CONFIG_CALL_PADDING
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
++ for (int i = 0; i < CONFIG_FUNCTION_PADDING_CFI; i++)
++ EMIT1(0x90);
+ #endif
+ EMIT_ENDBR();
+
+--
+2.51.0
+
--- /dev/null
+From 2b9d3a796af9703222ae3ba5e89ab28cde01ea93 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 13:15:04 +0000
+Subject: x86/fred: Correct speculative safety in fred_extint()
+
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+
+[ Upstream commit aa280a08e7d8fae58557acc345b36b3dc329d595 ]
+
+array_index_nospec() is no use if the result gets spilled to the stack, as
+it makes the believed safe-under-speculation value subject to memory
+predictions.
+
+For all practical purposes, this means array_index_nospec() must be used in
+the expression that accesses the array.
+
+As the code currently stands, it's the wrong side of irqentry_enter(), and
+'index' is put into %ebp across the function call.
+
+Remove the index variable and reposition array_index_nospec(), so it's
+calculated immediately before the array access.
+
+Fixes: 14619d912b65 ("x86/fred: FRED entry/exit and dispatch code")
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260106131504.679932-1-andrew.cooper3@citrix.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/entry_fred.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/entry/entry_fred.c b/arch/x86/entry/entry_fred.c
+index f004a4dc74c2d..563e439b743f2 100644
+--- a/arch/x86/entry/entry_fred.c
++++ b/arch/x86/entry/entry_fred.c
+@@ -159,8 +159,6 @@ void __init fred_complete_exception_setup(void)
+ static noinstr void fred_extint(struct pt_regs *regs)
+ {
+ unsigned int vector = regs->fred_ss.vector;
+- unsigned int index = array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
+- NR_SYSTEM_VECTORS);
+
+ if (WARN_ON_ONCE(vector < FIRST_EXTERNAL_VECTOR))
+ return;
+@@ -169,7 +167,8 @@ static noinstr void fred_extint(struct pt_regs *regs)
+ irqentry_state_t state = irqentry_enter(regs);
+
+ instrumentation_begin();
+- sysvec_table[index](regs);
++ sysvec_table[array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
++ NR_SYSTEM_VECTORS)](regs);
+ instrumentation_end();
+ irqentry_exit(regs, state);
+ } else {
+--
+2.51.0
+
--- /dev/null
+From 6109f40ca1ed2cf4e56d1ad3498fcf5a1237b467 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 06:21:44 -0800
+Subject: zloop: advertise a volatile write cache
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 6acf7860dcc79ed045cc9e6a79c8a8bb6959dba7 ]
+
+Zloop is file system backed and thus needs to sync the underlying file
+system to persist data. Set BLK_FEAT_WRITE_CACHE so that the block
+layer actually send flush commands, and fix the flush implementation
+as sync_filesystem requires s_umount to be held and the code currently
+misses that.
+
+Fixes: eb0570c7df23 ("block: new zoned loop block device driver")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/zloop.c | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
+index 39a425db670c8..2419677524453 100644
+--- a/drivers/block/zloop.c
++++ b/drivers/block/zloop.c
+@@ -499,6 +499,21 @@ static void zloop_rw(struct zloop_cmd *cmd)
+ zloop_put_cmd(cmd);
+ }
+
++/*
++ * Sync the entire FS containing the zone files instead of walking all files.
++ */
++static int zloop_flush(struct zloop_device *zlo)
++{
++ struct super_block *sb = file_inode(zlo->data_dir)->i_sb;
++ int ret;
++
++ down_read(&sb->s_umount);
++ ret = sync_filesystem(sb);
++ up_read(&sb->s_umount);
++
++ return ret;
++}
++
+ static void zloop_handle_cmd(struct zloop_cmd *cmd)
+ {
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+@@ -515,11 +530,7 @@ static void zloop_handle_cmd(struct zloop_cmd *cmd)
+ zloop_rw(cmd);
+ return;
+ case REQ_OP_FLUSH:
+- /*
+- * Sync the entire FS containing the zone files instead of
+- * walking all files
+- */
+- cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
++ cmd->ret = zloop_flush(zlo);
+ break;
+ case REQ_OP_ZONE_RESET:
+ cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
+@@ -892,7 +903,8 @@ static int zloop_ctl_add(struct zloop_options *opts)
+ .max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
+ .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT,
+ .chunk_sectors = opts->zone_size,
+- .features = BLK_FEAT_ZONED,
++ .features = BLK_FEAT_ZONED | BLK_FEAT_WRITE_CACHE,
++
+ };
+ unsigned int nr_zones, i, j;
+ struct zloop_device *zlo;
+--
+2.51.0
+
--- /dev/null
+From b70b0e1366429498fc82ff6adaeeb0ebeb9e0787 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 06:21:45 -0800
+Subject: zloop: check for spurious options passed to remove
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 3c4617117a2b7682cf037be5e5533e379707f050 ]
+
+Zloop uses a command option parser for all control commands,
+but most options are only valid for adding a new device. Check
+for incorrectly specified options in the remove handler.
+
+Fixes: eb0570c7df23 ("block: new zoned loop block device driver")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/zloop.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
+index 2419677524453..26364e43aeb34 100644
+--- a/drivers/block/zloop.c
++++ b/drivers/block/zloop.c
+@@ -1076,7 +1076,12 @@ static int zloop_ctl_remove(struct zloop_options *opts)
+ int ret;
+
+ if (!(opts->mask & ZLOOP_OPT_ID)) {
+- pr_err("No ID specified\n");
++ pr_err("No ID specified for remove\n");
++ return -EINVAL;
++ }
++
++ if (opts->mask & ~ZLOOP_OPT_ID) {
++ pr_err("Invalid option specified for remove\n");
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 71c96e74b460e436769003295bb141a37484ddf2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 12:53:41 -0800
+Subject: accel/amdxdna: Fix command hang on suspended hardware context
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 07efce5a6611af6714ea3ef65694e0c8dd7e44f5 ]
+
+When a hardware context is suspended, the job scheduler is stopped. If a
+command is submitted while the context is suspended, the job is queued in
+the scheduler but aie2_sched_job_run() is never invoked to restart the
+hardware context. As a result, the command hangs.
+
+Fix this by modifying the hardware context suspend routine to keep the job
+scheduler running so that queued jobs can trigger context restart properly.
+
+Fixes: aac243092b70 ("accel/amdxdna: Add command execution")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260211205341.722982-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_ctx.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
+index 1dcf6e862656d..01a02f4c3a98d 100644
+--- a/drivers/accel/amdxdna/aie2_ctx.c
++++ b/drivers/accel/amdxdna/aie2_ctx.c
+@@ -53,6 +53,7 @@ static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwct
+ {
+ drm_sched_stop(&hwctx->priv->sched, bad_job);
+ aie2_destroy_context(xdna->dev_handle, hwctx);
++ drm_sched_start(&hwctx->priv->sched, 0);
+ }
+
+ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
+@@ -80,7 +81,6 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw
+ }
+
+ out:
+- drm_sched_start(&hwctx->priv->sched, 0);
+ XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
+ return ret;
+ }
+@@ -297,19 +297,23 @@ aie2_sched_job_run(struct drm_sched_job *sched_job)
+ struct dma_fence *fence;
+ int ret;
+
+- if (!hwctx->priv->mbox_chann)
++ ret = amdxdna_pm_resume_get(hwctx->client->xdna);
++ if (ret)
++ return NULL;
++
++ if (!hwctx->priv->mbox_chann) {
++ amdxdna_pm_suspend_put(hwctx->client->xdna);
+ return NULL;
++ }
+
+- if (!mmget_not_zero(job->mm))
++ if (!mmget_not_zero(job->mm)) {
++ amdxdna_pm_suspend_put(hwctx->client->xdna);
+ return ERR_PTR(-ESRCH);
++ }
+
+ kref_get(&job->refcnt);
+ fence = dma_fence_get(job->fence);
+
+- ret = amdxdna_pm_resume_get(hwctx->client->xdna);
+- if (ret)
+- goto out;
+-
+ if (job->drv_cmd) {
+ switch (job->drv_cmd->opcode) {
+ case SYNC_DEBUG_BO:
+--
+2.51.0
+
--- /dev/null
+From e9d8b46f95929ed709d96a5e9af3eda98f9cd8b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 22:03:06 -0800
+Subject: accel/amdxdna: Fix crash when destroying a suspended hardware context
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 8363c02863332992a1822688da41f881d88d1631 ]
+
+If userspace issues an ioctl to destroy a hardware context that has
+already been automatically suspended, the driver may crash because the
+mailbox channel pointer is NULL for the suspended context.
+
+Fix this by checking the mailbox channel pointer in aie2_destroy_context()
+before accessing it.
+
+Fixes: 97f27573837e ("accel/amdxdna: Fix potential NULL pointer dereference in context cleanup")
+Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260206060306.4050531-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_message.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
+index 2c5b27d90563e..43657203d22b7 100644
+--- a/drivers/accel/amdxdna/aie2_message.c
++++ b/drivers/accel/amdxdna/aie2_message.c
+@@ -274,6 +274,9 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
++ if (!hwctx->priv->mbox_chann)
++ return 0;
++
+ xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
+ ret = aie2_destroy_context_req(ndev, hwctx->fw_ctx_id);
+ xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
+--
+2.51.0
+
--- /dev/null
+From 273d2b555f306c2515fc0ad836fd6e90930dffe3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 12:46:44 -0800
+Subject: accel/amdxdna: Fix dead lock for suspend and resume
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 1aa82181a3c285c7351523d587f7981ae4c015c8 ]
+
+When an application issues a query IOCTL while auto suspend is running,
+a deadlock can occur. The query path holds dev_lock and then calls
+pm_runtime_resume_and_get(), which waits for the ongoing suspend to
+complete. Meanwhile, the suspend callback attempts to acquire dev_lock
+and blocks, resulting in a deadlock.
+
+Fix this by releasing dev_lock before calling pm_runtime_resume_and_get()
+and reacquiring it after the call completes. Also acquire dev_lock in the
+resume callback to keep the locking consistent.
+
+Fixes: 063db451832b ("accel/amdxdna: Enhance runtime power management")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260211204644.722758-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_ctx.c | 4 ++--
+ drivers/accel/amdxdna/aie2_pci.c | 7 +++----
+ drivers/accel/amdxdna/aie2_pm.c | 2 +-
+ drivers/accel/amdxdna/amdxdna_ctx.c | 19 +++++++------------
+ drivers/accel/amdxdna/amdxdna_pm.c | 2 ++
+ drivers/accel/amdxdna/amdxdna_pm.h | 11 +++++++++++
+ 6 files changed, 26 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
+index a3bb37543f73d..1dcf6e862656d 100644
+--- a/drivers/accel/amdxdna/aie2_ctx.c
++++ b/drivers/accel/amdxdna/aie2_ctx.c
+@@ -629,7 +629,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
+ goto free_entity;
+ }
+
+- ret = amdxdna_pm_resume_get(xdna);
++ ret = amdxdna_pm_resume_get_locked(xdna);
+ if (ret)
+ goto free_col_list;
+
+@@ -760,7 +760,7 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
+ if (!hwctx->cus)
+ return -ENOMEM;
+
+- ret = amdxdna_pm_resume_get(xdna);
++ ret = amdxdna_pm_resume_get_locked(xdna);
+ if (ret)
+ goto free_cus;
+
+diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
+index ec1c3ad57d490..20568d0f9a639 100644
+--- a/drivers/accel/amdxdna/aie2_pci.c
++++ b/drivers/accel/amdxdna/aie2_pci.c
+@@ -469,7 +469,6 @@ static int aie2_hw_suspend(struct amdxdna_dev *xdna)
+ {
+ struct amdxdna_client *client;
+
+- guard(mutex)(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ aie2_hwctx_suspend(client);
+
+@@ -969,7 +968,7 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+- ret = amdxdna_pm_resume_get(xdna);
++ ret = amdxdna_pm_resume_get_locked(xdna);
+ if (ret)
+ goto dev_exit;
+
+@@ -1062,7 +1061,7 @@ static int aie2_get_array(struct amdxdna_client *client,
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+- ret = amdxdna_pm_resume_get(xdna);
++ ret = amdxdna_pm_resume_get_locked(xdna);
+ if (ret)
+ goto dev_exit;
+
+@@ -1152,7 +1151,7 @@ static int aie2_set_state(struct amdxdna_client *client,
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+- ret = amdxdna_pm_resume_get(xdna);
++ ret = amdxdna_pm_resume_get_locked(xdna);
+ if (ret)
+ goto dev_exit;
+
+diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
+index 579b8be13b180..29bd4403a94d4 100644
+--- a/drivers/accel/amdxdna/aie2_pm.c
++++ b/drivers/accel/amdxdna/aie2_pm.c
+@@ -31,7 +31,7 @@ int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+ {
+ int ret;
+
+- ret = amdxdna_pm_resume_get(ndev->xdna);
++ ret = amdxdna_pm_resume_get_locked(ndev->xdna);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
+index d17aef89a0add..db3aa26fb55f0 100644
+--- a/drivers/accel/amdxdna/amdxdna_ctx.c
++++ b/drivers/accel/amdxdna/amdxdna_ctx.c
+@@ -266,9 +266,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
+ struct amdxdna_drm_config_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+- int ret, idx;
+ u32 buf_size;
+ void *buf;
++ int ret;
+ u64 val;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+@@ -310,20 +310,17 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
+ return -EINVAL;
+ }
+
+- mutex_lock(&xdna->dev_lock);
+- idx = srcu_read_lock(&client->hwctx_srcu);
++ guard(mutex)(&xdna->dev_lock);
+ hwctx = xa_load(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
+ ret = -EINVAL;
+- goto unlock_srcu;
++ goto free_buf;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
+
+-unlock_srcu:
+- srcu_read_unlock(&client->hwctx_srcu, idx);
+- mutex_unlock(&xdna->dev_lock);
++free_buf:
+ kfree(buf);
+ return ret;
+ }
+@@ -334,7 +331,7 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
+ struct amdxdna_hwctx *hwctx;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+- int ret, idx;
++ int ret;
+
+ if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
+ return -EOPNOTSUPP;
+@@ -345,17 +342,15 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
+
+ abo = to_xdna_obj(gobj);
+ guard(mutex)(&xdna->dev_lock);
+- idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
+ if (!hwctx) {
+ ret = -EINVAL;
+- goto unlock_srcu;
++ goto put_obj;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
+
+-unlock_srcu:
+- srcu_read_unlock(&client->hwctx_srcu, idx);
++put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+ }
+diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
+index d024d480521c4..b1fafddd7ad59 100644
+--- a/drivers/accel/amdxdna/amdxdna_pm.c
++++ b/drivers/accel/amdxdna/amdxdna_pm.c
+@@ -16,6 +16,7 @@ int amdxdna_pm_suspend(struct device *dev)
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+
++ guard(mutex)(&xdna->dev_lock);
+ if (xdna->dev_info->ops->suspend)
+ ret = xdna->dev_info->ops->suspend(xdna);
+
+@@ -28,6 +29,7 @@ int amdxdna_pm_resume(struct device *dev)
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+
++ guard(mutex)(&xdna->dev_lock);
+ if (xdna->dev_info->ops->resume)
+ ret = xdna->dev_info->ops->resume(xdna);
+
+diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
+index 77b2d6e455700..3d26b973e0e36 100644
+--- a/drivers/accel/amdxdna/amdxdna_pm.h
++++ b/drivers/accel/amdxdna/amdxdna_pm.h
+@@ -15,4 +15,15 @@ void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
+ void amdxdna_pm_init(struct amdxdna_dev *xdna);
+ void amdxdna_pm_fini(struct amdxdna_dev *xdna);
+
++static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna)
++{
++ int ret;
++
++ mutex_unlock(&xdna->dev_lock);
++ ret = amdxdna_pm_resume_get(xdna);
++ mutex_lock(&xdna->dev_lock);
++
++ return ret;
++}
++
+ #endif /* _AMDXDNA_PM_H_ */
+--
+2.51.0
+
--- /dev/null
+From 263308438fc1d8e7ea78eb9ee81ec4a50c19fa45 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:54:15 -0800
+Subject: accel/amdxdna: Fix out-of-bounds memset in command slot handling
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 1110a949675ebd56b3f0286e664ea543f745801c ]
+
+The remaining space in a command slot may be smaller than the size of
+the command header. Clearing the command header with memset() before
+verifying the available slot space can result in an out-of-bounds write
+and memory corruption.
+
+Fix this by moving the memset() call after the size validation.
+
+Fixes: 3d32eb7a5ecf ("accel/amdxdna: Fix cu_idx being cleared by memset() during command setup")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260217185415.1781908-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_message.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
+index d69d3afcfb748..a758c11a05a9c 100644
+--- a/drivers/accel/amdxdna/aie2_message.c
++++ b/drivers/accel/amdxdna/aie2_message.c
+@@ -656,11 +656,11 @@ aie2_cmdlist_fill_npu_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *siz
+ u32 cmd_len;
+ void *cmd;
+
+- memset(npu_slot, 0, sizeof(*npu_slot));
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (*size < sizeof(*npu_slot) + cmd_len)
+ return -EINVAL;
+
++ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+@@ -681,7 +681,6 @@ aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
+ u32 cmd_len;
+ u32 arg_sz;
+
+- memset(npu_slot, 0, sizeof(*npu_slot));
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*sn);
+ if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE)
+@@ -690,6 +689,7 @@ aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
+
++ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+@@ -713,7 +713,6 @@ aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t
+ u32 cmd_len;
+ u32 arg_sz;
+
+- memset(npu_slot, 0, sizeof(*npu_slot));
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+ if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
+@@ -722,6 +721,7 @@ aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
+
++ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+@@ -749,7 +749,6 @@ aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
+ u32 cmd_len;
+ u32 arg_sz;
+
+- memset(npu_slot, 0, sizeof(*npu_slot));
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+ if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
+@@ -758,6 +757,7 @@ aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
+
++ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->type = EXEC_NPU_TYPE_ELF;
+ npu_slot->inst_buf_addr = pd->inst_buf;
+ npu_slot->save_buf_addr = pd->save_buf;
+--
+2.51.0
+
--- /dev/null
+From 8f14d282be6156e861e710c937ab3ca76706f470 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 12:47:16 -0800
+Subject: accel/amdxdna: Fix suspend failure after enabling turbo mode
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit fdb65acfe655f844ae1e88696b9656d3ef5bb8fb ]
+
+Enabling turbo mode disables hardware clock gating. Suspend requires
+hardware clock gating to be re-enabled, otherwise suspend will fail.
+Fix this by calling aie2_runtime_cfg() from aie2_hw_stop() to
+re-enable clock gating during suspend. Also ensure that firmware is
+initialized in aie2_hw_start() before modifying clock-gating
+settings during resume.
+
+Fixes: f4d7b8a6bc8c ("accel/amdxdna: Enhance power management settings")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260211204716.722788-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_pci.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
+index 20568d0f9a639..3356c9ed079a8 100644
+--- a/drivers/accel/amdxdna/aie2_pci.c
++++ b/drivers/accel/amdxdna/aie2_pci.c
+@@ -341,6 +341,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
+ return;
+ }
+
++ aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, NULL);
+ aie2_mgmt_fw_fini(ndev);
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+@@ -424,15 +425,15 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
+ goto stop_psp;
+ }
+
+- ret = aie2_pm_init(ndev);
++ ret = aie2_mgmt_fw_init(ndev);
+ if (ret) {
+- XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
++ XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+- ret = aie2_mgmt_fw_init(ndev);
++ ret = aie2_pm_init(ndev);
+ if (ret) {
+- XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
++ XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f3e8fc0ef53c68e635fb5047d49bf082c5e94aa2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 11:28:15 -0800
+Subject: accel/amdxdna: Prevent ubuf size overflow
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 03808abb1d868aed7478a11a82e5bb4b3f1ca6d6 ]
+
+The ubuf size calculation may overflow, resulting in an undersized
+allocation and possible memory corruption.
+
+Use check_add_overflow() helpers to validate the size calculation before
+allocation.
+
+Fixes: bd72d4acda10 ("accel/amdxdna: Support user space allocated buffer")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260217192815.1784689-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/amdxdna_ubuf.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
+index 9e3b3b055caa8..62a478f6b45fb 100644
+--- a/drivers/accel/amdxdna/amdxdna_ubuf.c
++++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
+@@ -7,6 +7,7 @@
+ #include <drm/drm_device.h>
+ #include <drm/drm_print.h>
+ #include <linux/dma-buf.h>
++#include <linux/overflow.h>
+ #include <linux/pagemap.h>
+ #include <linux/vmalloc.h>
+
+@@ -176,7 +177,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ goto free_ent;
+ }
+
+- exp_info.size += va_ent[i].len;
++ if (check_add_overflow(exp_info.size, va_ent[i].len, &exp_info.size)) {
++ ret = -EINVAL;
++ goto free_ent;
++ }
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+--
+2.51.0
+
--- /dev/null
+From 30d83b1fc32eee563a5838bca8113d598d6120d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 10:42:51 -0600
+Subject: accel/amdxdna: Reduce log noise during process termination
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 57aa3917a3b3bd805a3679371f97a1ceda3c5510 ]
+
+During process termination, several error messages are logged that are
+not actual errors but expected conditions when a process is killed or
+interrupted. This creates unnecessary noise in the kernel log.
+
+The specific scenarios are:
+
+1. HMM invalidation returns -ERESTARTSYS when the wait is interrupted by
+ a signal during process cleanup. This is expected when a process is
+ being terminated and should not be logged as an error.
+
+2. Context destruction returns -ENODEV when the firmware or device has
+ already stopped, which commonly occurs during cleanup if the device
+ was already torn down. This is also an expected condition during
+ orderly shutdown.
+
+Downgrade these expected error conditions from error level to debug level
+to reduce log noise while still keeping genuine errors visible.
+
+Fixes: 97f27573837e ("accel/amdxdna: Fix potential NULL pointer dereference in context cleanup")
+Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260210164521.1094274-3-mario.limonciello@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_ctx.c | 6 ++++--
+ drivers/accel/amdxdna/aie2_message.c | 4 +++-
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
+index 6378a0bc7b6ea..a3bb37543f73d 100644
+--- a/drivers/accel/amdxdna/aie2_ctx.c
++++ b/drivers/accel/amdxdna/aie2_ctx.c
+@@ -497,7 +497,7 @@ static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
+
+ if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) {
+ ret = aie2_destroy_context(xdna->dev_handle, hwctx);
+- if (ret)
++ if (ret && ret != -ENODEV)
+ XDNA_ERR(xdna, "Destroy temporal only context failed, ret %d", ret);
+ } else {
+ ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
+@@ -1070,6 +1070,8 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
+
+ ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, MAX_SCHEDULE_TIMEOUT);
+- if (!ret || ret == -ERESTARTSYS)
++ if (!ret)
+ XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
++ else if (ret == -ERESTARTSYS)
++ XDNA_DBG(xdna, "Wait for bo interrupted by signal");
+ }
+diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
+index 43657203d22b7..d69d3afcfb748 100644
+--- a/drivers/accel/amdxdna/aie2_message.c
++++ b/drivers/accel/amdxdna/aie2_message.c
+@@ -193,8 +193,10 @@ static int aie2_destroy_context_req(struct amdxdna_dev_hdl *ndev, u32 id)
+
+ req.context_id = id;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+- if (ret)
++ if (ret && ret != -ENODEV)
+ XDNA_WARN(xdna, "Destroy context failed, ret %d", ret);
++ else if (ret == -ENODEV)
++ XDNA_DBG(xdna, "Destroy context: device already stopped");
+
+ return ret;
+ }
+--
+2.51.0
+
--- /dev/null
+From dfb82c9640e4d4088102199c5566f18999dd31be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 22:02:37 -0800
+Subject: accel/amdxdna: Remove buffer size check when creating command BO
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 08fe1b5166fdc81b010d7bf39cd6440620e7931e ]
+
+Large command buffers may be used, and they do not always need to be
+mapped or accessed by the driver. Performing a size check at command BO
+creation time unnecessarily rejects valid use cases.
+
+Remove the buffer size check from command BO creation, and defer vmap
+and size validation to the paths where the driver actually needs to map
+and access the command buffer.
+
+Fixes: ac49797c1815 ("accel/amdxdna: Add GEM buffer object management")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260206060237.4050492-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/amdxdna_gem.c | 38 ++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
+index dfa916eeb2d9c..56341b7668b10 100644
+--- a/drivers/accel/amdxdna/amdxdna_gem.c
++++ b/drivers/accel/amdxdna/amdxdna_gem.c
+@@ -21,8 +21,6 @@
+ #include "amdxdna_pci_drv.h"
+ #include "amdxdna_ubuf.h"
+
+-#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+-
+ MODULE_IMPORT_NS("DMA_BUF");
+
+ static int
+@@ -746,12 +744,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ {
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+- int ret;
+-
+- if (args->size > XDNA_MAX_CMD_BO_SIZE) {
+- XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
+- return ERR_PTR(-EINVAL);
+- }
+
+ if (args->size < sizeof(struct amdxdna_cmd)) {
+ XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
+@@ -765,17 +757,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ abo->type = AMDXDNA_BO_CMD;
+ abo->client = filp->driver_priv;
+
+- ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
+- if (ret) {
+- XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
+- goto release_obj;
+- }
+-
+ return abo;
+-
+-release_obj:
+- drm_gem_object_put(to_gobj(abo));
+- return ERR_PTR(ret);
+ }
+
+ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+@@ -872,6 +854,7 @@ struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
++ int ret;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdl);
+ if (!gobj) {
+@@ -880,9 +863,26 @@ struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ }
+
+ abo = to_xdna_obj(gobj);
+- if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
++ if (bo_type != AMDXDNA_BO_INVALID && abo->type != bo_type)
++ goto put_obj;
++
++ if (bo_type != AMDXDNA_BO_CMD || abo->mem.kva)
+ return abo;
+
++ if (abo->mem.size > SZ_32K) {
++ XDNA_ERR(xdna, "Cmd bo is too big %ld", abo->mem.size);
++ goto put_obj;
++ }
++
++ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
++ if (ret) {
++ XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
++ goto put_obj;
++ }
++
++ return abo;
++
++put_obj:
+ drm_gem_object_put(gobj);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From beeabb129fdafcd11a45e2eb967b3a3bbdfe2447 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 22:02:51 -0800
+Subject: accel/amdxdna: Switch to always use chained command
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit c68a6af400ca80596e8c37de0a1cb564aa9da8a4 ]
+
+Preempt commands are only supported when submitted as chained commands.
+To ensure preempt support works consistently, always submit commands in
+chained command format.
+
+Set force_cmdlist to true so that single commands are filled using the
+chained command layout, enabling correct handling of preempt commands.
+
+Fixes: 3a0ff7b98af4 ("accel/amdxdna: Support preemption requests")
+Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260206060251.4050512-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_ctx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
+index 37d05f2e986f9..6378a0bc7b6ea 100644
+--- a/drivers/accel/amdxdna/aie2_ctx.c
++++ b/drivers/accel/amdxdna/aie2_ctx.c
+@@ -23,9 +23,9 @@
+ #include "amdxdna_pci_drv.h"
+ #include "amdxdna_pm.h"
+
+-static bool force_cmdlist;
++static bool force_cmdlist = true;
+ module_param(force_cmdlist, bool, 0600);
+-MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default false)");
++MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default true)");
+
+ #define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */
+
+--
+2.51.0
+
--- /dev/null
+From 391ac79461491889110bfb589dfd24e39879954b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Feb 2026 13:19:46 -0800
+Subject: accel/amdxdna: Validate command buffer payload count
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 901ec3470994006bc8dd02399e16b675566c3416 ]
+
+The count field in the command header is used to determine the valid
+payload size. Verify that the valid payload does not exceed the remaining
+buffer space.
+
+Fixes: aac243092b70 ("accel/amdxdna: Add command execution")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260219211946.1920485-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/amdxdna_ctx.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
+index db3aa26fb55f0..e42eb12fc7c1b 100644
+--- a/drivers/accel/amdxdna/amdxdna_ctx.c
++++ b/drivers/accel/amdxdna/amdxdna_ctx.c
+@@ -104,7 +104,10 @@ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
+
+ if (size) {
+ count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
+- if (unlikely(count <= num_masks)) {
++ if (unlikely(count <= num_masks ||
++ count * sizeof(u32) +
++ offsetof(struct amdxdna_cmd, data[0]) >
++ abo->mem.size)) {
+ *size = 0;
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From 5bccff779d5d552a1cbe5f4b98da1cfab48ed551 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Oct 2025 08:30:11 +0300
+Subject: accel: ethosu: Fix shift overflow in cmd_to_addr()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 7be41fb00e2c2a823f271a8318b453ca11812f1e ]
+
+The "((cmd[0] & 0xff0000) << 16)" shift is zero. This was intended
+to be (((u64)cmd[0] & 0xff0000) << 16). Move the cast to the correct
+location.
+
+Fixes: 5a5e9c0228e6 ("accel: Add Arm Ethos-U NPU driver")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aQGmY64tWcwOGFP4@stanley.mountain
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ethosu/ethosu_gem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/accel/ethosu/ethosu_gem.c b/drivers/accel/ethosu/ethosu_gem.c
+index 473b5f5d75144..7b073116314ba 100644
+--- a/drivers/accel/ethosu/ethosu_gem.c
++++ b/drivers/accel/ethosu/ethosu_gem.c
+@@ -154,7 +154,7 @@ static void cmd_state_init(struct cmd_state *st)
+
+ static u64 cmd_to_addr(u32 *cmd)
+ {
+- return ((u64)((cmd[0] & 0xff0000) << 16)) | cmd[1];
++ return (((u64)cmd[0] & 0xff0000) << 16) | cmd[1];
+ }
+
+ static u64 dma_length(struct ethosu_validated_cmdstream_info *info,
+--
+2.51.0
+
--- /dev/null
+From 0e9c46e879da4a87961f84ea5320fd0250c1ca12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 11:17:28 +0000
+Subject: ALSA: hda: cs35l56: Fix signedness error in cs35l56_hda_posture_put()
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 003ce8c9b2ca28fbb4860651e76fb1c9a91f2ea1 ]
+
+In cs35l56_hda_posture_put() assign ucontrol->value.integer.value[0] to
+a long instead of an unsigned long. ucontrol->value.integer.value[0] is
+a long.
+
+This fixes the sparse warning:
+
+sound/hda/codecs/side-codecs/cs35l56_hda.c:256:20: warning: unsigned value
+that used to be signed checked against zero?
+sound/hda/codecs/side-codecs/cs35l56_hda.c:252:29: signed value source
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Fixes: 73cfbfa9caea8 ("ALSA: hda/cs35l56: Add driver for Cirrus Logic CS35L56 amplifier")
+Link: https://patch.msgid.link/20260226111728.1700431-1-rf@opensource.cirrus.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/hda/codecs/side-codecs/cs35l56_hda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/hda/codecs/side-codecs/cs35l56_hda.c b/sound/hda/codecs/side-codecs/cs35l56_hda.c
+index f7ba92e119578..32d734bf2fdf8 100644
+--- a/sound/hda/codecs/side-codecs/cs35l56_hda.c
++++ b/sound/hda/codecs/side-codecs/cs35l56_hda.c
+@@ -249,7 +249,7 @@ static int cs35l56_hda_posture_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+- unsigned long pos = ucontrol->value.integer.value[0];
++ long pos = ucontrol->value.integer.value[0];
+ bool changed;
+ int ret;
+
+--
+2.51.0
+
--- /dev/null
+From 3d6963bcab2fd774202f328af39ef2a919f95c73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 21:58:48 +1030
+Subject: ALSA: scarlett2: Fix DSP filter control array handling
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 1d241483368f2fd87fbaba64d6aec6bad3a1e12e ]
+
+scarlett2_add_dsp_ctls() was incorrectly storing the precomp and PEQ
+filter coefficient control pointers into the precomp_flt_switch_ctls
+and peq_flt_switch_ctls arrays instead of the intended targets
+precomp_flt_ctls and peq_flt_ctls. Pass NULL instead, as the filter
+coefficient control pointers are not used, and remove the unused
+precomp_flt_ctls and peq_flt_ctls arrays from struct scarlett2_data.
+
+Additionally, scarlett2_update_filter_values() was reading
+dsp_input_count * peq_flt_count values for
+SCARLETT2_CONFIG_PEQ_FLT_SWITCH, but the peq_flt_switch array is
+indexed only by dsp_input_count (one switch per DSP input, not per
+filter). Fix the read count.
+
+Fixes: b64678eb4e70 ("ALSA: scarlett2: Add DSP controls")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Link: https://patch.msgid.link/86497b71db060677d97c38a6ce5f89bb3b25361b.1771581197.git.g@b4.vu
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/mixer_scarlett2.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+index 88b7e42d159e0..7b31504c5f24c 100644
+--- a/sound/usb/mixer_scarlett2.c
++++ b/sound/usb/mixer_scarlett2.c
+@@ -1328,8 +1328,6 @@ struct scarlett2_data {
+ struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
+ struct snd_kcontrol *mix_ctls[SCARLETT2_MIX_MAX];
+ struct snd_kcontrol *compressor_ctls[SCARLETT2_COMPRESSOR_CTLS_MAX];
+- struct snd_kcontrol *precomp_flt_ctls[SCARLETT2_PRECOMP_FLT_CTLS_MAX];
+- struct snd_kcontrol *peq_flt_ctls[SCARLETT2_PEQ_FLT_CTLS_MAX];
+ struct snd_kcontrol *precomp_flt_switch_ctls[SCARLETT2_DSP_SWITCH_MAX];
+ struct snd_kcontrol *peq_flt_switch_ctls[SCARLETT2_DSP_SWITCH_MAX];
+ struct snd_kcontrol *direct_monitor_ctl;
+@@ -3447,7 +3445,6 @@ static int scarlett2_update_autogain(struct usb_mixer_interface *mixer)
+ private->autogain_status[i] =
+ private->num_autogain_status_texts - 1;
+
+-
+ for (i = 0; i < SCARLETT2_AG_TARGET_COUNT; i++)
+ if (scarlett2_has_config_item(private,
+ scarlett2_ag_target_configs[i])) {
+@@ -5372,8 +5369,7 @@ static int scarlett2_update_filter_values(struct usb_mixer_interface *mixer)
+
+ err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_PEQ_FLT_SWITCH,
+- info->dsp_input_count * info->peq_flt_count,
+- private->peq_flt_switch);
++ info->dsp_input_count, private->peq_flt_switch);
+ if (err < 0)
+ return err;
+
+@@ -6546,7 +6542,7 @@ static int scarlett2_add_dsp_ctls(struct usb_mixer_interface *mixer, int i)
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_precomp_flt_ctl,
+ i * info->precomp_flt_count + j,
+- 1, s, &private->precomp_flt_switch_ctls[j]);
++ 1, s, NULL);
+ if (err < 0)
+ return err;
+ }
+@@ -6556,7 +6552,7 @@ static int scarlett2_add_dsp_ctls(struct usb_mixer_interface *mixer, int i)
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_peq_flt_ctl,
+ i * info->peq_flt_count + j,
+- 1, s, &private->peq_flt_switch_ctls[j]);
++ 1, s, NULL);
+ if (err < 0)
+ return err;
+ }
+--
+2.51.0
+
--- /dev/null
+From 6554ac98bd13b6da4e21f0cd1827db4b8669e5ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:36:35 +1030
+Subject: ALSA: usb-audio: Add QUIRK_FLAG_SKIP_IFACE_SETUP
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 38c322068a26a01d7ff64da92179e68cdde9860b ]
+
+Add a quirk flag to skip the usb_set_interface(),
+snd_usb_init_pitch(), and snd_usb_init_sample_rate() calls in
+__snd_usb_parse_audio_interface(). These are redundant with
+snd_usb_endpoint_prepare() at stream-open time.
+
+Enable the quirk for Focusrite devices, as init_sample_rate(rate_max)
+sets 192kHz during probing, which disables the internal mixer and Air
+and Safe modes.
+
+Fixes: 16f1f838442d ("Revert "ALSA: usb-audio: Drop superfluous interface setup at parsing"")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/65a7909b15f9feb76c2a6f4f8814c240ddc50737.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 3 ++-
+ sound/usb/stream.c | 3 +++
+ sound/usb/usbaudio.h | 6 ++++++
+ 3 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9cc5165510182..a89ea2233180a 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2422,7 +2422,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- 0),
++ QUIRK_FLAG_SKIP_IFACE_SETUP),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+@@ -2504,6 +2504,7 @@ static const char *const snd_usb_audio_quirk_flag_names[] = {
+ QUIRK_STRING_ENTRY(MIC_RES_384),
+ QUIRK_STRING_ENTRY(MIXER_PLAYBACK_MIN_MUTE),
+ QUIRK_STRING_ENTRY(MIXER_CAPTURE_MIN_MUTE),
++ QUIRK_STRING_ENTRY(SKIP_IFACE_SETUP),
+ NULL
+ };
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index ec7d756d78d17..421e94b233e17 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -1259,6 +1259,9 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ set_iface_first = true;
+
+ /* try to set the interface... */
++ if (chip->quirk_flags & QUIRK_FLAG_SKIP_IFACE_SETUP)
++ continue;
++
+ usb_set_interface(chip->dev, iface_no, 0);
+ if (set_iface_first)
+ usb_set_interface(chip->dev, iface_no, altno);
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 79978cae9799c..085530cf62d92 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -224,6 +224,10 @@ extern bool snd_usb_skip_validation;
+ * playback value represents muted state instead of minimum audible volume
+ * QUIRK_FLAG_MIXER_CAPTURE_MIN_MUTE
+ * Similar to QUIRK_FLAG_MIXER_PLAYBACK_MIN_MUTE, but for capture streams
++ * QUIRK_FLAG_SKIP_IFACE_SETUP
++ * Skip the probe-time interface setup (usb_set_interface,
++ * init_pitch, init_sample_rate); redundant with
++ * snd_usb_endpoint_prepare() at stream-open time
+ */
+
+ enum {
+@@ -253,6 +257,7 @@ enum {
+ QUIRK_TYPE_MIC_RES_384 = 23,
+ QUIRK_TYPE_MIXER_PLAYBACK_MIN_MUTE = 24,
+ QUIRK_TYPE_MIXER_CAPTURE_MIN_MUTE = 25,
++ QUIRK_TYPE_SKIP_IFACE_SETUP = 26,
+ /* Please also edit snd_usb_audio_quirk_flag_names */
+ };
+
+@@ -284,5 +289,6 @@ enum {
+ #define QUIRK_FLAG_MIC_RES_384 QUIRK_FLAG(MIC_RES_384)
+ #define QUIRK_FLAG_MIXER_PLAYBACK_MIN_MUTE QUIRK_FLAG(MIXER_PLAYBACK_MIN_MUTE)
+ #define QUIRK_FLAG_MIXER_CAPTURE_MIN_MUTE QUIRK_FLAG(MIXER_CAPTURE_MIN_MUTE)
++#define QUIRK_FLAG_SKIP_IFACE_SETUP QUIRK_FLAG(SKIP_IFACE_SETUP)
+
+ #endif /* __USBAUDIO_H */
+--
+2.51.0
+
--- /dev/null
+From 5e197fa24c8c22b9edccbab217dd2439f8a143b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 +0100
+Subject: ALSA: usb-audio: Cap the packet size pre-calculations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7fe8dec3f628e9779f1631576f8e693370050348 ]
+
+We calculate the possible packet sizes beforehand for adaptive and
+synchronous endpoints, but we didn't take care of the max frame size
+for those pre-calculated values. When a device or a bus limits the
+packet size, a high sample rate or a high number of channels may lead
+to the packet sizes that are larger than the given limit, which
+results in an error from the USB core at submitting URBs.
+
+As a simple workaround, just add the sanity checks of pre-calculated
+packet sizes to have the upper boundary of ep->maxframesize.
+
+Fixes: f0bd62b64016 ("ALSA: usb-audio: Improve frames size computation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221076
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 1eaf52d1ae9c7..bd035ab414531 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1374,6 +1374,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ return -EINVAL;
+ }
+
++ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
++ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
++
+ /* calculate the frequency in 16.16 format */
+ ep->freqm = ep->freqn;
+ ep->freqshift = INT_MIN;
+--
+2.51.0
+
--- /dev/null
+From 775bac740ff433d9973042a7a5fd44a4247af4b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) disables the
+Air and Safe modes.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 86c329632e396..9cc5165510182 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2422,7 +2422,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.51.0
+
--- /dev/null
+From 62b5c71b27f328e41f9a8ea07cd59525a76ea665 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:31 +0100
+Subject: ALSA: usb-audio: Use inclusive terms
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 4e9113c533acee2ba1f72fd68ee6ecd36b64484e ]
+
+Replace the remaining term with an inclusive one; it's only this
+function name that we overlooked in the previous conversion.
+
+Fixes: 53837b4ac2bd ("ALSA: usb-audio: Replace slave/master terms")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index bd035ab414531..686f095290673 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -160,8 +160,8 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
+ * This won't be used for implicit feedback which takes the packet size
+ * returned from the sync source
+ */
+-static int slave_next_packet_size(struct snd_usb_endpoint *ep,
+- unsigned int avail)
++static int synced_next_packet_size(struct snd_usb_endpoint *ep,
++ unsigned int avail)
+ {
+ unsigned int phase;
+ int ret;
+@@ -227,7 +227,7 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ }
+
+ if (ep->sync_source)
+- return slave_next_packet_size(ep, avail);
++ return synced_next_packet_size(ep, avail);
+ else
+ return next_packet_size(ep, avail);
+ }
+--
+2.51.0
+
--- /dev/null
+From a385b4879c6e7d0487af5bb4a304906563bc67b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:43:49 +0100
+Subject: ALSA: usb: qcom: Correct parameter comment for
+ uaudio_transfer_buffer_setup()
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1d6452a0ce78cd3f4e48943b5ba21d273a658298 ]
+
+At fixing the memory leak of xfer buffer, we forgot to update the
+corresponding comment, too. This resulted in a kernel-doc warning
+with W=1. Let's correct it.
+
+Fixes: 5c7ef5001292 ("ALSA: qc_audio_offload: avoid leaking xfer_buf allocation")
+Link: https://patch.msgid.link/20260226154414.1081568-4-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/qcom/qc_audio_offload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/qcom/qc_audio_offload.c b/sound/usb/qcom/qc_audio_offload.c
+index cfb30a195364a..297490f0f5874 100644
+--- a/sound/usb/qcom/qc_audio_offload.c
++++ b/sound/usb/qcom/qc_audio_offload.c
+@@ -1007,7 +1007,7 @@ static int enable_audio_stream(struct snd_usb_substream *subs,
+ /**
+ * uaudio_transfer_buffer_setup() - fetch and populate xfer buffer params
+ * @subs: usb substream
+- * @xfer_buf: xfer buf to be allocated
++ * @xfer_buf_cpu: xfer buf to be allocated
+ * @xfer_buf_len: size of allocation
+ * @mem_info: QMI response info
+ *
+--
+2.51.0
+
--- /dev/null
+From 377e6d3449443e5c90380bfded47121a2eddcd2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 22:10:11 +0000
+Subject: arm64: io: Extract user memory type in ioremap_prot()
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit 8f098037139b294050053123ab2bc0f819d08932 ]
+
+The only caller of ioremap_prot() outside of the generic ioremap()
+implementation is generic_access_phys(), which passes a 'pgprot_t' value
+determined from the user mapping of the target 'pfn' being accessed by
+the kernel. On arm64, the 'pgprot_t' contains all of the non-address
+bits from the pte, including the permission controls, and so we end up
+returning a new user mapping from ioremap_prot() which faults when
+accessed from the kernel on systems with PAN:
+
+ | Unable to handle kernel read from unreadable memory at virtual address ffff80008ea89000
+ | ...
+ | Call trace:
+ | __memcpy_fromio+0x80/0xf8
+ | generic_access_phys+0x20c/0x2b8
+ | __access_remote_vm+0x46c/0x5b8
+ | access_remote_vm+0x18/0x30
+ | environ_read+0x238/0x3e8
+ | vfs_read+0xe4/0x2b0
+ | ksys_read+0xcc/0x178
+ | __arm64_sys_read+0x4c/0x68
+
+Extract only the memory type from the user 'pgprot_t' in ioremap_prot()
+and assert that we're being passed a user mapping, to protect us against
+any changes in future that may require additional handling. To avoid
+falsely flagging users of ioremap(), provide our own ioremap() macro
+which simply wraps __ioremap_prot().
+
+Cc: Zeng Heng <zengheng4@huawei.com>
+Cc: Jinjiang Tu <tujinjiang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: 893dea9ccd08 ("arm64: Add HAVE_IOREMAP_PROT support")
+Reported-by: Jinjiang Tu <tujinjiang@huawei.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/io.h | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index cd2fddfe814ac..8cbd1e96fd50b 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -266,10 +266,23 @@ typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
+ int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
+ void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);
+
+-#define ioremap_prot __ioremap_prot
++static inline void __iomem *ioremap_prot(phys_addr_t phys, size_t size,
++ pgprot_t user_prot)
++{
++ pgprot_t prot;
++ ptdesc_t user_prot_val = pgprot_val(user_prot);
++
++ if (WARN_ON_ONCE(!(user_prot_val & PTE_USER)))
++ return NULL;
+
+-#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
++ prot = __pgprot_modify(PAGE_KERNEL, PTE_ATTRINDX_MASK,
++ user_prot_val & PTE_ATTRINDX_MASK);
++ return __ioremap_prot(phys, size, prot);
++}
++#define ioremap_prot ioremap_prot
+
++#define ioremap(addr, size) \
++ __ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+ #define ioremap_wc(addr, size) \
+ __ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
+ #define ioremap_np(addr, size) \
+--
+2.51.0
+
--- /dev/null
+From ec733a542138ce8b437d0fddf4c60cbd79a06e02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 22:10:10 +0000
+Subject: arm64: io: Rename ioremap_prot() to __ioremap_prot()
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit f6bf47ab32e0863df50f5501d207dcdddb7fc507 ]
+
+Rename our ioremap_prot() implementation to __ioremap_prot() and convert
+all arch-internal callers over to the new function.
+
+ioremap_prot() remains as a #define to __ioremap_prot() for
+generic_access_phys() and will be subsequently extended to handle user
+permissions in 'prot'.
+
+Cc: Zeng Heng <zengheng4@huawei.com>
+Cc: Jinjiang Tu <tujinjiang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: 8f098037139b ("arm64: io: Extract user memory type in ioremap_prot()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/io.h | 11 ++++++-----
+ arch/arm64/kernel/acpi.c | 2 +-
+ arch/arm64/mm/ioremap.c | 6 +++---
+ 3 files changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index 83e03abbb2ca9..cd2fddfe814ac 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -264,19 +264,20 @@ __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+ typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
+ pgprot_t *prot);
+ int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
++void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);
+
+-#define ioremap_prot ioremap_prot
++#define ioremap_prot __ioremap_prot
+
+ #define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+
+ #define ioremap_wc(addr, size) \
+- ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
++ __ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
+ #define ioremap_np(addr, size) \
+- ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
++ __ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+
+
+ #define ioremap_encrypted(addr, size) \
+- ioremap_prot((addr), (size), PAGE_KERNEL)
++ __ioremap_prot((addr), (size), PAGE_KERNEL)
+
+ /*
+ * io{read,write}{16,32,64}be() macros
+@@ -297,7 +298,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
+ if (pfn_is_map_memory(__phys_to_pfn(addr)))
+ return (void __iomem *)__phys_to_virt(addr);
+
+- return ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
++ return __ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
+index af90128cfed56..a9d884fd1d001 100644
+--- a/arch/arm64/kernel/acpi.c
++++ b/arch/arm64/kernel/acpi.c
+@@ -377,7 +377,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+ prot = __acpi_get_writethrough_mem_attribute();
+ }
+ }
+- return ioremap_prot(phys, size, prot);
++ return __ioremap_prot(phys, size, prot);
+ }
+
+ /*
+diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
+index 10e246f112710..1e4794a2af7d6 100644
+--- a/arch/arm64/mm/ioremap.c
++++ b/arch/arm64/mm/ioremap.c
+@@ -14,8 +14,8 @@ int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook)
+ return 0;
+ }
+
+-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+- pgprot_t pgprot)
++void __iomem *__ioremap_prot(phys_addr_t phys_addr, size_t size,
++ pgprot_t pgprot)
+ {
+ unsigned long last_addr = phys_addr + size - 1;
+
+@@ -38,7 +38,7 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+
+ return generic_ioremap_prot(phys_addr, size, pgprot);
+ }
+-EXPORT_SYMBOL(ioremap_prot);
++EXPORT_SYMBOL(__ioremap_prot);
+
+ /*
+ * Must be called after early_fixmap_init
+--
+2.51.0
+
--- /dev/null
+From 8e0fc9dac42f293401a64c7e00d46957b8c8a7ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:47:52 +0100
+Subject: ASoC: SDCA: Fix comments for sdca_irq_request()
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 71c1978ab6d2c6d48c31311855f1a85377c152ae ]
+
+The kernel-doc comments for sdca_irq_request() contained some typos
+that lead to build warnings with W=1. Let's correct them.
+
+Fixes: b126394d9ec6 ("ASoC: SDCA: Generic interrupt support")
+Acked-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260226154753.1083320-1-tiwai@suse.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sdca/sdca_interrupts.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/sdca/sdca_interrupts.c b/sound/soc/sdca/sdca_interrupts.c
+index ff3a7e405fdcb..49b675e601433 100644
+--- a/sound/soc/sdca/sdca_interrupts.c
++++ b/sound/soc/sdca/sdca_interrupts.c
+@@ -246,9 +246,9 @@ static int sdca_irq_request_locked(struct device *dev,
+ }
+
+ /**
+- * sdca_request_irq - request an individual SDCA interrupt
++ * sdca_irq_request - request an individual SDCA interrupt
+ * @dev: Pointer to the struct device against which things should be allocated.
+- * @interrupt_info: Pointer to the interrupt information structure.
++ * @info: Pointer to the interrupt information structure.
+ * @sdca_irq: SDCA interrupt position.
+ * @name: Name to be given to the IRQ.
+ * @handler: A callback thread function to be called for the IRQ.
+--
+2.51.0
+
--- /dev/null
+From 241f6b562ad95e20352bef995557d075b558e9b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 19:15:02 +0800
+Subject: bpf: Add bitwise tracking for BPF_END
+
+From: Tianci Cao <ziye@zju.edu.cn>
+
+[ Upstream commit 9d21199842247ab05c675fb9b6c6ca393a5c0024 ]
+
+This patch implements bitwise tracking (tnum analysis) for BPF_END
+(byte swap) operation.
+
+Currently, the BPF verifier does not track value for BPF_END operation,
+treating the result as completely unknown. This limits the verifier's
+ability to prove safety of programs that perform endianness conversions,
+which are common in networking code.
+
+For example, the following code pattern for port number validation:
+
+int test(struct pt_regs *ctx) {
+ __u64 x = bpf_get_prandom_u32();
+ x &= 0x3f00; // Range: [0, 0x3f00], var_off: (0x0; 0x3f00)
+ x = bswap16(x); // Should swap to range [0, 0x3f], var_off: (0x0; 0x3f)
+ if (x > 0x3f) goto trap;
+ return 0;
+trap:
+ return *(u64 *)NULL; // Should be unreachable
+}
+
+Currently generates verifier output:
+
+1: (54) w0 &= 16128 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))
+2: (d7) r0 = bswap16 r0 ; R0=scalar()
+3: (25) if r0 > 0x3f goto pc+2 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=63,var_off=(0x0; 0x3f))
+
+Without this patch, even though the verifier knows `x` has certain bits
+set, after bswap16, it loses all tracking information and treats `x`
+as having a completely unknown value [0, 65535].
+
+According to the BPF instruction set[1], there are 3 kinds of BPF_END:
+
+1. `bswap(16|32|64)`: opcode=0xd7 (BPF_END | BPF_ALU64 | BPF_TO_LE)
+ - do unconditional swap
+2. `le(16|32|64)`: opcode=0xd4 (BPF_END | BPF_ALU | BPF_TO_LE)
+ - on big-endian: do swap
+ - on little-endian: truncation (16/32-bit) or no-op (64-bit)
+3. `be(16|32|64)`: opcode=0xdc (BPF_END | BPF_ALU | BPF_TO_BE)
+ - on little-endian: do swap
+ - on big-endian: truncation (16/32-bit) or no-op (64-bit)
+
+Since BPF_END operations are inherently bit-wise permutations, tnum
+(bitwise tracking) offers the most efficient and precise mechanism
+for value analysis. By implementing `tnum_bswap16`, `tnum_bswap32`,
+and `tnum_bswap64`, we can derive exact `var_off` values concisely,
+directly reflecting the bit-level changes.
+
+Here is the overview of changes:
+
+1. In `tnum_bswap(16|32|64)` (kernel/bpf/tnum.c):
+
+Call `swab(16|32|64)` function on the value and mask of `var_off`, and
+do truncation for 16/32-bit cases.
+
+2. In `adjust_scalar_min_max_vals` (kernel/bpf/verifier.c):
+
+Call helper function `scalar_byte_swap`.
+- Only do byte swap when
+ * alu64 (unconditional swap) OR
+ * switching between big-endian and little-endian machines.
+- If need do byte swap:
+ * Firstly call `tnum_bswap(16|32|64)` to update `var_off`.
+ * Then reset the bound since byte swap scrambles the range.
+- For 16/32-bit cases, truncate dst register to match the swapped size.
+
+This enables better verification of networking code that frequently uses
+byte swaps for protocol processing, reducing false positive rejections.
+
+[1] https://www.kernel.org/doc/Documentation/bpf/standardization/instruction-set.rst
+
+Co-developed-by: Shenghao Yuan <shenghaoyuan0928@163.com>
+Signed-off-by: Shenghao Yuan <shenghaoyuan0928@163.com>
+Co-developed-by: Yazhou Tang <tangyazhou518@outlook.com>
+Signed-off-by: Yazhou Tang <tangyazhou518@outlook.com>
+Signed-off-by: Tianci Cao <ziye@zju.edu.cn>
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/r/20260204111503.77871-2-ziye@zju.edu.cn
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: efc11a667878 ("bpf: Improve bounds when tnum has a single possible value")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/tnum.h | 5 ++++
+ kernel/bpf/tnum.c | 16 ++++++++++++
+ kernel/bpf/verifier.c | 60 ++++++++++++++++++++++++++++++++++++++++---
+ 3 files changed, 78 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/tnum.h b/include/linux/tnum.h
+index c52b862dad45b..fa4654ffb6217 100644
+--- a/include/linux/tnum.h
++++ b/include/linux/tnum.h
+@@ -63,6 +63,11 @@ struct tnum tnum_union(struct tnum t1, struct tnum t2);
+ /* Return @a with all but the lowest @size bytes cleared */
+ struct tnum tnum_cast(struct tnum a, u8 size);
+
++/* Swap the bytes of a tnum */
++struct tnum tnum_bswap16(struct tnum a);
++struct tnum tnum_bswap32(struct tnum a);
++struct tnum tnum_bswap64(struct tnum a);
++
+ /* Returns true if @a is a known constant */
+ static inline bool tnum_is_const(struct tnum a)
+ {
+diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
+index f8e70e9c3998d..26fbfbb017001 100644
+--- a/kernel/bpf/tnum.c
++++ b/kernel/bpf/tnum.c
+@@ -8,6 +8,7 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/tnum.h>
++#include <linux/swab.h>
+
+ #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
+ /* A completely unknown value */
+@@ -253,3 +254,18 @@ struct tnum tnum_const_subreg(struct tnum a, u32 value)
+ {
+ return tnum_with_subreg(a, tnum_const(value));
+ }
++
++struct tnum tnum_bswap16(struct tnum a)
++{
++ return TNUM(swab16(a.value & 0xFFFF), swab16(a.mask & 0xFFFF));
++}
++
++struct tnum tnum_bswap32(struct tnum a)
++{
++ return TNUM(swab32(a.value & 0xFFFFFFFF), swab32(a.mask & 0xFFFFFFFF));
++}
++
++struct tnum tnum_bswap64(struct tnum a)
++{
++ return TNUM(swab64(a.value), swab64(a.mask));
++}
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 783d984d7884d..0f871db07aadf 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -15458,6 +15458,48 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
+ __update_reg_bounds(dst_reg);
+ }
+
++static void scalar_byte_swap(struct bpf_reg_state *dst_reg, struct bpf_insn *insn)
++{
++ /*
++ * Byte swap operation - update var_off using tnum_bswap.
++ * Three cases:
++ * 1. bswap(16|32|64): opcode=0xd7 (BPF_END | BPF_ALU64 | BPF_TO_LE)
++ * unconditional swap
++ * 2. to_le(16|32|64): opcode=0xd4 (BPF_END | BPF_ALU | BPF_TO_LE)
++ * swap on big-endian, truncation or no-op on little-endian
++ * 3. to_be(16|32|64): opcode=0xdc (BPF_END | BPF_ALU | BPF_TO_BE)
++ * swap on little-endian, truncation or no-op on big-endian
++ */
++
++ bool alu64 = BPF_CLASS(insn->code) == BPF_ALU64;
++ bool to_le = BPF_SRC(insn->code) == BPF_TO_LE;
++ bool is_big_endian;
++#ifdef CONFIG_CPU_BIG_ENDIAN
++ is_big_endian = true;
++#else
++ is_big_endian = false;
++#endif
++ /* Apply bswap if alu64 or switch between big-endian and little-endian machines */
++ bool need_bswap = alu64 || (to_le == is_big_endian);
++
++ if (need_bswap) {
++ if (insn->imm == 16)
++ dst_reg->var_off = tnum_bswap16(dst_reg->var_off);
++ else if (insn->imm == 32)
++ dst_reg->var_off = tnum_bswap32(dst_reg->var_off);
++ else if (insn->imm == 64)
++ dst_reg->var_off = tnum_bswap64(dst_reg->var_off);
++ /*
++ * Byteswap scrambles the range, so we must reset bounds.
++ * Bounds will be re-derived from the new tnum later.
++ */
++ __mark_reg_unbounded(dst_reg);
++ }
++ /* For bswap16/32, truncate dst register to match the swapped size */
++ if (insn->imm == 16 || insn->imm == 32)
++ coerce_reg_to_size(dst_reg, insn->imm / 8);
++}
++
+ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
+ const struct bpf_reg_state *src_reg)
+ {
+@@ -15484,6 +15526,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
+ case BPF_XOR:
+ case BPF_OR:
+ case BPF_MUL:
++ case BPF_END:
+ return true;
+
+ /* Shift operators range is only computable if shift dimension operand
+@@ -15632,12 +15675,23 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
+ else
+ scalar_min_max_arsh(dst_reg, &src_reg);
+ break;
++ case BPF_END:
++ scalar_byte_swap(dst_reg, insn);
++ break;
+ default:
+ break;
+ }
+
+- /* ALU32 ops are zero extended into 64bit register */
+- if (alu32)
++ /*
++ * ALU32 ops are zero extended into 64bit register.
++ *
++ * BPF_END is already handled inside the helper (truncation),
++ * so skip zext here to avoid unexpected zero extension.
++ * e.g., le64: opcode=(BPF_END|BPF_ALU|BPF_TO_LE), imm=0x40
++ * This is a 64bit byte swap operation with alu32==true,
++ * but we should not zero extend the result.
++ */
++ if (alu32 && opcode != BPF_END)
+ zext_32_to_64(dst_reg);
+ reg_bounds_sync(dst_reg);
+ return 0;
+@@ -15817,7 +15871,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ }
+
+ /* check dest operand */
+- if (opcode == BPF_NEG &&
++ if ((opcode == BPF_NEG || opcode == BPF_END) &&
+ regs[insn->dst_reg].type == SCALAR_VALUE) {
+ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
+ err = err ?: adjust_scalar_min_max_vals(env, insn,
+--
+2.51.0
+
--- /dev/null
+From d16d4c625a823cacdd0bb0799208dd9144d21cb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 07:55:25 +0000
+Subject: bpf, arm64: Force 8-byte alignment for JIT buffer to prevent atomic
+ tearing
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit ef06fd16d48704eac868441d98d4ef083d8f3d07 ]
+
+struct bpf_plt contains a u64 target field. Currently, the BPF JIT
+allocator requests an alignment of 4 bytes (sizeof(u32)) for the JIT
+buffer.
+
+Because the base address of the JIT buffer can be 4-byte aligned (e.g.,
+ending in 0x4 or 0xc), the relative padding logic in build_plt() fails
+to ensure that target lands on an 8-byte boundary.
+
+This leads to two issues:
+1. UBSAN reports misaligned-access warnings when dereferencing the
+ structure.
+2. More critically, target is updated concurrently via WRITE_ONCE() in
+ bpf_arch_text_poke() while the JIT'd code executes ldr. On arm64,
+ 64-bit loads/stores are only guaranteed to be single-copy atomic if
+ they are 64-bit aligned. A misaligned target risks a torn read,
+ causing the JIT to jump to a corrupted address.
+
+Fix this by increasing the allocation alignment requirement to 8 bytes
+(sizeof(u64)) in bpf_jit_binary_pack_alloc(). This anchors the base of
+the JIT buffer to an 8-byte boundary, allowing the relative padding math
+in build_plt() to correctly align the target field.
+
+Fixes: b2ad54e1533e ("bpf, arm64: Implement bpf_arch_text_poke() for arm64")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Acked-by: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20260226075525.233321-1-tabba@google.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/net/bpf_jit_comp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 1d657bd3ce655..f9fcd699f2e94 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -2126,7 +2126,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
+ image_size = extable_offset + extable_size;
+ ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
+- sizeof(u32), &header, &image_ptr,
++ sizeof(u64), &header, &image_ptr,
+ jit_fill_hole);
+ if (!ro_header) {
+ prog = orig_prog;
+--
+2.51.0
+
--- /dev/null
+From 739e37c4d1e949ab2b6bd3948cece3768c4154cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:14:55 +0800
+Subject: bpf: Fix race in cpumap on PREEMPT_RT
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 869c63d5975d55e97f6b168e885452b3da20ea47 ]
+
+On PREEMPT_RT kernels, the per-CPU xdp_bulk_queue (bq) can be accessed
+concurrently by multiple preemptible tasks on the same CPU.
+
+The original code assumes bq_enqueue() and __cpu_map_flush() run
+atomically with respect to each other on the same CPU, relying on
+local_bh_disable() to prevent preemption. However, on PREEMPT_RT,
+local_bh_disable() only calls migrate_disable() (when
+PREEMPT_RT_NEEDS_BH_LOCK is not set) and does not disable
+preemption, which allows CFS scheduling to preempt a task during
+bq_flush_to_queue(), enabling another task on the same CPU to enter
+bq_enqueue() and operate on the same per-CPU bq concurrently.
+
+This leads to several races:
+
+1. Double __list_del_clearprev(): after bq->count is reset in
+ bq_flush_to_queue(), a preempting task can call bq_enqueue() ->
+ bq_flush_to_queue() on the same bq when bq->count reaches
+ CPU_MAP_BULK_SIZE. Both tasks then call __list_del_clearprev()
+ on the same bq->flush_node, the second call dereferences the
+ prev pointer that was already set to NULL by the first.
+
+2. bq->count and bq->q[] races: concurrent bq_enqueue() can corrupt
+ the packet queue while bq_flush_to_queue() is processing it.
+
+The race between task A (__cpu_map_flush -> bq_flush_to_queue) and
+task B (bq_enqueue -> bq_flush_to_queue) on the same CPU:
+
+ Task A (xdp_do_flush) Task B (cpu_map_enqueue)
+ ---------------------- ------------------------
+ bq_flush_to_queue(bq)
+ spin_lock(&q->producer_lock)
+ /* flush bq->q[] to ptr_ring */
+ bq->count = 0
+ spin_unlock(&q->producer_lock)
+ bq_enqueue(rcpu, xdpf)
+ <-- CFS preempts Task A --> bq->q[bq->count++] = xdpf
+ /* ... more enqueues until full ... */
+ bq_flush_to_queue(bq)
+ spin_lock(&q->producer_lock)
+ /* flush to ptr_ring */
+ spin_unlock(&q->producer_lock)
+ __list_del_clearprev(flush_node)
+ /* sets flush_node.prev = NULL */
+ <-- Task A resumes -->
+ __list_del_clearprev(flush_node)
+ flush_node.prev->next = ...
+ /* prev is NULL -> kernel oops */
+
+Fix this by adding a local_lock_t to xdp_bulk_queue and acquiring it
+in bq_enqueue() and __cpu_map_flush(). These paths already run under
+local_bh_disable(), so use local_lock_nested_bh() which on non-RT is
+a pure annotation with no overhead, and on PREEMPT_RT provides a
+per-CPU sleeping lock that serializes access to the bq.
+
+To reproduce, insert an mdelay(100) between bq->count = 0 and
+__list_del_clearprev() in bq_flush_to_queue(), then run reproducer
+provided by syzkaller.
+
+Fixes: 3253cb49cbad ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
+Reported-by: syzbot+2b3391f44313b3983e91@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/69369331.a70a0220.38f243.009d.GAE@google.com/T/
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
+Link: https://lore.kernel.org/r/20260225121459.183121-2-jiayuan.chen@linux.dev
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/cpumap.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index 703e5df1f4ef9..306bf98378041 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -29,6 +29,7 @@
+ #include <linux/sched.h>
+ #include <linux/workqueue.h>
+ #include <linux/kthread.h>
++#include <linux/local_lock.h>
+ #include <linux/completion.h>
+ #include <trace/events/xdp.h>
+ #include <linux/btf_ids.h>
+@@ -52,6 +53,7 @@ struct xdp_bulk_queue {
+ struct list_head flush_node;
+ struct bpf_cpu_map_entry *obj;
+ unsigned int count;
++ local_lock_t bq_lock;
+ };
+
+ /* Struct for every remote "destination" CPU in map */
+@@ -451,6 +453,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+ for_each_possible_cpu(i) {
+ bq = per_cpu_ptr(rcpu->bulkq, i);
+ bq->obj = rcpu;
++ local_lock_init(&bq->bq_lock);
+ }
+
+ /* Alloc queue */
+@@ -717,6 +720,8 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
+ struct ptr_ring *q;
+ int i;
+
++ lockdep_assert_held(&bq->bq_lock);
++
+ if (unlikely(!bq->count))
+ return;
+
+@@ -744,11 +749,15 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
+ }
+
+ /* Runs under RCU-read-side, plus in softirq under NAPI protection.
+- * Thus, safe percpu variable access.
++ * Thus, safe percpu variable access. PREEMPT_RT relies on
++ * local_lock_nested_bh() to serialise access to the per-CPU bq.
+ */
+ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+ {
+- struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
++ struct xdp_bulk_queue *bq;
++
++ local_lock_nested_bh(&rcpu->bulkq->bq_lock);
++ bq = this_cpu_ptr(rcpu->bulkq);
+
+ if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
+ bq_flush_to_queue(bq);
+@@ -769,6 +778,8 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+
+ list_add(&bq->flush_node, flush_list);
+ }
++
++ local_unlock_nested_bh(&rcpu->bulkq->bq_lock);
+ }
+
+ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+@@ -805,7 +816,9 @@ void __cpu_map_flush(struct list_head *flush_list)
+ struct xdp_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
++ local_lock_nested_bh(&bq->obj->bulkq->bq_lock);
+ bq_flush_to_queue(bq);
++ local_unlock_nested_bh(&bq->obj->bulkq->bq_lock);
+
+ /* If already running, costs spin_lock_irqsave + smb_mb */
+ wake_up_process(bq->obj->kthread);
+--
+2.51.0
+
--- /dev/null
+From 33b4f4976b3a591a3a07ca42a903462134d5f33e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:14:56 +0800
+Subject: bpf: Fix race in devmap on PREEMPT_RT
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 1872e75375c40add4a35990de3be77b5741c252c ]
+
+On PREEMPT_RT kernels, the per-CPU xdp_dev_bulk_queue (bq) can be
+accessed concurrently by multiple preemptible tasks on the same CPU.
+
+The original code assumes bq_enqueue() and __dev_flush() run atomically
+with respect to each other on the same CPU, relying on
+local_bh_disable() to prevent preemption. However, on PREEMPT_RT,
+local_bh_disable() only calls migrate_disable() (when
+PREEMPT_RT_NEEDS_BH_LOCK is not set) and does not disable
+preemption, which allows CFS scheduling to preempt a task during
+bq_xmit_all(), enabling another task on the same CPU to enter
+bq_enqueue() and operate on the same per-CPU bq concurrently.
+
+This leads to several races:
+
+1. Double-free / use-after-free on bq->q[]: bq_xmit_all() snapshots
+ cnt = bq->count, then iterates bq->q[0..cnt-1] to transmit frames.
+ If preempted after the snapshot, a second task can call bq_enqueue()
+ -> bq_xmit_all() on the same bq, transmitting (and freeing) the
+ same frames. When the first task resumes, it operates on stale
+ pointers in bq->q[], causing use-after-free.
+
+2. bq->count and bq->q[] corruption: concurrent bq_enqueue() modifying
+ bq->count and bq->q[] while bq_xmit_all() is reading them.
+
+3. dev_rx/xdp_prog teardown race: __dev_flush() clears bq->dev_rx and
+ bq->xdp_prog after bq_xmit_all(). If preempted between
+ bq_xmit_all() return and bq->dev_rx = NULL, a preempting
+ bq_enqueue() sees dev_rx still set (non-NULL), skips adding bq to
+ the flush_list, and enqueues a frame. When __dev_flush() resumes,
+ it clears dev_rx and removes bq from the flush_list, orphaning the
+ newly enqueued frame.
+
+4. __list_del_clearprev() on flush_node: similar to the cpumap race,
+ both tasks can call __list_del_clearprev() on the same flush_node,
+ the second dereferences the prev pointer already set to NULL.
+
+The race between task A (__dev_flush -> bq_xmit_all) and task B
+(bq_enqueue -> bq_xmit_all) on the same CPU:
+
+ Task A (xdp_do_flush) Task B (ndo_xdp_xmit redirect)
+ ---------------------- --------------------------------
+ __dev_flush(flush_list)
+ bq_xmit_all(bq)
+ cnt = bq->count /* e.g. 16 */
+ /* start iterating bq->q[] */
+ <-- CFS preempts Task A -->
+ bq_enqueue(dev, xdpf)
+ bq->count == DEV_MAP_BULK_SIZE
+ bq_xmit_all(bq, 0)
+ cnt = bq->count /* same 16! */
+ ndo_xdp_xmit(bq->q[])
+ /* frames freed by driver */
+ bq->count = 0
+ <-- Task A resumes -->
+ ndo_xdp_xmit(bq->q[])
+ /* use-after-free: frames already freed! */
+
+Fix this by adding a local_lock_t to xdp_dev_bulk_queue and acquiring
+it in bq_enqueue() and __dev_flush(). These paths already run under
+local_bh_disable(), so use local_lock_nested_bh() which on non-RT is
+a pure annotation with no overhead, and on PREEMPT_RT provides a
+per-CPU sleeping lock that serializes access to the bq.
+
+Fixes: 3253cb49cbad ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
+Link: https://lore.kernel.org/r/20260225121459.183121-3-jiayuan.chen@linux.dev
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 2984e938f94dc..3d619d01088e3 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -45,6 +45,7 @@
+ * types of devmap; only the lookup and insertion is different.
+ */
+ #include <linux/bpf.h>
++#include <linux/local_lock.h>
+ #include <net/xdp.h>
+ #include <linux/filter.h>
+ #include <trace/events/xdp.h>
+@@ -60,6 +61,7 @@ struct xdp_dev_bulk_queue {
+ struct net_device *dev_rx;
+ struct bpf_prog *xdp_prog;
+ unsigned int count;
++ local_lock_t bq_lock;
+ };
+
+ struct bpf_dtab_netdev {
+@@ -381,6 +383,8 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
+ int to_send = cnt;
+ int i;
+
++ lockdep_assert_held(&bq->bq_lock);
++
+ if (unlikely(!cnt))
+ return;
+
+@@ -425,10 +429,12 @@ void __dev_flush(struct list_head *flush_list)
+ struct xdp_dev_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
++ local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
+ bq_xmit_all(bq, XDP_XMIT_FLUSH);
+ bq->dev_rx = NULL;
+ bq->xdp_prog = NULL;
+ __list_del_clearprev(&bq->flush_node);
++ local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
+ }
+ }
+
+@@ -451,12 +457,16 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
+
+ /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
+ * variable access, and map elements stick around. See comment above
+- * xdp_do_flush() in filter.c.
++ * xdp_do_flush() in filter.c. PREEMPT_RT relies on local_lock_nested_bh()
++ * to serialise access to the per-CPU bq.
+ */
+ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx, struct bpf_prog *xdp_prog)
+ {
+- struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
++ struct xdp_dev_bulk_queue *bq;
++
++ local_lock_nested_bh(&dev->xdp_bulkq->bq_lock);
++ bq = this_cpu_ptr(dev->xdp_bulkq);
+
+ if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
+ bq_xmit_all(bq, 0);
+@@ -477,6 +487,8 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ }
+
+ bq->q[bq->count++] = xdpf;
++
++ local_unlock_nested_bh(&dev->xdp_bulkq->bq_lock);
+ }
+
+ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+@@ -1127,8 +1139,13 @@ static int dev_map_notification(struct notifier_block *notifier,
+ if (!netdev->xdp_bulkq)
+ return NOTIFY_BAD;
+
+- for_each_possible_cpu(cpu)
+- per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
++ for_each_possible_cpu(cpu) {
++ struct xdp_dev_bulk_queue *bq;
++
++ bq = per_cpu_ptr(netdev->xdp_bulkq, cpu);
++ bq->dev = netdev;
++ local_lock_init(&bq->bq_lock);
++ }
+ break;
+ case NETDEV_UNREGISTER:
+ /* This rcu_read_lock/unlock pair is needed because
+--
+2.51.0
+
--- /dev/null
+From 256185e58c6d1586afe01b56cde669f9743df4a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 05:34:44 +0000
+Subject: bpf: Fix stack-out-of-bounds write in devmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b7bf516c3ecd9a2aae2dc2635178ab87b734fef1 ]
+
+get_upper_ifindexes() iterates over all upper devices and writes their
+indices into an array without checking bounds.
+
+Also the callers assume that the max number of upper devices is
+MAX_NEST_DEV and allocate excluded_devices[1+MAX_NEST_DEV] on the stack,
+but that assumption is not correct and the number of upper devices could
+be larger than MAX_NEST_DEV (e.g., many macvlans), causing a
+stack-out-of-bounds write.
+
+Add a max parameter to get_upper_ifindexes() to avoid the issue.
+When there are too many upper devices, return -EOVERFLOW and abort the
+redirect.
+
+To reproduce, create more than MAX_NEST_DEV(8) macvlans on a device with
+an XDP program attached using BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS.
+Then send a packet to the device to trigger the XDP redirect path.
+
+Reported-by: syzbot+10cc7f13760b31bd2e61@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698c4ce3.050a0220.340abe.000b.GAE@google.com/T/
+Fixes: aeea1b86f936 ("bpf, devmap: Exclude XDP broadcast to master device")
+Reviewed-by: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Link: https://lore.kernel.org/r/20260225053506.4738-1-kohei@enjuk.jp
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 2625601de76e9..2984e938f94dc 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -588,18 +588,22 @@ static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifin
+ }
+
+ /* Get ifindex of each upper device. 'indexes' must be able to hold at
+- * least MAX_NEST_DEV elements.
+- * Returns the number of ifindexes added.
++ * least 'max' elements.
++ * Returns the number of ifindexes added, or -EOVERFLOW if there are too
++ * many upper devices.
+ */
+-static int get_upper_ifindexes(struct net_device *dev, int *indexes)
++static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
+ {
+ struct net_device *upper;
+ struct list_head *iter;
+ int n = 0;
+
+ netdev_for_each_upper_dev_rcu(dev, upper, iter) {
++ if (n >= max)
++ return -EOVERFLOW;
+ indexes[n++] = upper->ifindex;
+ }
++
+ return n;
+ }
+
+@@ -615,7 +619,11 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev_rx->ifindex;
+ }
+
+@@ -733,7 +741,11 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev->ifindex;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 8cc9b23a5be9e39091cba46a622e0fbbb60a147b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 22:35:02 +0100
+Subject: bpf: Improve bounds when tnum has a single possible value
+
+From: Paul Chaignon <paul.chaignon@gmail.com>
+
+[ Upstream commit efc11a667878a1d655ff034a93a539debbfedb12 ]
+
+We're hitting an invariant violation in Cilium that sometimes leads to
+BPF programs being rejected and Cilium failing to start [1]. The
+following extract from verifier logs shows what's happening:
+
+ from 201 to 236: R1=0 R6=ctx() R7=1 R9=scalar(smin=umin=smin32=umin32=3584,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100)) R10=fp0
+ 236: R1=0 R6=ctx() R7=1 R9=scalar(smin=umin=smin32=umin32=3584,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100)) R10=fp0
+ ; if (magic == MARK_MAGIC_HOST || magic == MARK_MAGIC_OVERLAY || magic == MARK_MAGIC_ENCRYPT) @ bpf_host.c:1337
+ 236: (16) if w9 == 0xe00 goto pc+45 ; R9=scalar(smin=umin=smin32=umin32=3585,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100))
+ 237: (16) if w9 == 0xf00 goto pc+1
+ verifier bug: REG INVARIANTS VIOLATION (false_reg1): range bounds violation u64=[0xe01, 0xe00] s64=[0xe01, 0xe00] u32=[0xe01, 0xe00] s32=[0xe01, 0xe00] var_off=(0xe00, 0x0)
+
+We reach instruction 236 with two possible values for R9, 0xe00 and
+0xf00. This is perfectly reflected in the tnum, but of course the ranges
+are less accurate and cover [0xe00; 0xf00]. Taking the fallthrough path
+at instruction 236 allows the verifier to reduce the range to
+[0xe01; 0xf00]. The tnum is however not updated.
+
+With these ranges, at instruction 237, the verifier is not able to
+deduce that R9 is always equal to 0xf00. Hence the fallthrough pass is
+explored first, the verifier refines the bounds using the assumption
+that R9 != 0xf00, and ends up with an invariant violation.
+
+This pattern of impossible branch + bounds refinement is common to all
+invariant violations seen so far. The long-term solution is likely to
+rely on the refinement + invariant violation check to detect dead
+branches, as started by Eduard. To fix the current issue, we need
+something with less refactoring that we can backport.
+
+This patch uses the tnum_step helper introduced in the previous patch to
+detect the above situation. In particular, three cases are now detected
+in the bounds refinement:
+
+1. The u64 range and the tnum only overlap in umin.
+ u64: ---[xxxxxx]-----
+ tnum: --xx----------x-
+
+2. The u64 range and the tnum only overlap in the maximum value
+ represented by the tnum, called tmax.
+ u64: ---[xxxxxx]-----
+ tnum: xx-----x--------
+
+3. The u64 range and the tnum only overlap in between umin (excluded)
+ and umax.
+ u64: ---[xxxxxx]-----
+ tnum: xx----x-------x-
+
+To detect these three cases, we call tnum_step(tnum, umin), which
+returns the smallest member of the tnum greater than umin, called
+tnum_next here. We're in case (1) if umin is part of the tnum and
+tnum_next is greater than umax. We're in case (2) if umin is not part of
+the tnum and tnum_next is equal to tmax. Finally, we're in case (3) if
+umin is not part of the tnum, tnum_next is inferior or equal to umax,
+and calling tnum_step a second time gives us a value past umax.
+
+This change implements these three cases. With it, the above bytecode
+looks as follows:
+
+ 0: (85) call bpf_get_prandom_u32#7 ; R0=scalar()
+ 1: (47) r0 |= 3584 ; R0=scalar(smin=0x8000000000000e00,umin=umin32=3584,smin32=0x80000e00,var_off=(0xe00; 0xfffffffffffff1ff))
+ 2: (57) r0 &= 3840 ; R0=scalar(smin=umin=smin32=umin32=3584,smax=umax=smax32=umax32=3840,var_off=(0xe00; 0x100))
+ 3: (15) if r0 == 0xe00 goto pc+2 ; R0=3840
+ 4: (15) if r0 == 0xf00 goto pc+1
+ 4: R0=3840
+ 6: (95) exit
+
+In addition to the new selftests, this change was also verified with
+Agni [3]. For the record, the raw SMT is available at [4]. The property
+it verifies is that: If a concrete value x is contained in all input
+abstract values, after __update_reg_bounds, it will continue to be
+contained in all output abstract values.
+
+Link: https://github.com/cilium/cilium/issues/44216 [1]
+Link: https://pchaigno.github.io/test-verifier-complexity.html [2]
+Link: https://github.com/bpfverif/agni [3]
+Link: https://pastebin.com/raw/naCfaqNx [4]
+Fixes: 0df1a55afa83 ("bpf: Warn on internal verifier errors")
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Tested-by: Marco Schirrmeister <mschirrmeister@gmail.com>
+Co-developed-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
+Link: https://lore.kernel.org/r/ef254c4f68be19bd393d450188946821c588565d.1772225741.git.paul.chaignon@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 0f871db07aadf..c3b58f5d062b0 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2358,6 +2358,9 @@ static void __update_reg32_bounds(struct bpf_reg_state *reg)
+
+ static void __update_reg64_bounds(struct bpf_reg_state *reg)
+ {
++ u64 tnum_next, tmax;
++ bool umin_in_tnum;
++
+ /* min signed is max(sign bit) | min(other bits) */
+ reg->smin_value = max_t(s64, reg->smin_value,
+ reg->var_off.value | (reg->var_off.mask & S64_MIN));
+@@ -2367,6 +2370,33 @@ static void __update_reg64_bounds(struct bpf_reg_state *reg)
+ reg->umin_value = max(reg->umin_value, reg->var_off.value);
+ reg->umax_value = min(reg->umax_value,
+ reg->var_off.value | reg->var_off.mask);
++
++ /* Check if u64 and tnum overlap in a single value */
++ tnum_next = tnum_step(reg->var_off, reg->umin_value);
++ umin_in_tnum = (reg->umin_value & ~reg->var_off.mask) == reg->var_off.value;
++ tmax = reg->var_off.value | reg->var_off.mask;
++ if (umin_in_tnum && tnum_next > reg->umax_value) {
++ /* The u64 range and the tnum only overlap in umin.
++ * u64: ---[xxxxxx]-----
++ * tnum: --xx----------x-
++ */
++ ___mark_reg_known(reg, reg->umin_value);
++ } else if (!umin_in_tnum && tnum_next == tmax) {
++ /* The u64 range and the tnum only overlap in the maximum value
++ * represented by the tnum, called tmax.
++ * u64: ---[xxxxxx]-----
++ * tnum: xx-----x--------
++ */
++ ___mark_reg_known(reg, tmax);
++ } else if (!umin_in_tnum && tnum_next <= reg->umax_value &&
++ tnum_step(reg->var_off, tnum_next) > reg->umax_value) {
++ /* The u64 range and the tnum only overlap in between umin
++ * (excluded) and umax.
++ * u64: ---[xxxxxx]-----
++ * tnum: xx----x-------x-
++ */
++ ___mark_reg_known(reg, tnum_next);
++ }
+ }
+
+ static void __update_reg_bounds(struct bpf_reg_state *reg)
+--
+2.51.0
+
--- /dev/null
+From 7084facadc9f5c2ba37ee41e1e73c3c25e8a239c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 22:32:21 +0100
+Subject: bpf: Introduce tnum_step to step through tnum's members
+
+From: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+
+[ Upstream commit 76e954155b45294c502e3d3a9e15757c858ca55e ]
+
+This commit introduces tnum_step(), a function that, when given t, and a
+number z returns the smallest member of t larger than z. The number z
+must be greater or equal to the smallest member of t and less than the
+largest member of t.
+
+The first step is to compute j, a number that keeps all of t's known
+bits, and matches all unknown bits to z's bits. Since j is a member of
+the t, it is already a candidate for result. However, we want our result
+to be (minimally) greater than z.
+
+There are only two possible cases:
+
+(1) Case j <= z. In this case, we want to increase the value of j and
+make it > z.
+(2) Case j > z. In this case, we want to decrease the value of j while
+keeping it > z.
+
+(Case 1) j <= z
+
+t = xx11x0x0
+z = 10111101 (189)
+j = 10111000 (184)
+ ^
+ k
+
+(Case 1.1) Let's first consider the case where j < z. We will address j
+== z later.
+
+Since z > j, there had to be a bit position that was 1 in z and a 0 in
+j, beyond which all positions of higher significance are equal in j and
+z. Further, this position could not have been unknown in t, because the
+unknown positions of t match z. This position had to be a 1 in z and
+known 0 in t.
+
+Let k be position of the most significant 1-to-0 flip. In our example, k
+= 3 (starting the count at 1 at the least significant bit). Setting (to
+1) the unknown bits of t in positions of significance smaller than
+k will not produce a result > z. Hence, we must set/unset the unknown
+bits at positions of significance higher than k. Specifically, we look
+for the next larger combination of 1s and 0s to place in those
+positions, relative to the combination that exists in z. We can achieve
+this by concatenating bits at unknown positions of t into an integer,
+adding 1, and writing the bits of that result back into the
+corresponding bit positions previously extracted from z.
+
+>From our example, considering only positions of significance greater
+than k:
+
+t = xx..x
+z = 10..1
+ + 1
+ -----
+ 11..0
+
+This is the exact combination 1s and 0s we need at the unknown bits of t
+in positions of significance greater than k. Further, our result must
+only increase the value minimally above z. Hence, unknown bits in
+positions of significance smaller than k should remain 0. We finally
+have,
+
+result = 11110000 (240)
+
+(Case 1.2) Now consider the case when j = z, for example
+
+t = 1x1x0xxx
+z = 10110100 (180)
+j = 10110100 (180)
+
+Matching the unknown bits of the t to the bits of z yielded exactly z.
+To produce a number greater than z, we must set/unset the unknown bits
+in t, and *all* the unknown bits of t candidates for being set/unset. We
+can do this similar to Case 1.1, by adding 1 to the bits extracted from
+the masked bit positions of z. Essentially, this case is equivalent to
+Case 1.1, with k = 0.
+
+t = 1x1x0xxx
+z = .0.1.100
+ + 1
+ ---------
+ .0.1.101
+
+This is the exact combination of bits needed in the unknown positions of
+t. After recalling the known positions of t, we get
+
+result = 10110101 (181)
+
+(Case 2) j > z
+
+t = x00010x1
+z = 10000010 (130)
+j = 10001011 (139)
+ ^
+ k
+
+Since j > z, there had to be a bit position which was 0 in z, and a 1 in
+j, beyond which all positions of higher significance are equal in j and
+z. This position had to be a 0 in z and known 1 in t. Let k be the
+position of the most significant 0-to-1 flip. In our example, k = 4.
+
+Because of the 0-to-1 flip at position k, a member of t can become
+greater than z if the bits in positions greater than k are themselves >=
+to z. To make that member *minimally* greater than z, the bits in
+positions greater than k must be exactly = z. Hence, we simply match all
+of t's unknown bits in positions more significant than k to z's bits. In
+positions less significant than k, we set all t's unknown bits to 0
+to retain minimality.
+
+In our example, in positions of greater significance than k (=4),
+t=x000. These positions are matched with z (1000) to produce 1000. In
+positions of lower significance than k, t=10x1. All unknown bits are set
+to 0 to produce 1001. The final result is:
+
+result = 10001001 (137)
+
+This concludes the computation for a result > z that is a member of t.
+
+The procedure for tnum_step() in this commit implements the idea
+described above. As a proof of correctness, we verified the algorithm
+against a logical specification of tnum_step. The specification asserts
+the following about the inputs t, z and output res that:
+
+1. res is a member of t, and
+2. res is strictly greater than z, and
+3. there does not exist another value res2 such that
+ 3a. res2 is also a member of t, and
+ 3b. res2 is greater than z
+ 3c. res2 is smaller than res
+
+We checked the implementation against this logical specification using
+an SMT solver. The verification formula in SMTLIB format is available
+at [1]. The verification returned an "unsat": indicating that no input
+assignment exists for which the implementation and the specification
+produce different outputs.
+
+In addition, we also automatically generated the logical encoding of the
+C implementation using Agni [2] and verified it against the same
+specification. This verification also returned an "unsat", confirming
+that the implementation is equivalent to the specification. The formula
+for this check is also available at [3].
+
+Link: https://pastebin.com/raw/2eRWbiit [1]
+Link: https://github.com/bpfverif/agni [2]
+Link: https://pastebin.com/raw/EztVbBJ2 [3]
+Co-developed-by: Srinivas Narayana <srinivas.narayana@rutgers.edu>
+Signed-off-by: Srinivas Narayana <srinivas.narayana@rutgers.edu>
+Co-developed-by: Santosh Nagarakatte <santosh.nagarakatte@rutgers.edu>
+Signed-off-by: Santosh Nagarakatte <santosh.nagarakatte@rutgers.edu>
+Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
+Link: https://lore.kernel.org/r/93fdf71910411c0f19e282ba6d03b4c65f9c5d73.1772225741.git.paul.chaignon@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: efc11a667878 ("bpf: Improve bounds when tnum has a single possible value")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/tnum.h | 3 +++
+ kernel/bpf/tnum.c | 56 ++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 59 insertions(+)
+
+diff --git a/include/linux/tnum.h b/include/linux/tnum.h
+index fa4654ffb6217..ca2cfec8de08a 100644
+--- a/include/linux/tnum.h
++++ b/include/linux/tnum.h
+@@ -131,4 +131,7 @@ static inline bool tnum_subreg_is_const(struct tnum a)
+ return !(tnum_subreg(a)).mask;
+ }
+
++/* Returns the smallest member of t larger than z */
++u64 tnum_step(struct tnum t, u64 z);
++
+ #endif /* _LINUX_TNUM_H */
+diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
+index 26fbfbb017001..4abc359b3db01 100644
+--- a/kernel/bpf/tnum.c
++++ b/kernel/bpf/tnum.c
+@@ -269,3 +269,59 @@ struct tnum tnum_bswap64(struct tnum a)
+ {
+ return TNUM(swab64(a.value), swab64(a.mask));
+ }
++
++/* Given tnum t, and a number z such that tmin <= z < tmax, where tmin
++ * is the smallest member of the t (= t.value) and tmax is the largest
++ * member of t (= t.value | t.mask), returns the smallest member of t
++ * larger than z.
++ *
++ * For example,
++ * t = x11100x0
++ * z = 11110001 (241)
++ * result = 11110010 (242)
++ *
++ * Note: if this function is called with z >= tmax, it just returns
++ * early with tmax; if this function is called with z < tmin, the
++ * algorithm already returns tmin.
++ */
++u64 tnum_step(struct tnum t, u64 z)
++{
++ u64 tmax, j, p, q, r, s, v, u, w, res;
++ u8 k;
++
++ tmax = t.value | t.mask;
++
++ /* if z >= largest member of t, return largest member of t */
++ if (z >= tmax)
++ return tmax;
++
++ /* if z < smallest member of t, return smallest member of t */
++ if (z < t.value)
++ return t.value;
++
++ /* keep t's known bits, and match all unknown bits to z */
++ j = t.value | (z & t.mask);
++
++ if (j > z) {
++ p = ~z & t.value & ~t.mask;
++ k = fls64(p); /* k is the most-significant 0-to-1 flip */
++ q = U64_MAX << k;
++ r = q & z; /* positions > k matched to z */
++ s = ~q & t.value; /* positions <= k matched to t.value */
++ v = r | s;
++ res = v;
++ } else {
++ p = z & ~t.value & ~t.mask;
++ k = fls64(p); /* k is the most-significant 1-to-0 flip */
++ q = U64_MAX << k;
++ r = q & t.mask & z; /* unknown positions > k, matched to z */
++ s = q & ~t.mask; /* known positions > k, set to 1 */
++ v = r | s;
++ /* add 1 to unknown positions > k to make value greater than z */
++ u = v + (1ULL << k);
++ /* extract bits in unknown positions > k from u, rest from t.value */
++ w = (u & t.mask) | t.value;
++ res = w;
++ }
++ return res;
++}
+--
+2.51.0
+
--- /dev/null
+From fe4032e8080c3ab3c73aa23e4342cf189283281a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:41 +0000
+Subject: btrfs: fix compat mask in error messages in btrfs_check_features()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 587bb33b10bda645a1028c1737ad3992b3d7cf61 ]
+
+Commit d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency
+checks") introduced a regression when it comes to handling unsupported
+incompat or compat_ro flags. Beforehand we only printed the flags that
+we didn't recognize, afterwards we printed them all, which is less
+useful. Fix the error handling so it behaves like it used to.
+
+Fixes: d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 2833b44f4b4f2..6d2dcd023cc6f 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3150,7 +3150,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ btrfs_err(fs_info,
+ "cannot mount because of unknown incompat features (0x%llx)",
+- incompat);
++ incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP);
+ return -EINVAL;
+ }
+
+@@ -3182,7 +3182,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (compat_ro_unsupp && is_rw_mount) {
+ btrfs_err(fs_info,
+ "cannot mount read-write because of unknown compat_ro features (0x%llx)",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+@@ -3195,7 +3195,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
+ btrfs_err(fs_info,
+ "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From b8014a4ce989da578cc737d90df8a3383579f72a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 18:25:42 +0000
+Subject: btrfs: fix error message order of parameters in
+ btrfs_delete_delayed_dir_index()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 3cf0f35779d364cf2003c617bb7f3f3e41023372 ]
+
+Fix the error message in btrfs_delete_delayed_dir_index() if
+__btrfs_add_delayed_item() fails: the message says root, inode, index,
+error, but we're actually passing index, root, inode, error.
+
+Fixes: adc1ef55dc04 ("btrfs: add details to error messages at btrfs_delete_delayed_dir_index()")
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/delayed-inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 4b7d9015e0dad..7e3d294a6dced 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1673,7 +1673,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+ if (unlikely(ret)) {
+ btrfs_err(trans->fs_info,
+ "failed to add delayed dir index item, root: %llu, inode: %llu, index: %llu, error: %d",
+- index, btrfs_root_id(node->root), node->inode_id, ret);
++ btrfs_root_id(node->root), node->inode_id, index, ret);
+ btrfs_delayed_item_release_metadata(dir->root, item);
+ btrfs_release_delayed_item(item);
+ }
+--
+2.51.0
+
--- /dev/null
+From fffa2f7fb3add36397687a8862692671539df5b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:21:44 +0000
+Subject: btrfs: fix incorrect key offset in error message in
+ check_dev_extent_item()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 511dc8912ae3e929c1a182f5e6b2326516fd42a0 ]
+
+Fix the error message in check_dev_extent_item(), when an overlapping
+stripe is encountered. For dev extents, objectid is the disk number and
+offset the physical address, so prev_key->objectid should actually be
+prev_key->offset.
+
+(I can't take any credit for this one - this was discovered by Chris and
+his friend Claude.)
+
+Reported-by: Chris Mason <clm@fb.com>
+Fixes: 008e2512dc56 ("btrfs: tree-checker: add dev extent item checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index c21c21adf61ed..6d4dceb144373 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1893,7 +1893,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+- prev_key->objectid, prev_len, key->offset);
++ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 071e3765870021c0f828731512b6f8865952cd0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 14:39:46 +0000
+Subject: btrfs: fix objectid value in error message in check_extent_data_ref()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit a10172780526c2002e062102ad4f2aabac495889 ]
+
+Fix a copy-paste error in check_extent_data_ref(): we're printing root
+as in the message above, we should be printing objectid.
+
+Fixes: f333a3c7e832 ("btrfs: tree-checker: validate dref root and objectid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 6d4dceb144373..12d6ae49bc078 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1712,7 +1712,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref objectid value %llu",
+- root);
++ objectid);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+--
+2.51.0
+
--- /dev/null
+From 97680fb57942b9ad398a0f84d8b2937fa341f8d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:13 +0000
+Subject: btrfs: fix warning in scrub_verify_one_metadata()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 44e2fda66427a0442d8d2c0e6443256fb458ab6b ]
+
+Commit b471965fdb2d ("btrfs: fix replace/scrub failure with
+metadata_uuid") fixed the comparison in scrub_verify_one_metadata() to
+use metadata_uuid rather than fsid, but left the warning as it was. Fix
+it so it matches what we're doing.
+
+Fixes: b471965fdb2d ("btrfs: fix replace/scrub failure with metadata_uuid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/scrub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index a40ee41f42c68..4fc69b2d213a6 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -745,7 +745,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ btrfs_warn_rl(fs_info,
+ "scrub: tree block %llu mirror %u has bad fsid, has %pU want %pU",
+ logical, stripe->mirror_num,
+- header->fsid, fs_info->fs_devices->fsid);
++ header->fsid, fs_info->fs_devices->metadata_uuid);
+ return;
+ }
+ if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+--
+2.51.0
+
--- /dev/null
+From cb91df2351e07dbcfe0dc377c81a3f3450f6dc61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 22:12:15 +0100
+Subject: btrfs: free pages on error in btrfs_uring_read_extent()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Sabaté Solà <mssola@mssola.com>
+
+[ Upstream commit 3f501412f2079ca14bf68a18d80a2b7a823f1f64 ]
+
+In this function the 'pages' object is never freed in the hopes that it is
+picked up by btrfs_uring_read_finished() whenever that executes in the
+future. But that's just the happy path. Along the way previous
+allocations might have gone wrong, or we might not get -EIOCBQUEUED from
+btrfs_encoded_read_regular_fill_pages(). In all these cases, we go to a
+cleanup section that frees all memory allocated by this function without
+assuming any deferred execution, and this also needs to happen for the
+'pages' allocation.
+
+Fixes: 34310c442e17 ("btrfs: add io_uring command for encoded reads (ENCODED_READ ioctl)")
+Signed-off-by: Miquel Sabaté Solà <mssola@mssola.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ioctl.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index acb484546b1da..c9284ce6c6e78 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4661,7 +4661,7 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
+ {
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct extent_io_tree *io_tree = &inode->io_tree;
+- struct page **pages;
++ struct page **pages = NULL;
+ struct btrfs_uring_priv *priv = NULL;
+ unsigned long nr_pages;
+ int ret;
+@@ -4719,6 +4719,11 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ kfree(priv);
++ for (int i = 0; i < nr_pages; i++) {
++ if (pages[i])
++ __free_page(pages[i]);
++ }
++ kfree(pages);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 2b51c34f1e021abcaf8a2319af17b5269b642ad4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:32:39 +0000
+Subject: btrfs: print correct subvol num if active swapfile prevents deletion
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 1c7e9111f4e6d6d42bc47759c9af1ef91f03ac2c ]
+
+Fix the error message in btrfs_delete_subvolume() if we can't delete a
+subvolume because it has an active swapfile: we were printing the number
+of the parent rather than the target.
+
+Fixes: 60021bd754c6 ("btrfs: prevent subvol with swapfile from being deleted")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a2b5b440637e6..827554963a7c8 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4720,7 +4720,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ spin_unlock(&dest->root_item_lock);
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu with active swapfile",
+- btrfs_root_id(root));
++ btrfs_root_id(dest));
+ ret = -EPERM;
+ goto out_up_write;
+ }
+--
+2.51.0
+
--- /dev/null
+From 4f661a683d243e17a792bf6b31969880d92678cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 13:54:11 -0500
+Subject: cgroup/cpuset: Fix incorrect change to effective_xcpus in
+ partition_xcpus_del()
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit f9a1767ce3a34bc33c3d33473f65dc13a380e379 ]
+
+The effective_xcpus of a cpuset can contain offline CPUs. In
+partition_xcpus_del(), the xcpus parameter is incorrectly used as
+a temporary cpumask to mask out offline CPUs. As xcpus can be the
+effective_xcpus of a cpuset, this can result in unexpected changes
+in that cpumask. Fix this problem by not making any changes to the
+xcpus parameter.
+
+Fixes: 11e5f407b64a ("cgroup/cpuset: Keep track of CPUs in isolated partitions")
+Reviewed-by: Chen Ridong <chenridong@huaweicloud.com>
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 62e1807b23448..aaef221a1434c 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1401,8 +1401,8 @@ static void partition_xcpus_del(int old_prs, struct cpuset *parent,
+ isolated_cpus_update(old_prs, parent->partition_root_state,
+ xcpus);
+
+- cpumask_and(xcpus, xcpus, cpu_active_mask);
+ cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
++ cpumask_and(parent->effective_cpus, parent->effective_cpus, cpu_active_mask);
+ }
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From a49039327cc9ec91d0be02ed8e40b27b8b215277 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 13:54:12 -0500
+Subject: cgroup/cpuset: Fix incorrect use of cpuset_update_tasks_cpumask() in
+ update_cpumasks_hier()
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 68230aac8b9aad243626fbaf3ca170012c17fec5 ]
+
+Commit e2ffe502ba45 ("cgroup/cpuset: Add cpuset.cpus.exclusive for v2")
+incorrectly changed the 2nd parameter of cpuset_update_tasks_cpumask()
+from tmp->new_cpus to cp->effective_cpus. This second parameter is just
+a temporary cpumask for internal use. The cpuset_update_tasks_cpumask()
+function was originally called update_tasks_cpumask() before commit
+381b53c3b549 ("cgroup/cpuset: rename functions shared between v1
+and v2").
+
+This mistake can incorrectly change the effective_cpus of the
+cpuset when it is the top_cpuset or in arm64 architecture where
+task_cpu_possible_mask() may differ from cpu_possible_mask. So far
+top_cpuset hasn't been passed to update_cpumasks_hier() yet, but arm64
+arch can still be impacted. Fix it by reverting the incorrect change.
+
+Fixes: e2ffe502ba45 ("cgroup/cpuset: Add cpuset.cpus.exclusive for v2")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index aaef221a1434c..81b3165f1aaa1 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2350,7 +2350,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ WARN_ON(!is_in_v2_mode() &&
+ !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+
+- cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
++ cpuset_update_tasks_cpumask(cp, tmp->new_cpus);
+
+ /*
+ * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
+--
+2.51.0
+
--- /dev/null
+From 739db060d8373a3be32708e8084c3e7fac53bb17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 00:58:11 +0100
+Subject: clk: scu/imx8qxp: do not register driver in probe()
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+[ Upstream commit 78437ab3b769f80526416570f60173c89858dd84 ]
+
+imx_clk_scu_init() registers the imx_clk_scu_driver while commonly being
+called from IMX driver's probe() callbacks.
+
+However, it neither makes sense to register drivers from probe()
+callbacks of other drivers, nor does the driver core allow registering
+drivers with a device lock already being held.
+
+The latter was revealed by commit dc23806a7c47 ("driver core: enforce
+device_lock for driver_match_device()") leading to a deadlock condition
+described in [1].
+
+Besides that, nothing seems to unregister the imx_clk_scu_driver once
+the corresponding driver module is unloaded, which leaves the
+driver-core with a dangling pointer.
+
+Also, if there are multiple matching devices for the imx8qxp_clk_driver,
+imx8qxp_clk_probe() calls imx_clk_scu_init() multiple times. However,
+any subsequent call after the first one will fail, since the driver-core
+does not allow to register the same struct platform_driver multiple
+times.
+
+Hence, register the imx_clk_scu_driver from module_init() and unregister
+it in module_exit().
+
+Note that we first register the imx8qxp_clk_driver and then call
+imx_clk_scu_module_init() to avoid having to call
+imx_clk_scu_module_exit() in the unwind path of imx8qxp_clk_init().
+
+Fixes: dc23806a7c47 ("driver core: enforce device_lock for driver_match_device()")
+Fixes: 220175cd3979 ("clk: imx: scu: fix build break when compiled as modules")
+Reported-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Closes: https://lore.kernel.org/lkml/13955113.uLZWGnKmhe@steina-w/
+Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com> # TQMa8x/MBa8x
+Link: https://lore.kernel.org/lkml/DFU7CEPUSG9A.1KKGVW4HIPMSH@kernel.org/ [1]
+Acked-by: Abel Vesa <abelvesa@kernel.org>
+Reviewed-by: Daniel Baluta <daniel.baluta@nxp.com>
+Link: https://patch.msgid.link/20260212235842.85934-1-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/imx/clk-imx8qxp.c | 24 +++++++++++++++++++++++-
+ drivers/clk/imx/clk-scu.c | 12 +++++++++++-
+ drivers/clk/imx/clk-scu.h | 2 ++
+ 3 files changed, 36 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index 3ae162625bb1a..c781425a005ef 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -346,7 +346,29 @@ static struct platform_driver imx8qxp_clk_driver = {
+ },
+ .probe = imx8qxp_clk_probe,
+ };
+-module_platform_driver(imx8qxp_clk_driver);
++
++static int __init imx8qxp_clk_init(void)
++{
++ int ret;
++
++ ret = platform_driver_register(&imx8qxp_clk_driver);
++ if (ret)
++ return ret;
++
++ ret = imx_clk_scu_module_init();
++ if (ret)
++ platform_driver_unregister(&imx8qxp_clk_driver);
++
++ return ret;
++}
++module_init(imx8qxp_clk_init);
++
++static void __exit imx8qxp_clk_exit(void)
++{
++ imx_clk_scu_module_exit();
++ platform_driver_unregister(&imx8qxp_clk_driver);
++}
++module_exit(imx8qxp_clk_exit);
+
+ MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
+ MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
+diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
+index 34c9dc1fb20e5..c90d21e05f916 100644
+--- a/drivers/clk/imx/clk-scu.c
++++ b/drivers/clk/imx/clk-scu.c
+@@ -191,6 +191,16 @@ static bool imx_scu_clk_is_valid(u32 rsrc_id)
+ return p != NULL;
+ }
+
++int __init imx_clk_scu_module_init(void)
++{
++ return platform_driver_register(&imx_clk_scu_driver);
++}
++
++void __exit imx_clk_scu_module_exit(void)
++{
++ return platform_driver_unregister(&imx_clk_scu_driver);
++}
++
+ int imx_clk_scu_init(struct device_node *np,
+ const struct imx_clk_scu_rsrc_table *data)
+ {
+@@ -215,7 +225,7 @@ int imx_clk_scu_init(struct device_node *np,
+ rsrc_table = data;
+ }
+
+- return platform_driver_register(&imx_clk_scu_driver);
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h
+index af7b697f51cae..ca82f2cce8974 100644
+--- a/drivers/clk/imx/clk-scu.h
++++ b/drivers/clk/imx/clk-scu.h
+@@ -25,6 +25,8 @@ extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8dxl;
+ extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8qxp;
+ extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8qm;
+
++int __init imx_clk_scu_module_init(void);
++void __exit imx_clk_scu_module_exit(void);
+ int imx_clk_scu_init(struct device_node *np,
+ const struct imx_clk_scu_rsrc_table *data);
+ struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
+--
+2.51.0
+
--- /dev/null
+From cdad77d8c3dab2604d70680ae09ae02b60d8cb08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 14:50:38 -0700
+Subject: cxl: Fix race of nvdimm_bus object when creating nvdimm objects
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+[ Upstream commit 96a1fd0d84b17360840f344826897fa71049870e ]
+
+Found issue during running of cxl-translate.sh unit test. Adding a 3s
+sleep right before the test seems to make the issue reproduce fairly
+consistently. The cxl_translate module has dependency on cxl_acpi and
+causes orphaned nvdimm objects to reprobe after cxl_acpi is removed.
+The nvdimm_bus object is registered by the cxl_nvb object when
+cxl_acpi_probe() is called. With the nvdimm_bus object missing,
+__nd_device_register() will trigger NULL pointer dereference when
+accessing the dev->parent that points to &nvdimm_bus->dev.
+
+[ 192.884510] BUG: kernel NULL pointer dereference, address: 000000000000006c
+[ 192.895383] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS edk2-20250812-19.fc42 08/12/2025
+[ 192.897721] Workqueue: cxl_port cxl_bus_rescan_queue [cxl_core]
+[ 192.899459] RIP: 0010:kobject_get+0xc/0x90
+[ 192.924871] Call Trace:
+[ 192.925959] <TASK>
+[ 192.926976] ? pm_runtime_init+0xb9/0xe0
+[ 192.929712] __nd_device_register.part.0+0x4d/0xc0 [libnvdimm]
+[ 192.933314] __nvdimm_create+0x206/0x290 [libnvdimm]
+[ 192.936662] cxl_nvdimm_probe+0x119/0x1d0 [cxl_pmem]
+[ 192.940245] cxl_bus_probe+0x1a/0x60 [cxl_core]
+[ 192.943349] really_probe+0xde/0x380
+
+This patch also relies on the previous change where
+devm_cxl_add_nvdimm_bridge() is called from drivers/cxl/pmem.c instead
+of drivers/cxl/core.c to ensure the dependency of cxl_acpi on cxl_pmem.
+
+1. Set probe_type of cxl_nvb to PROBE_FORCE_SYNCHRONOUS to ensure the
+ driver is probed synchronously when add_device() is called.
+2. Add a check in __devm_cxl_add_nvdimm_bridge() to ensure that the
+ cxl_nvb driver is attached during cxl_acpi_probe().
+3. Take the cxl_root uport_dev lock and the cxl_nvb->dev lock in
+ devm_cxl_add_nvdimm() before checking nvdimm_bus is valid.
+4. Set cxl_nvdimm flag to CXL_NVD_F_INVALIDATED so cxl_nvdimm_probe()
+ will exit with -EBUSY.
+
+The removal of cxl_nvdimm devices should prevent any orphaned devices
+from probing once the nvdimm_bus is gone.
+
+[ dj: Fixed 0-day reported kdoc issue. ]
+[ dj: Fix cxl_nvb reference leak on error. Gregory (kreview-0811365) ]
+
+Suggested-by: Dan Williams <dan.j.williams@intel.com>
+Fixes: 8fdcb1704f61 ("cxl/pmem: Add initial infrastructure for pmem support")
+Tested-by: Alison Schofield <alison.schofield@intel.com>
+Reviewed-by: Alison Schofield <alison.schofield@intel.com>
+Link: https://patch.msgid.link/20260205001633.1813643-3-dave.jiang@intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/pmem.c | 29 +++++++++++++++++++++++++++++
+ drivers/cxl/cxl.h | 5 +++++
+ drivers/cxl/pmem.c | 10 ++++++++--
+ 3 files changed, 42 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
+index e1325936183a6..e3a8b8d813333 100644
+--- a/drivers/cxl/core/pmem.c
++++ b/drivers/cxl/core/pmem.c
+@@ -115,6 +115,15 @@ static void unregister_nvb(void *_cxl_nvb)
+ device_unregister(&cxl_nvb->dev);
+ }
+
++static bool cxl_nvdimm_bridge_failed_attach(struct cxl_nvdimm_bridge *cxl_nvb)
++{
++ struct device *dev = &cxl_nvb->dev;
++
++ guard(device)(dev);
++ /* If the device has no driver, then it failed to attach. */
++ return dev->driver == NULL;
++}
++
+ struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port)
+ {
+@@ -138,6 +147,11 @@ struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
+ if (rc)
+ goto err;
+
++ if (cxl_nvdimm_bridge_failed_attach(cxl_nvb)) {
++ unregister_nvb(cxl_nvb);
++ return ERR_PTR(-ENODEV);
++ }
++
+ rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
+ if (rc)
+ return ERR_PTR(rc);
+@@ -247,6 +261,21 @@ int devm_cxl_add_nvdimm(struct cxl_port *parent_port,
+ if (!cxl_nvb)
+ return -ENODEV;
+
++ /*
++ * Take the uport_dev lock to guard against race of nvdimm_bus object.
++ * cxl_acpi_probe() registers the nvdimm_bus and is done under the
++ * root port uport_dev lock.
++ *
++ * Take the cxl_nvb device lock to ensure that cxl_nvb driver is in a
++ * consistent state. And the driver registers nvdimm_bus.
++ */
++ guard(device)(cxl_nvb->port->uport_dev);
++ guard(device)(&cxl_nvb->dev);
++ if (!cxl_nvb->nvdimm_bus) {
++ rc = -ENODEV;
++ goto err_alloc;
++ }
++
+ cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
+ if (IS_ERR(cxl_nvd)) {
+ rc = PTR_ERR(cxl_nvd);
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 2854e47fd9869..e477cd72d3000 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -563,11 +563,16 @@ struct cxl_nvdimm_bridge {
+
+ #define CXL_DEV_ID_LEN 19
+
++enum {
++ CXL_NVD_F_INVALIDATED = 0,
++};
++
+ struct cxl_nvdimm {
+ struct device dev;
+ struct cxl_memdev *cxlmd;
+ u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
+ u64 dirty_shutdowns;
++ unsigned long flags;
+ };
+
+ struct cxl_pmem_region_mapping {
+diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
+index 714beaf1704be..c00b84b960761 100644
+--- a/drivers/cxl/pmem.c
++++ b/drivers/cxl/pmem.c
+@@ -14,7 +14,7 @@
+ static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+
+ /**
+- * __devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
++ * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+ * @host: platform firmware root device
+ * @port: CXL port at the root of a CXL topology
+ *
+@@ -143,6 +143,9 @@ static int cxl_nvdimm_probe(struct device *dev)
+ struct nvdimm *nvdimm;
+ int rc;
+
++ if (test_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags))
++ return -EBUSY;
++
+ set_exclusive_cxl_commands(mds, exclusive_cmds);
+ rc = devm_add_action_or_reset(dev, clear_exclusive, mds);
+ if (rc)
+@@ -323,8 +326,10 @@ static int detach_nvdimm(struct device *dev, void *data)
+ scoped_guard(device, dev) {
+ if (dev->driver) {
+ cxl_nvd = to_cxl_nvdimm(dev);
+- if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
++ if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) {
+ release = true;
++ set_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags);
++ }
+ }
+ }
+ if (release)
+@@ -367,6 +372,7 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
+ .probe = cxl_nvdimm_bridge_probe,
+ .id = CXL_DEVICE_NVDIMM_BRIDGE,
+ .drv = {
++ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
+ },
+ };
+--
+2.51.0
+
--- /dev/null
+From ed50aa75a3f2984bbe59947a60d797ce4e723879 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Feb 2026 16:16:17 -0800
+Subject: cxl/mbox: validate payload size before accessing contents in
+ cxl_payload_from_user_allowed()
+
+From: Davidlohr Bueso <dave@stgolabs.net>
+
+[ Upstream commit 60b5d1f68338aff2c5af0113f04aefa7169c50c2 ]
+
+cxl_payload_from_user_allowed() casts and dereferences the input
+payload without first verifying its size. When a raw mailbox command
+is sent with an undersized payload (ie: 1 byte for CXL_MBOX_OP_CLEAR_LOG,
+which expects a 16-byte UUID), uuid_equal() reads past the allocated buffer,
+triggering a KASAN splat:
+
+BUG: KASAN: slab-out-of-bounds in memcmp+0x176/0x1d0 lib/string.c:683
+Read of size 8 at addr ffff88810130f5c0 by task syz.1.62/2258
+
+CPU: 2 UID: 0 PID: 2258 Comm: syz.1.62 Not tainted 6.19.0-dirty #3 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0xab/0xe0 lib/dump_stack.c:120
+ print_address_description mm/kasan/report.c:378 [inline]
+ print_report+0xce/0x650 mm/kasan/report.c:482
+ kasan_report+0xce/0x100 mm/kasan/report.c:595
+ memcmp+0x176/0x1d0 lib/string.c:683
+ uuid_equal include/linux/uuid.h:73 [inline]
+ cxl_payload_from_user_allowed drivers/cxl/core/mbox.c:345 [inline]
+ cxl_mbox_cmd_ctor drivers/cxl/core/mbox.c:368 [inline]
+ cxl_validate_cmd_from_user drivers/cxl/core/mbox.c:522 [inline]
+ cxl_send_cmd+0x9c0/0xb50 drivers/cxl/core/mbox.c:643
+ __cxl_memdev_ioctl drivers/cxl/core/memdev.c:698 [inline]
+ cxl_memdev_ioctl+0x14f/0x190 drivers/cxl/core/memdev.c:713
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:597 [inline]
+ __se_sys_ioctl fs/ioctl.c:583 [inline]
+ __x64_sys_ioctl+0x18e/0x210 fs/ioctl.c:583
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xa8/0x330 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7fdaf331ba79
+Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007fdaf1d77038 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007fdaf3585fa0 RCX: 00007fdaf331ba79
+RDX: 00002000000001c0 RSI: 00000000c030ce02 RDI: 0000000000000003
+RBP: 00007fdaf33749df R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007fdaf3586038 R14: 00007fdaf3585fa0 R15: 00007ffced2af768
+ </TASK>
+
+Add 'in_size' parameter to cxl_payload_from_user_allowed() and validate
+the payload is large enough.
+
+Fixes: 6179045ccc0c ("cxl/mbox: Block immediate mode in SET_PARTITION_INFO command")
+Fixes: 206f9fa9d555 ("cxl/mbox: Add Clear Log mailbox command")
+Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
+Reviewed-by: Alison Schofield <alison.schofield@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://patch.msgid.link/20260220001618.963490-2-dave@stgolabs.net
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/mbox.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
+index fa6dd0c94656f..e7a6452bf5445 100644
+--- a/drivers/cxl/core/mbox.c
++++ b/drivers/cxl/core/mbox.c
+@@ -311,6 +311,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
+ * cxl_payload_from_user_allowed() - Check contents of in_payload.
+ * @opcode: The mailbox command opcode.
+ * @payload_in: Pointer to the input payload passed in from user space.
++ * @in_size: Size of @payload_in in bytes.
+ *
+ * Return:
+ * * true - payload_in passes check for @opcode.
+@@ -325,12 +326,15 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
+ *
+ * The specific checks are determined by the opcode.
+ */
+-static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
++static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in,
++ size_t in_size)
+ {
+ switch (opcode) {
+ case CXL_MBOX_OP_SET_PARTITION_INFO: {
+ struct cxl_mbox_set_partition_info *pi = payload_in;
+
++ if (in_size < sizeof(*pi))
++ return false;
+ if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
+ return false;
+ break;
+@@ -338,6 +342,8 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
+ case CXL_MBOX_OP_CLEAR_LOG: {
+ const uuid_t *uuid = (uuid_t *)payload_in;
+
++ if (in_size < sizeof(uuid_t))
++ return false;
+ /*
+ * Restrict the ‘Clear log’ action to only apply to
+ * Vendor debug logs.
+@@ -365,7 +371,8 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
+ if (IS_ERR(mbox_cmd->payload_in))
+ return PTR_ERR(mbox_cmd->payload_in);
+
+- if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
++ if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in,
++ in_size)) {
+ dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
+ cxl_mem_opcode_to_name(opcode));
+ kvfree(mbox_cmd->payload_in);
+--
+2.51.0
+
--- /dev/null
+From 8d717ee549d75f2d19b9900644d3645637d36e6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 17:31:23 -0700
+Subject: cxl: Move devm_cxl_add_nvdimm_bridge() to cxl_pmem.ko
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+[ Upstream commit e7e222ad73d93fe54d6e6e3a15253a0ecf081a1b ]
+
+Moving the symbol devm_cxl_add_nvdimm_bridge() to
+drivers/cxl/cxl_pmem.c, so that cxl_pmem can export a symbol that gives
+cxl_acpi a dependency on cxl_pmem kernel module. This is a preparatory patch
+to resolve the issue of a race for nvdimm_bus object that is created
+during cxl_acpi_probe().
+
+No functional changes besides moving code.
+
+Suggested-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Ira Weiny <ira.weiny@intel.com>
+Tested-by: Alison Schofield <alison.schofield@intel.com>
+Reviewed-by: Alison Schofield <alison.schofield@intel.com>
+Link: https://patch.msgid.link/20260205001633.1813643-2-dave.jiang@intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Stable-dep-of: 96a1fd0d84b1 ("cxl: Fix race of nvdimm_bus object when creating nvdimm objects")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/pmem.c | 13 +++----------
+ drivers/cxl/cxl.h | 2 ++
+ drivers/cxl/pmem.c | 14 ++++++++++++++
+ 3 files changed, 19 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
+index 8853415c106a9..e1325936183a6 100644
+--- a/drivers/cxl/core/pmem.c
++++ b/drivers/cxl/core/pmem.c
+@@ -115,15 +115,8 @@ static void unregister_nvb(void *_cxl_nvb)
+ device_unregister(&cxl_nvb->dev);
+ }
+
+-/**
+- * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+- * @host: platform firmware root device
+- * @port: CXL port at the root of a CXL topology
+- *
+- * Return: bridge device that can host cxl_nvdimm objects
+- */
+-struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+- struct cxl_port *port)
++struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
++ struct cxl_port *port)
+ {
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+@@ -155,7 +148,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ put_device(dev);
+ return ERR_PTR(rc);
+ }
+-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
++EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_nvdimm_bridge, "cxl_pmem");
+
+ static void cxl_nvdimm_release(struct device *dev)
+ {
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index ba17fa86d249e..2854e47fd9869 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -893,6 +893,8 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
+ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
+ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port);
++struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
++ struct cxl_port *port);
+ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
+ bool is_cxl_nvdimm(struct device *dev);
+ int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
+diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
+index e197883690efc..714beaf1704be 100644
+--- a/drivers/cxl/pmem.c
++++ b/drivers/cxl/pmem.c
+@@ -13,6 +13,20 @@
+
+ static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+
++/**
++ * __devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
++ * @host: platform firmware root device
++ * @port: CXL port at the root of a CXL topology
++ *
++ * Return: bridge device that can host cxl_nvdimm objects
++ */
++struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
++ struct cxl_port *port)
++{
++ return __devm_cxl_add_nvdimm_bridge(host, port);
++}
++EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
++
+ static void clear_exclusive(void *mds)
+ {
+ clear_exclusive_cxl_commands(mds, exclusive_cmds);
+--
+2.51.0
+
--- /dev/null
+From 7e1cd24fe3812e3653fdb3533c1baaed15eb19da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 14:27:05 +0100
+Subject: debugobject: Make it work with deferred page initialization - again
+
+From: Thomas Gleixner <tglx@kernel.org>
+
+[ Upstream commit fd3634312a04f336dcbfb481060219f0cd320738 ]
+
+debugobjects uses __GFP_HIGH for allocations as it might be invoked
+within locked regions. That worked perfectly fine until v6.18. It still
+works correctly when deferred page initialization is disabled and works
+by chance when no page allocation is required before deferred page
+initialization has completed.
+
+Since v6.18 allocations w/o a reclaim flag cause new_slab() to end up in
+alloc_frozen_pages_nolock_noprof(), which returns early when deferred
+page initialization has not yet completed. As the deferred page
+initialization takes quite a while the debugobject pool is depleted and
+debugobjects are disabled.
+
+This can be worked around when PREEMPT_COUNT is enabled as that allows
+debugobjects to add __GFP_KSWAPD_RECLAIM to the GFP flags when the context
+is preemptible. When PREEMPT_COUNT is disabled the context is unknown and
+the reclaim bit can't be set because the caller might hold locks which
+might deadlock in the allocator.
+
+In preemptible context the reclaim bit is harmless and not a performance
+issue as that's usually invoked from slow path initialization context.
+
+That makes debugobjects depend on PREEMPT_COUNT || !DEFERRED_STRUCT_PAGE_INIT.
+
+Fixes: af92793e52c3 ("slab: Introduce kmalloc_nolock() and kfree_nolock().")
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Link: https://patch.msgid.link/87pl6gznti.ffs@tglx
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/Kconfig.debug | 1 +
+ lib/debugobjects.c | 19 ++++++++++++++++++-
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 4bae3b389a9c5..52c7a3a89f088 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -723,6 +723,7 @@ source "mm/Kconfig.debug"
+
+ config DEBUG_OBJECTS
+ bool "Debug object operations"
++ depends on PREEMPT_COUNT || !DEFERRED_STRUCT_PAGE_INIT
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, additional code will be inserted into the
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 89a1d6745dc2c..12f50de85b621 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -398,9 +398,26 @@ static void fill_pool(void)
+
+ atomic_inc(&cpus_allocating);
+ while (pool_should_refill(&pool_global)) {
++ gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+ HLIST_HEAD(head);
+
+- if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
++ /*
++ * Allow reclaim only in preemptible context and during
++ * early boot. If not preemptible, the caller might hold
++ * locks causing a deadlock in the allocator.
++ *
++ * If the reclaim flag is not set during early boot then
++ * allocations, which happen before deferred page
++ * initialization has completed, will fail.
++ *
++ * In preemptible context the flag is harmless and not a
++ * performance issue as that's usually invoked from slow
++ * path initialization context.
++ */
++ if (preemptible() || system_state < SYSTEM_SCHEDULING)
++ gfp |= __GFP_KSWAPD_RECLAIM;
++
++ if (!kmem_alloc_batch(&head, obj_cache, gfp))
+ break;
+
+ guard(raw_spinlock_irqsave)(&pool_lock);
+--
+2.51.0
+
--- /dev/null
+From cba744dad4ab63c47e4c9bbae7eb3cd8280581ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 10:18:51 +0530
+Subject: drm/amdgpu: Fix error handling in slot reset
+
+From: Lijo Lazar <lijo.lazar@amd.com>
+
+[ Upstream commit b57c4ec98c17789136a4db948aec6daadceb5024 ]
+
+If the device has not recovered after slot reset is called, it goes to
+out label for error handling. There it could make decision based on
+uninitialized hive pointer and could result in accessing an uninitialized
+list.
+
+Initialize the list and hive properly so that it handles the error
+situation and also releases the reset domain lock which is acquired
+during error_detected callback.
+
+Fixes: 732c6cefc1ec ("drm/amdgpu: Replace tmp_adev with hive in amdgpu_pci_slot_reset")
+Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
+Reviewed-by: Ce Sun <cesun102@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit bb71362182e59caa227e4192da5a612b09349696)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 09f9d82e572da..ad5a3235a75f1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -7203,6 +7203,15 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
+
+ memset(&reset_context, 0, sizeof(reset_context));
++ INIT_LIST_HEAD(&device_list);
++ hive = amdgpu_get_xgmi_hive(adev);
++ if (hive) {
++ mutex_lock(&hive->hive_lock);
++ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
++ list_add_tail(&tmp_adev->reset_list, &device_list);
++ } else {
++ list_add_tail(&adev->reset_list, &device_list);
++ }
+
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+@@ -7243,19 +7252,13 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+- INIT_LIST_HEAD(&device_list);
+
+- hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+- mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
++ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+- list_add_tail(&tmp_adev->reset_list, &device_list);
+- }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+- list_add_tail(&adev->reset_list, &device_list);
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
+--
+2.51.0
+
--- /dev/null
+From a32059e6eecff6b23cc414ef2177c367f3f7cc4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 13:50:23 -0800
+Subject: drm/amdgpu: Fix locking bugs in error paths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 480ad5f6ead4a47b969aab6618573cd6822bb6a4 ]
+
+Do not unlock psp->ras_context.mutex if it has not been locked. This has
+been detected by the Clang thread-safety analyzer.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: YiPeng Chai <YiPeng.Chai@amd.com>
+Cc: Hawking Zhang <Hawking.Zhang@amd.com>
+Cc: amd-gfx@lists.freedesktop.org
+Fixes: b3fb79cda568 ("drm/amdgpu: add mutex to protect ras shared memory")
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 6fa01b4335978051d2cd80841728fd63cc597970)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 6e8aad91bcd30..0d3c18f04ac36 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -332,13 +332,13 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ if (!context || !context->initialized) {
+ dev_err(adev->dev, "TA is not initialized\n");
+ ret = -EINVAL;
+- goto err_free_shared_buf;
++ goto free_shared_buf;
+ }
+
+ if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_invoke) {
+ dev_err(adev->dev, "Unsupported function to invoke TA\n");
+ ret = -EOPNOTSUPP;
+- goto err_free_shared_buf;
++ goto free_shared_buf;
+ }
+
+ context->session_id = ta_id;
+@@ -346,7 +346,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ mutex_lock(&psp->ras_context.mutex);
+ ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
+ if (ret)
+- goto err_free_shared_buf;
++ goto unlock;
+
+ ret = psp_fn_ta_invoke(psp, cmd_id);
+ if (ret || context->resp_status) {
+@@ -354,15 +354,17 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ ret, context->resp_status);
+ if (!ret) {
+ ret = -EINVAL;
+- goto err_free_shared_buf;
++ goto unlock;
+ }
+ }
+
+ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
+ ret = -EFAULT;
+
+-err_free_shared_buf:
++unlock:
+ mutex_unlock(&psp->ras_context.mutex);
++
++free_shared_buf:
+ kfree(shared_buf);
+
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 9a3a6e8f32855e02d3436a9d8c427707437b9ef1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:07 -0800
+Subject: drm/amdgpu: Unlock a mutex before destroying it
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 5e0bcc7b88bcd081aaae6f481b10d9ab294fcb69 ]
+
+Mutexes must be unlocked before these are destroyed. This has been detected
+by the Clang thread-safety analyzer.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Yang Wang <kevinyang.wang@amd.com>
+Cc: Hawking Zhang <Hawking.Zhang@amd.com>
+Cc: amd-gfx@lists.freedesktop.org
+Fixes: f5e4cc8461c4 ("drm/amdgpu: implement RAS ACA driver framework")
+Reviewed-by: Yang Wang <kevinyang.wang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 270258ba320beb99648dceffb67e86ac76786e55)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+index 9b31804491500..3f9b094e93a29 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+@@ -641,6 +641,7 @@ static void aca_error_fini(struct aca_error *aerr)
+ aca_bank_error_remove(aerr, bank_error);
+
+ out_unlock:
++ mutex_unlock(&aerr->lock);
+ mutex_destroy(&aerr->lock);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From be0e1798b7b6e5f0255cd32cb0478a5611544941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 12:41:31 +0000
+Subject: drm/amdgpu/userq: Do not allow userspace to trivially trigger kernel
+ warnings
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+[ Upstream commit 7b7d7693a55d606d700beb9549c9f7f0e5d9c24f ]
+
+Userspace can either deliberately pass in the too small num_fences, or the
+required number can legitimately grow between the two calls to the userq
+wait ioctl. In both cases we do not want to emit the kernel warning
+backtrace since nothing is wrong with the kernel and userspace will simply
+get an errno reported back. So let's simply drop the WARN_ONs.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: a292fdecd728 ("drm/amdgpu: Implement userqueue signal/wait IOCTL")
+Cc: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 2c333ea579de6cc20ea7bc50e9595ef72863e65c)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+index 85e9edc1cb6ff..f61886745e33d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+@@ -830,7 +830,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+@@ -847,7 +847,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+@@ -871,7 +871,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+@@ -895,7 +895,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ if (r)
+ goto free_fences;
+
+- if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
++ if (num_fences >= wait_info->num_fences) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+--
+2.51.0
+
--- /dev/null
+From 689419e8700ce596c79d377679b0a676ef45bf4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Feb 2026 12:06:21 +0800
+Subject: drm/bridge: synopsys: dw-dp: Check return value of
+ devm_drm_bridge_add() in dw_dp_bind()
+
+From: Chen Ni <nichen@iscas.ac.cn>
+
+[ Upstream commit 496daa2759260374bb9c9b2196a849aa3bc513a8 ]
+
+Return the value of devm_drm_bridge_add() in order to propagate the
+error properly, if it fails due to resource allocation failure or bridge
+registration failure.
+
+This ensures that the bind function fails safely rather than proceeding
+with a potentially incomplete bridge setup.
+
+Fixes: b726970486d8 ("drm/bridge: synopsys: dw-dp: add bridge before attaching")
+Signed-off-by: Chen Ni <nichen@iscas.ac.cn>
+Reviewed-by: Andy Yan <andyshrk@163.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Link: https://patch.msgid.link/20260206040621.4095517-1-nichen@iscas.ac.cn
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/synopsys/dw-dp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
+index 4323424524847..07f7a2e0d9f2a 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-dp.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
+@@ -2049,7 +2049,9 @@ struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ bridge->ycbcr_420_allowed = true;
+
+- devm_drm_bridge_add(dev, bridge);
++ ret = devm_drm_bridge_add(dev, bridge);
++ if (ret)
++ return ERR_PTR(ret);
+
+ dp->aux.dev = dev;
+ dp->aux.drm_dev = encoder->dev;
+--
+2.51.0
+
--- /dev/null
+From aa879ef6c819b3d3408dfcbde8c3cb38f29cdba7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 22:12:28 +0000
+Subject: drm/client: Do not destroy NULL modes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Cavitt <jonathan.cavitt@intel.com>
+
+[ Upstream commit c601fd5414315fc515f746b499110e46272e7243 ]
+
+'modes' in drm_client_modeset_probe may fail to kcalloc. If this
+occurs, we jump to 'out', calling modes_destroy on it, which
+dereferences it. This may result in a NULL pointer dereference in the
+error case. Prevent that.
+
+Fixes: 3039cc0c0653 ("drm/client: Make copies of modes")
+Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patch.msgid.link/20260224221227.69126-2-jonathan.cavitt@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_client_modeset.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index fc4caf7da5fcd..4a72f323e83e3 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -930,7 +930,8 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ mutex_unlock(&client->modeset_mutex);
+ out:
+ kfree(crtcs);
+- modes_destroy(dev, modes, connector_count);
++ if (modes)
++ modes_destroy(dev, modes, connector_count);
+ kfree(modes);
+ kfree(offsets);
+ kfree(enabled);
+--
+2.51.0
+
--- /dev/null
+From ba272e16a3bcee2da9d291cca49796dce84b0008 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Feb 2026 22:47:26 +0000
+Subject: drm/fourcc: fix plane order for 10/12/16-bit YCbCr formats
+
+From: Simon Ser <contact@emersion.fr>
+
+[ Upstream commit e9e0b48cd15b46dcb2bbc165f6b0fee698b855d6 ]
+
+The short comments had the correct order, but the long comments
+had the planes reversed.
+
+Fixes: 2271e0a20ef7 ("drm: drm_fourcc: add 10/12/16bit software decoder YCbCr formats")
+Signed-off-by: Simon Ser <contact@emersion.fr>
+Reviewed-by: Daniel Stone <daniels@collabora.com>
+Reviewed-by: Robert Mader <robert.mader@collabora.com>
+Link: https://patch.msgid.link/20260208224718.57199-1-contact@emersion.fr
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/drm/drm_fourcc.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index e527b24bd824b..c89aede3cb120 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -401,8 +401,8 @@ extern "C" {
+ * implementation can multiply the values by 2^6=64. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [6:10] little endian
+- * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian
+- * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian
++ * index 1 = Cb plane, [15:0] z:Cb [6:10] little endian
++ * index 2 = Cr plane, [15:0] z:Cr [6:10] little endian
+ */
+ #define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+ #define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+@@ -414,8 +414,8 @@ extern "C" {
+ * implementation can multiply the values by 2^4=16. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [4:12] little endian
+- * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian
+- * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian
++ * index 1 = Cb plane, [15:0] z:Cb [4:12] little endian
++ * index 2 = Cr plane, [15:0] z:Cr [4:12] little endian
+ */
+ #define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+ #define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+@@ -424,8 +424,8 @@ extern "C" {
+ /*
+ * 3 plane YCbCr
+ * index 0 = Y plane, [15:0] Y little endian
+- * index 1 = Cr plane, [15:0] Cr little endian
+- * index 2 = Cb plane, [15:0] Cb little endian
++ * index 1 = Cb plane, [15:0] Cb little endian
++ * index 2 = Cr plane, [15:0] Cr little endian
+ */
+ #define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+ #define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+--
+2.51.0
+
--- /dev/null
+From 10010ff6e6c16f502020302db97e0207c4c80768 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 17:06:29 +0800
+Subject: drm/imx: parallel-display: check return value of
+ devm_drm_bridge_add() in imx_pd_probe()
+
+From: Chen Ni <nichen@iscas.ac.cn>
+
+[ Upstream commit c5f8658f97ec392eeaf355d4e9775ae1f23ca1d3 ]
+
+Return the value of devm_drm_bridge_add() in order to propagate the
+error properly, if it fails due to resource allocation failure or bridge
+registration failure.
+
+This ensures that the probe function fails safely rather than proceeding
+with a potentially incomplete bridge setup.
+
+Fixes: bf7e97910b9f ("drm/imx: parallel-display: add the bridge before attaching it")
+Signed-off-by: Chen Ni <nichen@iscas.ac.cn>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Link: https://patch.msgid.link/20260204090629.2209542-1-nichen@iscas.ac.cn
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/ipuv3/parallel-display.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+index 6fbf505d2801d..590120a33fa07 100644
+--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
++++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+@@ -256,7 +256,9 @@ static int imx_pd_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, imxpd);
+
+- devm_drm_bridge_add(dev, &imxpd->bridge);
++ ret = devm_drm_bridge_add(dev, &imxpd->bridge);
++ if (ret)
++ return ret;
+
+ return component_add(dev, &imx_pd_ops);
+ }
+--
+2.51.0
+
--- /dev/null
+From 5be2290c8ea91550c3227c1ca25d3d8d25033be8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 00:21:19 +0800
+Subject: drm/logicvc: Fix device node reference leak in
+ logicvc_drm_config_parse()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fef0e649f8b42bdffe4a916dd46e1b1e9ad2f207 ]
+
+The logicvc_drm_config_parse() function calls of_get_child_by_name() to
+find the "layers" node but fails to release the reference, leading to a
+device node reference leak.
+
+Fix this by using the __free(device_node) cleanup attribute to automatically
+release the reference when the variable goes out of scope.
+
+Fixes: efeeaefe9be5 ("drm: Add support for the LogiCVC display controller")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Reviewed-by: Kory Maincent <kory.maincent@bootlin.com>
+Link: https://patch.msgid.link/20260130-logicvc_drm-v1-1-04366463750c@gmail.com
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/logicvc/logicvc_drm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
+index 204b0fee55d0b..bbebf4fc7f51a 100644
+--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
++++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
+@@ -92,7 +92,6 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_drm_config *config = &logicvc->config;
+- struct device_node *layers_node;
+ int ret;
+
+ logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
+@@ -128,7 +127,8 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ if (ret)
+ return ret;
+
+- layers_node = of_get_child_by_name(of_node, "layers");
++ struct device_node *layers_node __free(device_node) =
++ of_get_child_by_name(of_node, "layers");
+ if (!layers_node) {
+ drm_err(drm_dev, "Missing non-optional layers node\n");
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From 9a00c44d86419f99074f1c67aa2c24abbf4a4c68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 Feb 2026 22:04:38 -0600
+Subject: drm/tiny: sharp-memory: fix pointer error dereference
+
+From: Ethan Tidmore <ethantidmore06@gmail.com>
+
+[ Upstream commit 46120745bb4e7e1f09959624716b4c5d6e2c2e9e ]
+
+The function devm_drm_dev_alloc() returns a pointer error upon failure
+not NULL. Change null check to pointer error check.
+
+Detected by Smatch:
+drivers/gpu/drm/tiny/sharp-memory.c:549 sharp_memory_probe() error:
+'smd' dereferencing possible ERR_PTR()
+
+Fixes: b8f9f21716fec ("drm/tiny: Add driver for Sharp Memory LCD")
+Signed-off-by: Ethan Tidmore <ethantidmore06@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patch.msgid.link/20260216040438.43702-1-ethantidmore06@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tiny/sharp-memory.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
+index 64272cd0f6e22..cbf69460ebf32 100644
+--- a/drivers/gpu/drm/tiny/sharp-memory.c
++++ b/drivers/gpu/drm/tiny/sharp-memory.c
+@@ -541,8 +541,8 @@ static int sharp_memory_probe(struct spi_device *spi)
+
+ smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver,
+ struct sharp_memory_device, drm);
+- if (!smd)
+- return -ENOMEM;
++ if (IS_ERR(smd))
++ return PTR_ERR(smd);
+
+ spi_set_drvdata(spi, smd);
+
+--
+2.51.0
+
--- /dev/null
+From e5f0fa1c2defc39c7be81fb246fcab559a5bbd0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jan 2026 12:12:36 -0500
+Subject: drm/vmwgfx: Fix invalid kref_put callback in vmw_bo_dirty_release
+
+From: Brad Spengler <brad.spengler@opensrcsec.com>
+
+[ Upstream commit 211ecfaaef186ee5230a77d054cdec7fbfc6724a ]
+
+The kref_put() call uses (void *)kvfree as the release callback, which
+is incorrect. kref_put() expects a function with signature
+void (*release)(struct kref *), but kvfree has signature
+void (*)(const void *). Calling through an incompatible function pointer
+is undefined behavior.
+
+The code only worked by accident because ref_count is the first member
+of vmw_bo_dirty, making the kref pointer equal to the struct pointer.
+
+Fix this by adding a proper release callback that uses container_of()
+to retrieve the containing structure before freeing.
+
+Fixes: c1962742ffff ("drm/vmwgfx: Use kref in vmw_bo_dirty")
+Signed-off-by: Brad Spengler <brad.spengler@opensrcsec.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Cc: Ian Forbes <ian.forbes@broadcom.com>
+Link: https://patch.msgid.link/20260107171236.3573118-1-zack.rusin@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+index fd4e76486f2d1..45561bc1c9eff 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+@@ -260,6 +260,13 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
+ return ret;
+ }
+
++static void vmw_bo_dirty_free(struct kref *kref)
++{
++ struct vmw_bo_dirty *dirty = container_of(kref, struct vmw_bo_dirty, ref_count);
++
++ kvfree(dirty);
++}
++
+ /**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+@@ -274,7 +281,7 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
+ {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+- if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
++ if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free))
+ vbo->dirty = NULL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From ebf1f82c49bb7015e66e07ac2c4854c35d225ed3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jan 2026 11:53:57 -0600
+Subject: drm/vmwgfx: Return the correct value in vmw_translate_ptr functions
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit 5023ca80f9589295cb60735016e39fc5cc714243 ]
+
+Before the referenced fixes these functions used a lookup function that
+returned a pointer. This was changed to another lookup function that
+returned an error code with the pointer becoming an out parameter.
+
+The error path when the lookup failed was not changed to reflect this
+change and the code continued to return the PTR_ERR of the now
+uninitialized pointer. This could cause the vmw_translate_ptr functions
+to return success when they actually failed causing further uninitialized
+and OOB accesses.
+
+Reported-by: Kuzey Arda Bulut <kuzeyardabulut@gmail.com>
+Fixes: a309c7194e8a ("drm/vmwgfx: Remove rcu locks from user resources")
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patch.msgid.link/20260113175357.129285-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 3057f8baa7d25..e1f18020170ab 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1143,7 +1143,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+@@ -1199,7 +1199,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+--
+2.51.0
+
--- /dev/null
+From 968bbf34435d6ad876b471647bce27e8364171fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Feb 2026 14:30:59 -0800
+Subject: drm/xe/wa: Steer RMW of MCR registers while building default LRC
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+[ Upstream commit 43d37df67f7770d8d261fdcb64ecc8c314e91303 ]
+
+When generating the default LRC, if a register is not masked, we apply
+any save-restore programming necessary via a read-modify-write sequence
+that will ensure we only update the relevant bits/fields without
+clobbering the rest of the register. However some of the registers that
+need to be updated might be MCR registers which require steering to a
+non-terminated instance to ensure we can read back a valid, non-zero
+value. The steering of reads originating from a command streamer is
+controlled by register CS_MMIO_GROUP_INSTANCE_SELECT. Emit additional
+MI_LRI commands to update the steering before any RMW of an MCR register
+to ensure the reads are performed properly.
+
+Note that needing to perform a RMW of an MCR register while building the
+default LRC is pretty rare. Most of the MCR registers that are part of
+an engine's LRCs are also masked registers, so no MCR is necessary.
+
+Fixes: f2f90989ccff ("drm/xe: Avoid reading RMW registers in emit_wa_job")
+Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
+Reviewed-by: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+Link: https://patch.msgid.link/20260206223058.387014-2-matthew.d.roper@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 6c2e331c915ba9e774aa847921262805feb00863)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/regs/xe_engine_regs.h | 6 +++
+ drivers/gpu/drm/xe/xe_gt.c | 66 +++++++++++++++++++-----
+ 2 files changed, 60 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+index 68172b0248a6e..dc5a4fafa70cf 100644
+--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+@@ -96,6 +96,12 @@
+ #define ENABLE_SEMAPHORE_POLL_BIT REG_BIT(13)
+
+ #define RING_CMD_CCTL(base) XE_REG((base) + 0xc4, XE_REG_OPTION_MASKED)
++
++#define CS_MMIO_GROUP_INSTANCE_SELECT(base) XE_REG((base) + 0xcc)
++#define SELECTIVE_READ_ADDRESSING REG_BIT(30)
++#define SELECTIVE_READ_GROUP REG_GENMASK(29, 23)
++#define SELECTIVE_READ_INSTANCE REG_GENMASK(22, 16)
++
+ /*
+ * CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
+ * The lsb of each can be considered a separate enabling bit for encryption.
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index cdce210e36f25..e89cbe498c427 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -187,11 +187,15 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ return ret;
+ }
+
++/* Dwords required to emit a RMW of a register */
++#define EMIT_RMW_DW 20
++
+ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ {
+- struct xe_reg_sr *sr = &q->hwe->reg_lrc;
++ struct xe_hw_engine *hwe = q->hwe;
++ struct xe_reg_sr *sr = &hwe->reg_lrc;
+ struct xe_reg_sr_entry *entry;
+- int count_rmw = 0, count = 0, ret;
++ int count_rmw = 0, count_rmw_mcr = 0, count = 0, ret;
+ unsigned long idx;
+ struct xe_bb *bb;
+ size_t bb_len = 0;
+@@ -201,6 +205,8 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ ++count;
++ else if (entry->reg.mcr)
++ ++count_rmw_mcr;
+ else
+ ++count_rmw;
+ }
+@@ -208,17 +214,35 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ if (count)
+ bb_len += count * 2 + 1;
+
+- if (count_rmw)
+- bb_len += count_rmw * 20 + 7;
++ /*
++ * RMW of MCR registers is the same as a normal RMW, except an
++ * additional LRI (3 dwords) is required per register to steer the read
++ * to a non-terminated instance.
++ *
++ * We could probably shorten the batch slightly by eliding the
++ * steering for consecutive MCR registers that have the same
++ * group/instance target, but it's not worth the extra complexity to do
++ * so.
++ */
++ bb_len += count_rmw * EMIT_RMW_DW;
++ bb_len += count_rmw_mcr * (EMIT_RMW_DW + 3);
++
++ /*
++ * After doing all RMW, we need 7 trailing dwords to clean up,
++ * plus an additional 3 dwords to reset steering if any of the
++ * registers were MCR.
++ */
++ if (count_rmw || count_rmw_mcr)
++ bb_len += 7 + (count_rmw_mcr ? 3 : 0);
+
+- if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
++ if (hwe->class == XE_ENGINE_CLASS_RENDER)
+ /*
+ * Big enough to emit all of the context's 3DSTATE via
+ * xe_lrc_emit_hwe_state_instructions()
+ */
+- bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32);
++ bb_len += xe_gt_lrc_size(gt, hwe->class) / sizeof(u32);
+
+- xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len);
++ xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", hwe->name, bb_len);
+
+ bb = xe_bb_new(gt, bb_len, false);
+ if (IS_ERR(bb))
+@@ -253,13 +277,23 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ }
+ }
+
+- if (count_rmw) {
+- /* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */
+-
++ if (count_rmw || count_rmw_mcr) {
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ continue;
+
++ if (entry->reg.mcr) {
++ struct xe_reg_mcr reg = { .__reg.raw = entry->reg.raw };
++ u8 group, instance;
++
++ xe_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance);
++ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
++ *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(hwe->mmio_base).addr;
++ *cs++ = SELECTIVE_READ_ADDRESSING |
++ REG_FIELD_PREP(SELECTIVE_READ_GROUP, group) |
++ REG_FIELD_PREP(SELECTIVE_READ_INSTANCE, instance);
++ }
++
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ *cs++ = entry->reg.addr;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+@@ -285,8 +319,9 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = entry->reg.addr;
+
+- xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
+- entry->reg.addr, entry->clr_bits, entry->set_bits);
++ xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x%s\n",
++ entry->reg.addr, entry->clr_bits, entry->set_bits,
++ entry->reg.mcr ? " (MCR)" : "");
+ }
+
+ /* reset used GPR */
+@@ -298,6 +333,13 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = 0;
++
++ /* reset steering */
++ if (count_rmw_mcr) {
++ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
++ *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(q->hwe->mmio_base).addr;
++ *cs++ = 0;
++ }
+ }
+
+ cs = xe_lrc_emit_hwe_state_instructions(q, cs);
+--
+2.51.0
+
--- /dev/null
+From b32ed0a2d5602023a838b7a4c85f1a21427ea608 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 17:34:51 +0800
+Subject: gpio: shared: fix memory leaks
+
+From: Daniel J Blueman <daniel@quora.org>
+
+[ Upstream commit 32e0a7ad9c841f46549ccac0f1cca347a40d8685 ]
+
+On a Snapdragon X1 Elite laptop (Lenovo Yoga Slim 7x), kmemleak reports
+three sets of:
+
+unreferenced object 0xffff00080187f400 (size 1024):
+ comm "swapper/0", pid 1, jiffies 4294667327
+ hex dump (first 32 bytes):
+ 58 bd 70 01 08 00 ff ff 58 bd 70 01 08 00 ff ff X.p.....X.p.....
+ 00 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 ................
+ backtrace (crc 1665d1f8):
+ kmemleak_alloc+0xf4/0x12c
+ __kmalloc_cache_noprof+0x370/0x49c
+ gpio_shared_make_ref+0x70/0x16c
+ gpio_shared_of_traverse+0x4e8/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_init+0x34/0x1c4
+ do_one_initcall+0x50/0x280
+ kernel_init_freeable+0x290/0x33c
+ kernel_init+0x28/0x14c
+ ret_from_fork+0x10/0x20
+
+unreferenced object 0xffff00080170c140 (size 8):
+ comm "swapper/0", pid 1, jiffies 4294667327
+ hex dump (first 8 bytes):
+ 72 65 73 65 74 00 00 00 reset...
+ backtrace (crc fc24536):
+ kmemleak_alloc+0xf4/0x12c
+ __kmalloc_node_track_caller_noprof+0x3c4/0x584
+ kstrdup+0x4c/0xcc
+ gpio_shared_make_ref+0x8c/0x16c
+ gpio_shared_of_traverse+0x4e8/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_of_traverse+0x200/0x5f4
+ gpio_shared_init+0x34/0x1c4
+ do_one_initcall+0x50/0x280
+ kernel_init_freeable+0x290/0x33c
+ kernel_init+0x28/0x14c
+ ret_from_fork+0x10/0x20
+
+Fix this by decrementing the reference count of each list entry rather than
+only the first.
+
+Fix verified on the same laptop.
+
+Fixes: a060b8c511abb ("gpiolib: implement low-level, shared GPIO support")
+Signed-off-by: Daniel J Blueman <daniel@quora.org>
+Link: https://patch.msgid.link/20260220093452.101655-1-daniel@quora.org
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpiolib-shared.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpio/gpiolib-shared.c b/drivers/gpio/gpiolib-shared.c
+index 9e65442034393..e16f467b72e7a 100644
+--- a/drivers/gpio/gpiolib-shared.c
++++ b/drivers/gpio/gpiolib-shared.c
+@@ -753,14 +753,14 @@ static bool gpio_shared_entry_is_really_shared(struct gpio_shared_entry *entry)
+ static void gpio_shared_free_exclusive(void)
+ {
+ struct gpio_shared_entry *entry, *epos;
++ struct gpio_shared_ref *ref, *rpos;
+
+ list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
+ if (gpio_shared_entry_is_really_shared(entry))
+ continue;
+
+- gpio_shared_drop_ref(list_first_entry(&entry->refs,
+- struct gpio_shared_ref,
+- list));
++ list_for_each_entry_safe(ref, rpos, &entry->refs, list)
++ gpio_shared_drop_ref(ref);
+ gpio_shared_drop_entry(entry);
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From e78fbb0ea1e83a618cdc184daca0f5b7f936d138 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 11:36:09 -0700
+Subject: io_uring/cmd_net: use READ_ONCE() for ->addr3 read
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit a46435537a844d0f7b4b620baf962cad136422de ]
+
+Any SQE read should use READ_ONCE(), to ensure the result is read once
+and only once. Doesn't really matter for this case, but it's better to
+keep these 100% consistent and always use READ_ONCE() for the prep side
+of SQE handling.
+
+Fixes: 5d24321e4c15 ("io_uring: Introduce getsockname io_uring cmd")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/cmd_net.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/io_uring/cmd_net.c b/io_uring/cmd_net.c
+index 3db34e2d22ee5..17d499f68fe6d 100644
+--- a/io_uring/cmd_net.c
++++ b/io_uring/cmd_net.c
+@@ -145,7 +145,7 @@ static int io_uring_cmd_getsockname(struct socket *sock,
+ return -EINVAL;
+
+ uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+- ulen = u64_to_user_ptr(sqe->addr3);
++ ulen = u64_to_user_ptr(READ_ONCE(sqe->addr3));
+ peer = READ_ONCE(sqe->optlen);
+ if (peer > 1)
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From b0947f1a7bdbcdd1b3f93a7c82629e62d410e85f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:36:10 +0200
+Subject: irqchip/ls-extirq: Fix devm_of_iomap() error check
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit fe5669e363b129cde285bfb4d45abb72d1d77cfc ]
+
+The devm_of_iomap() function returns an ERR_PTR() encoded error code on
+failure. Replace the incorrect check against NULL with IS_ERR().
+
+Fixes: 05cd654829dd ("irqchip/ls-extirq: Convert to a platform driver to make it work again")
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Reviewed-by: Herve Codina <herve.codina@bootlin.com>
+Link: https://patch.msgid.link/20260224113610.1129022-3-ioana.ciornei@nxp.com
+Closes: https://lore.kernel.org/all/aYXvfbfT6w0TMsXS@stanley.mountain/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-ls-extirq.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
+index 96f9c20621cf5..d724fe8439801 100644
+--- a/drivers/irqchip/irq-ls-extirq.c
++++ b/drivers/irqchip/irq-ls-extirq.c
+@@ -190,8 +190,10 @@ static int ls_extirq_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n");
+
+ priv->intpcr = devm_of_iomap(dev, node, 0, NULL);
+- if (!priv->intpcr)
+- return dev_err_probe(dev, -ENOMEM, "Cannot ioremap OF node %pOF\n", node);
++ if (IS_ERR(priv->intpcr)) {
++ return dev_err_probe(dev, PTR_ERR(priv->intpcr),
++ "Cannot ioremap OF node %pOF\n", node);
++ }
+
+ ret = ls_extirq_parse_map(priv, node);
+ if (ret)
+--
+2.51.0
+
--- /dev/null
+From 503ea50cc7eece7eb625f791110786b9f24ff8d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 12:41:25 +0100
+Subject: irqchip/sifive-plic: Fix frozen interrupt due to affinity setting
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit 1072020685f4b81f6efad3b412cdae0bd62bb043 ]
+
+PLIC ignores interrupt completion message for disabled interrupt, explained
+by the specification:
+
+ The PLIC signals it has completed executing an interrupt handler by
+ writing the interrupt ID it received from the claim to the
+ claim/complete register. The PLIC does not check whether the completion
+ ID is the same as the last claim ID for that target. If the completion
+ ID does not match an interrupt source that is currently enabled for
+ the target, the completion is silently ignored.
+
+This caused problems in the past, because an interrupt can be disabled
+while still being handled and plic_irq_eoi() had no effect. That was fixed
+by checking if the interrupt is disabled, and if so enable it, before
+sending the completion message. That check is done with irqd_irq_disabled().
+
+However, that is not sufficient because the enable bit for the handling
+hart can be zero despite irqd_irq_disabled(d) being false. This can happen
+when affinity setting is changed while a hart is still handling the
+interrupt.
+
+This problem is easily reproducible by dumping a large file to uart (which
+generates lots of interrupts) and at the same time keep changing the uart
+interrupt's affinity setting. The uart port becomes frozen almost
+instantaneously.
+
+Fix this by checking PLIC's enable bit instead of irqd_irq_disabled().
+
+Fixes: cc9f04f9a84f ("irqchip/sifive-plic: Implement irq_set_affinity() for SMP host")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260212114125.3148067-1-namcao@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-sifive-plic.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 60fd8f91762b1..70058871d2fb6 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -172,8 +172,13 @@ static void plic_irq_disable(struct irq_data *d)
+ static void plic_irq_eoi(struct irq_data *d)
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
++ u32 __iomem *reg;
++ bool enabled;
++
++ reg = handler->enable_base + (d->hwirq / 32) * sizeof(u32);
++ enabled = readl(reg) & BIT(d->hwirq % 32);
+
+- if (unlikely(irqd_irq_disabled(d))) {
++ if (unlikely(!enabled)) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+--
+2.51.0
+
--- /dev/null
+From 024a5060aec6d7ea0a9d8e5e5ada57320a0ce1d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 14:38:14 +0000
+Subject: KVM: arm64: Fix ID register initialization for non-protected pKVM
+ guests
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit 7e7c2cf0024d89443a7af52e09e47b1fe634ab17 ]
+
+In protected mode, the hypervisor maintains a separate instance of
+the `kvm` structure for each VM. For non-protected VMs, this structure is
+initialized from the host's `kvm` state.
+
+Currently, `pkvm_init_features_from_host()` copies the
+`KVM_ARCH_FLAG_ID_REGS_INITIALIZED` flag from the host without the
+underlying `id_regs` data being initialized. This results in the
+hypervisor seeing the flag as set while the ID registers remain zeroed.
+
+Consequently, `kvm_has_feat()` checks at EL2 fail (return 0) for
+non-protected VMs. This breaks logic that relies on feature detection,
+such as `ctxt_has_tcrx()` for TCR2_EL1 support. As a result, certain
+system registers (e.g., TCR2_EL1, PIR_EL1, POR_EL1) are not
+saved/restored during the world switch, which could lead to state
+corruption.
+
+Fix this by explicitly copying the ID registers from the host `kvm` to
+the hypervisor `kvm` for non-protected VMs during initialization, since
+we trust the host with its non-protected guests' features. Also ensure
+`KVM_ARCH_FLAG_ID_REGS_INITIALIZED` is cleared initially in
+`pkvm_init_features_from_host` so that `vm_copy_id_regs` can properly
+initialize them and set the flag once done.
+
+Fixes: 41d6028e28bd ("KVM: arm64: Convert the SVE guest vcpu flag to a vm flag")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://patch.msgid.link/20260213143815.1732675-4-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/hyp/nvhe/pkvm.c | 35 ++++++++++++++++++++++++++++++++--
+ 1 file changed, 33 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+index 12b2acfbcfd14..59a0102218189 100644
+--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
++++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+@@ -345,6 +345,7 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
+ /* No restrictions for non-protected VMs. */
+ if (!kvm_vm_is_protected(kvm)) {
+ hyp_vm->kvm.arch.flags = host_arch_flags;
++ hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);
+
+ bitmap_copy(kvm->arch.vcpu_features,
+ host_kvm->arch.vcpu_features,
+@@ -471,6 +472,35 @@ static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *h
+ return ret;
+ }
+
++static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
++{
++ struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
++ const struct kvm *host_kvm = hyp_vm->host_kvm;
++ struct kvm *kvm = &hyp_vm->kvm;
++
++ if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
++ return -EINVAL;
++
++ if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
++ return 0;
++
++ memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
++
++ return 0;
++}
++
++static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
++{
++ int ret = 0;
++
++ if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
++ kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
++ else
++ ret = vm_copy_id_regs(hyp_vcpu);
++
++ return ret;
++}
++
+ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
+ struct pkvm_hyp_vm *hyp_vm,
+ struct kvm_vcpu *host_vcpu)
+@@ -490,8 +520,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
+ hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
+ hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+
+- if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+- kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
++ ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
++ if (ret)
++ goto done;
+
+ ret = pkvm_vcpu_init_traps(hyp_vcpu);
+ if (ret)
+--
+2.51.0
+
--- /dev/null
+From 86de397ee7b7352cebe5ebe4a8f890a81e7c259e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 14:38:12 +0000
+Subject: KVM: arm64: Hide S1POE from guests when not supported by the host
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit f66857bafd4f151c5cc6856e47be2e12c1721e43 ]
+
+When CONFIG_ARM64_POE is disabled, KVM does not save/restore POR_EL1.
+However, ID_AA64MMFR3_EL1 sanitisation currently exposes the feature to
+guests whenever the hardware supports it, ignoring the host kernel
+configuration.
+
+If a guest detects this feature and attempts to use it, the host will
+fail to context-switch POR_EL1, potentially leading to state corruption.
+
+Fix this by masking ID_AA64MMFR3_EL1.S1POE in the sanitised system
+registers, preventing KVM from advertising the feature when the host
+does not support it (i.e. system_supports_poe() is false).
+
+Fixes: 70ed7238297f ("KVM: arm64: Sanitise ID_AA64MMFR3_EL1")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://patch.msgid.link/20260213143815.1732675-2-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/sys_regs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 88a57ca36d96c..237e8bd1cf29c 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1816,6 +1816,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
+ ID_AA64MMFR3_EL1_SCTLRX |
+ ID_AA64MMFR3_EL1_S1POE |
+ ID_AA64MMFR3_EL1_S1PIE;
++
++ if (!system_supports_poe())
++ val &= ~ID_AA64MMFR3_EL1_S1POE;
+ break;
+ case SYS_ID_MMFR4_EL1:
+ val &= ~ID_MMFR4_EL1_CCIDX;
+--
+2.51.0
+
--- /dev/null
+From 31b698425ce870de404c9f01983c7a27d712b514 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 22:33:22 +0900
+Subject: mm/slab: pass __GFP_NOWARN to refill_sheaf() if fallback is available
+
+From: Harry Yoo <harry.yoo@oracle.com>
+
+[ Upstream commit 021ca6b670bebebc409d43845efcfe8c11c1dd54 ]
+
+When refill_sheaf() is called, failing to refill the sheaf doesn't
+necessarily mean the allocation will fail because a fallback path
+might be available and serve the allocation request.
+
+Suppress spurious warnings by passing __GFP_NOWARN along with
+__GFP_NOMEMALLOC whenever a fallback path is available.
+
+When the caller is alloc_full_sheaf() or __pcs_replace_empty_main(),
+the kernel always falls back to the slowpath (__slab_alloc_node()).
+For __prefill_sheaf_pfmemalloc(), the fallback path is available
+only when gfp_pfmemalloc_allowed() returns true.
+
+Reported-and-tested-by: Chris Bainbridge <chris.bainbridge@gmail.com>
+Closes: https://lore.kernel.org/linux-mm/aZt2-oS9lkmwT7Ch@debian.local
+Fixes: 1ce20c28eafd ("slab: handle pfmemalloc slabs properly with sheaves")
+Link: https://lore.kernel.org/linux-mm/aZwSreGj9-HHdD-j@hyeyoo
+Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
+Link: https://patch.msgid.link/20260223133322.16705-1-harry.yoo@oracle.com
+Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
+Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/slub.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 889c2804bbfeb..b68db0f5a6374 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2715,7 +2715,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
+ if (!sheaf)
+ return NULL;
+
+- if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
++ if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+ free_empty_sheaf(s, sheaf);
+ return NULL;
+ }
+@@ -5092,7 +5092,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
+ return NULL;
+
+ if (empty) {
+- if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
++ if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+ full = empty;
+ } else {
+ /*
+@@ -5395,9 +5395,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
+ static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
+ struct slab_sheaf *sheaf, gfp_t gfp)
+ {
+- int ret = 0;
++ gfp_t gfp_nomemalloc;
++ int ret;
++
++ gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
++ if (gfp_pfmemalloc_allowed(gfp))
++ gfp_nomemalloc |= __GFP_NOWARN;
+
+- ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
++ ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
+
+ if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From be26f417dbbd429dc118330d7877e0f0e6db8d4c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 06:10:08 -0600
+Subject: PCI: Correct PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit 39195990e4c093c9eecf88f29811c6de29265214 ]
+
+fb82437fdd8c ("PCI: Change capability register offsets to hex") incorrectly
+converted the PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value from decimal 52 to hex
+0x32:
+
+ -#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
+ +#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+
+This broke PCI capabilities in a VMM because subsequent ones weren't
+DWORD-aligned.
+
+Change PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 to the correct value of 0x34.
+
+fb82437fdd8c was from Baruch Siach <baruch@tkos.co.il>, but this was not
+Baruch's fault; it's a mistake I made when applying the patch.
+
+Fixes: fb82437fdd8c ("PCI: Change capability register offsets to hex")
+Reported-by: David Woodhouse <dwmw2@infradead.org>
+Closes: https://lore.kernel.org/all/3ae392a0158e9d9ab09a1d42150429dd8ca42791.camel@infradead.org
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Krzysztof WilczyĆski <kwilczynski@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/pci_regs.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index 3add74ae25948..48b0616ddbbbd 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -707,7 +707,7 @@
+ #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
+ #define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+ #define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
+-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
++#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x34 /* end of v2 EPs w/ link */
+ #define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
+ #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
+ #define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+--
+2.51.0
+
--- /dev/null
+From 832324ae51ec82a30afd2286c2270d4d8d1cff3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 18:55:41 +0100
+Subject: PCI: dwc: ep: Flush MSI-X write before unmapping its ATU entry
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit c22533c66ccae10511ad6a7afc34bb26c47577e3 ]
+
+Endpoint drivers use dw_pcie_ep_raise_msix_irq() to raise an MSI-X
+interrupt to the host using a writel(), which generates a PCI posted write
+transaction. There's no completion for posted writes, so the writel() may
+return before the PCI write completes. dw_pcie_ep_raise_msix_irq() also
+unmaps the outbound ATU entry used for the PCI write, so the write races
+with the unmap.
+
+If the PCI write loses the race with the ATU unmap, the write may corrupt
+host memory or cause IOMMU errors, e.g., these when running fio with a
+larger queue depth against nvmet-pci-epf:
+
+ arm-smmu-v3 fc900000.iommu: 0x0000010000000010
+ arm-smmu-v3 fc900000.iommu: 0x0000020000000000
+ arm-smmu-v3 fc900000.iommu: 0x000000090000f040
+ arm-smmu-v3 fc900000.iommu: 0x0000000000000000
+ arm-smmu-v3 fc900000.iommu: event: F_TRANSLATION client: 0000:01:00.0 sid: 0x100 ssid: 0x0 iova: 0x90000f040 ipa: 0x0
+ arm-smmu-v3 fc900000.iommu: unpriv data write s1 "Input address caused fault" stag: 0x0
+
+Flush the write by performing a readl() of the same address to ensure that
+the write has reached the destination before the ATU entry is unmapped.
+
+The same problem was solved for dw_pcie_ep_raise_msi_irq() in commit
+8719c64e76bf ("PCI: dwc: ep: Cache MSI outbound iATU mapping"), but there
+it was solved by dedicating an outbound iATU only for MSI. We can't do the
+same for MSI-X because each vector can have a different msg_addr and the
+msg_addr may be changed while the vector is masked.
+
+Fixes: beb4641a787d ("PCI: dwc: Add MSI-X callbacks handler")
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20260211175540.105677-2-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware-ep.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 77f27295b0a80..7ebb01fa5076f 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -1013,6 +1013,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+
+ writel(msg_data, ep->msi_mem + offset);
+
++ /* flush posted write before unmap */
++ readl(ep->msi_mem + offset);
++
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From bf947a8c07ed8f9cc3fb2754b16ccdfbfd5335fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:12:25 +0100
+Subject: PCI: dwc: ep: Refresh MSI Message Address cache on change
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit 468711a40d5dfc01bf0a24c1981246a2c93ac405 ]
+
+Endpoint drivers use dw_pcie_ep_raise_msi_irq() to raise MSI interrupts to
+the host. After 8719c64e76bf ("PCI: dwc: ep: Cache MSI outbound iATU
+mapping"), dw_pcie_ep_raise_msi_irq() caches the Message Address from the
+MSI Capability in ep->msi_msg_addr. But that Message Address is controlled
+by the host, and it may change. For example, if:
+
+ - firmware on the host configures the Message Address and triggers an
+ MSI,
+
+ - a driver on the Endpoint raises the MSI via dw_pcie_ep_raise_msi_irq(),
+ which caches the Message Address,
+
+ - a kernel on the host reconfigures the Message Address and the host
+ kernel driver triggers another MSI,
+
+dw_pcie_ep_raise_msi_irq() notices that the Message Address no longer
+matches the cached ep->msi_msg_addr, warns about it, and returns error
+instead of raising the MSI. The host kernel may hang because it never
+receives the MSI.
+
+This was seen with the nvmet_pci_epf_driver: the host UEFI performs NVMe
+commands, e.g. Identify Controller to get the name of the controller,
+nvmet-pci-epf posts the completion queue entry and raises an IRQ using
+dw_pcie_ep_raise_msi_irq(). When the host boots Linux, we see a
+WARN_ON_ONCE() from dw_pcie_ep_raise_msi_irq(), and the host kernel hangs
+because the nvme driver never gets an IRQ.
+
+Remove the warning when dw_pcie_ep_raise_msi_irq() notices that Message
+Address has changed, remap using the new address, and update the
+ep->msi_msg_addr cache.
+
+Fixes: 8719c64e76bf ("PCI: dwc: ep: Cache MSI outbound iATU mapping")
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Tested-by: Koichiro Den <den@valinux.co.jp>
+Acked-by: Manivannan Sadhasivam <mani@kernel.org>
+Link: https://patch.msgid.link/20260210181225.3926165-2-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../pci/controller/dwc/pcie-designware-ep.c | 22 +++++++++++--------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 59fd6ebf01489..77f27295b0a80 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -904,6 +904,19 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ * supported, so we avoid reprogramming the region on every MSI,
+ * specifically unmapping immediately after writel().
+ */
++ if (ep->msi_iatu_mapped && (ep->msi_msg_addr != msg_addr ||
++ ep->msi_map_size != map_size)) {
++ /*
++ * The host changed the MSI target address or the required
++ * mapping size changed. Reprogramming the iATU when there are
++ * operations in flight is unsafe on this controller. However,
++ * there is no unified way to check if we have operations in
++ * flight, thus we don't know if we should WARN() or not.
++ */
++ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
++ ep->msi_iatu_mapped = false;
++ }
++
+ if (!ep->msi_iatu_mapped) {
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0,
+ ep->msi_mem_phys, msg_addr,
+@@ -914,15 +927,6 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ ep->msi_iatu_mapped = true;
+ ep->msi_msg_addr = msg_addr;
+ ep->msi_map_size = map_size;
+- } else if (WARN_ON_ONCE(ep->msi_msg_addr != msg_addr ||
+- ep->msi_map_size != map_size)) {
+- /*
+- * The host changed the MSI target address or the required
+- * mapping size changed. Reprogramming the iATU at runtime is
+- * unsafe on this controller, so bail out instead of trying to
+- * update the existing region.
+- */
+- return -EINVAL;
+ }
+
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
+--
+2.51.0
+
--- /dev/null
+From 6df212709e084b43ee446e7715b3c17741d5fecc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Jun 2025 21:51:05 -0700
+Subject: perf/core: Fix invalid wait context in ctx_sched_in()
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+[ Upstream commit 486ff5ad49bc50315bcaf6d45f04a33ef0a45ced ]
+
+Lockdep found a bug in the event scheduling when a pinned event was
+failed and wakes up the threads in the ring buffer like below.
+
+It seems it should not grab a wait-queue lock under perf-context lock.
+Let's do it with irq_work.
+
+ [ 39.913691] =============================
+ [ 39.914157] [ BUG: Invalid wait context ]
+ [ 39.914623] 6.15.0-next-20250530-next-2025053 #1 Not tainted
+ [ 39.915271] -----------------------------
+ [ 39.915731] repro/837 is trying to lock:
+ [ 39.916191] ffff88801acfabd8 (&event->waitq){....}-{3:3}, at: __wake_up+0x26/0x60
+ [ 39.917182] other info that might help us debug this:
+ [ 39.917761] context-{5:5}
+ [ 39.918079] 4 locks held by repro/837:
+ [ 39.918530] #0: ffffffff8725cd00 (rcu_read_lock){....}-{1:3}, at: __perf_event_task_sched_in+0xd1/0xbc0
+ [ 39.919612] #1: ffff88806ca3c6f8 (&cpuctx_lock){....}-{2:2}, at: __perf_event_task_sched_in+0x1a7/0xbc0
+ [ 39.920748] #2: ffff88800d91fc18 (&ctx->lock){....}-{2:2}, at: __perf_event_task_sched_in+0x1f9/0xbc0
+ [ 39.921819] #3: ffffffff8725cd00 (rcu_read_lock){....}-{1:3}, at: perf_event_wakeup+0x6c/0x470
+
+Fixes: f4b07fd62d4d ("perf/core: Use POLLHUP for a pinned event in error")
+Closes: https://lore.kernel.org/lkml/aD2w50VDvGIH95Pf@ly-workstation
+Reported-by: "Lai, Yi" <yi1.lai@linux.intel.com>
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: "Lai, Yi" <yi1.lai@linux.intel.com>
+Link: https://patch.msgid.link/20250603045105.1731451-1-namhyung@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index c0bb657e28e31..4311c33c3381c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4017,7 +4017,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ if (*perf_event_fasync(event))
+ event->pending_kill = POLL_ERR;
+
+- perf_event_wakeup(event);
++ event->pending_wakeup = 1;
++ irq_work_queue(&event->pending_irq);
+ } else {
+ struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu);
+
+--
+2.51.0
+
--- /dev/null
+From 50df46bd03026d56b381105ab9078f52446abf75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:29:09 +0100
+Subject: perf: Fix __perf_event_overflow() vs perf_remove_from_context() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c9bc1753b3cc41d0e01fbca7f035258b5f4db0ae ]
+
+Make sure that __perf_event_overflow() runs with IRQs disabled for all
+possible callchains. Specifically the software events can end up running
+it with only preemption disabled.
+
+This opens up a race vs perf_event_exit_event() and friends that will go
+and free various things the overflow path expects to be present, like
+the BPF program.
+
+Fixes: 592903cdcbf6 ("perf_counter: add an event_list")
+Reported-by: Simond Hu <cmdhh1767@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Simond Hu <cmdhh1767@gmail.com>
+Link: https://patch.msgid.link/20260224122909.GV1395416@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 42 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 4311c33c3381c..84a79e977580e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10498,6 +10498,13 @@ int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+ {
++ /*
++ * Entry point from hardware PMI, interrupts should be disabled here.
++ * This serializes us against perf_event_remove_from_context() in
++ * things like perf_event_release_kernel().
++ */
++ lockdep_assert_irqs_disabled();
++
+ return __perf_event_overflow(event, 1, data, regs);
+ }
+
+@@ -10574,6 +10581,19 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ {
+ struct hw_perf_event *hwc = &event->hw;
+
++ /*
++ * This is:
++ * - software preempt
++ * - tracepoint preempt
++ * - tp_target_task irq (ctx->lock)
++ * - uprobes preempt/irq
++ * - kprobes preempt/irq
++ * - hw_breakpoint irq
++ *
++ * Any of these are sufficient to hold off RCU and thus ensure @event
++ * exists.
++ */
++ lockdep_assert_preemption_disabled();
+ local64_add(nr, &event->count);
+
+ if (!regs)
+@@ -10582,6 +10602,16 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ if (!is_sampling_event(event))
+ return;
+
++ /*
++ * Serialize against event_function_call() IPIs like normal overflow
++ * event handling. Specifically, must not allow
++ * perf_event_release_kernel() -> perf_remove_from_context() to make
++ * progress and 'release' the event from under us.
++ */
++ guard(irqsave)();
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+@@ -11080,6 +11110,11 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ struct perf_sample_data data;
+ struct perf_event *event;
+
++ /*
++ * Per being a tracepoint, this runs with preemption disabled.
++ */
++ lockdep_assert_preemption_disabled();
++
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = entry_size,
+@@ -11412,6 +11447,11 @@ void perf_bp_event(struct perf_event *bp, void *data)
+ struct perf_sample_data sample;
+ struct pt_regs *regs = data;
+
++ /*
++ * Exception context, will have interrupts disabled.
++ */
++ lockdep_assert_irqs_disabled();
++
+ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+@@ -11876,7 +11916,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
+- if (__perf_event_overflow(event, 1, &data, regs))
++ if (perf_event_overflow(event, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 36b68a8ed5263baa1bdf7c3e5d6c094191e4a3d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 19:19:03 +0800
+Subject: regulator: bq257xx: Fix device node reference leak in
+ bq257xx_reg_dt_parse_gpio()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 4baaddaa44af01cd4ce239493060738fd0881835 ]
+
+In bq257xx_reg_dt_parse_gpio(), if it fails to get the subchild, it returns
+without calling of_node_put(child), causing the device node reference
+leak.
+
+Fixes: 981dd162b635 ("regulator: bq257xx: Add bq257xx boost regulator driver")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Link: https://patch.msgid.link/20260224-bq257-v1-1-8ebbc731c1c3@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/bq257xx-regulator.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/regulator/bq257xx-regulator.c b/drivers/regulator/bq257xx-regulator.c
+index fc1ccede44688..dab8f1ab44503 100644
+--- a/drivers/regulator/bq257xx-regulator.c
++++ b/drivers/regulator/bq257xx-regulator.c
+@@ -115,11 +115,10 @@ static void bq257xx_reg_dt_parse_gpio(struct platform_device *pdev)
+ return;
+
+ subchild = of_get_child_by_name(child, pdata->desc.of_match);
++ of_node_put(child);
+ if (!subchild)
+ return;
+
+- of_node_put(child);
+-
+ pdata->otg_en_gpio = devm_fwnode_gpiod_get_index(&pdev->dev,
+ of_fwnode_handle(subchild),
+ "enable", 0,
+--
+2.51.0
+
--- /dev/null
+From 7ffbafbbf072f53c63b40bb3e0fd0fa6cd578678 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 18:07:59 +0800
+Subject: regulator: fp9931: Fix PM runtime reference leak in
+ fp9931_hwmon_read()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 0902010c8d163f7b62e655efda1a843529152c7c ]
+
+In fp9931_hwmon_read(), if regmap_read() failed, the function returned
+the error code without calling pm_runtime_put_autosuspend(), causing
+a PM reference leak.
+
+Fixes: 12d821bd13d4 ("regulator: Add FP9931/JD9930 driver")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://patch.msgid.link/20260224-fp9931-v1-1-1cf05cabef4a@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/fp9931.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/regulator/fp9931.c b/drivers/regulator/fp9931.c
+index 7fbcc6327cc63..abea3b69d8a08 100644
+--- a/drivers/regulator/fp9931.c
++++ b/drivers/regulator/fp9931.c
+@@ -144,13 +144,12 @@ static int fp9931_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ return ret;
+
+ ret = regmap_read(data->regmap, FP9931_REG_TMST_VALUE, &val);
+- if (ret)
+- return ret;
++ if (!ret)
++ *temp = (s8)val * 1000;
+
+ pm_runtime_put_autosuspend(data->dev);
+- *temp = (s8)val * 1000;
+
+- return 0;
++ return ret;
+ }
+
+ static umode_t fp9931_hwmon_is_visible(const void *data,
+--
+2.51.0
+
--- /dev/null
+From 729b6f1434ce8efd0a2741ac15cc8bba4c78c79a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 15:06:40 -0500
+Subject: rseq: Clarify rseq registration rseq_size bound check comment
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+[ Upstream commit 26d43a90be81fc90e26688a51d3ec83188602731 ]
+
+The rseq registration validates that the rseq_size argument is greater
+or equal to 32 (the original rseq size), but the comment associated with
+this check does not clearly state this.
+
+Clarify the comment to that effect.
+
+Fixes: ee3e3ac05c26 ("rseq: Introduce extensible rseq ABI")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260220200642.1317826-2-mathieu.desnoyers@efficios.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/rseq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 395d8b002350a..6cb5b7e51555d 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -428,8 +428,9 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32
+ * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
+ * size, the required alignment is the original struct rseq alignment.
+ *
+- * In order to be valid, rseq_len is either the original rseq size, or
+- * large enough to contain all supported fields, as communicated to
++ * The rseq_len is required to be greater or equal to the original rseq
++ * size. In order to be valid, rseq_len is either the original rseq size,
++ * or large enough to contain all supported fields, as communicated to
+ * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
+ */
+ if (rseq_len < ORIG_RSEQ_SIZE ||
+--
+2.51.0
+
--- /dev/null
+From 2003bf114f2d6193edd5563c64b6a3d1789889dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 15:20:04 +0100
+Subject: s390/idle: Fix cpu idle exit cpu time accounting
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 0d785e2c324c90662baa4fe07a0d02233ff92824 ]
+
+With the conversion to generic entry [1] cpu idle exit cpu time accounting
+was converted from assembly to C. This introduced a reversed order of cpu
+time accounting.
+
+On cpu idle exit the current accounting happens with the following call
+chain:
+
+-> do_io_irq()/do_ext_irq()
+ -> irq_enter_rcu()
+ -> account_hardirq_enter()
+ -> vtime_account_irq()
+ -> vtime_account_kernel()
+
+vtime_account_kernel() accounts the passed cpu time since last_update_timer
+as system time, and updates last_update_timer to the current cpu timer
+value.
+
+However the subsequent call of
+
+ -> account_idle_time_irq()
+
+will incorrectly subtract passed cpu time from timer_idle_enter to the
+updated last_update_timer value from system_timer. Then last_update_timer
+is updated to a sys_enter_timer, which means that last_update_timer goes
+back in time.
+
+Subsequently account_hardirq_exit() will account too much cpu time as
+hardirq time. The sum of all accounted cpu times is still correct, however
+some cpu time which was previously accounted as system time is now
+accounted as hardirq time, plus there is the oddity that last_update_timer
+goes back in time.
+
+Restore previous behavior by extracting cpu time accounting code from
+account_idle_time_irq() into a new update_timer_idle() function and call it
+before irq_enter_rcu().
+
+Fixes: 56e62a737028 ("s390: convert to generic entry") [1]
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/idle.h | 1 +
+ arch/s390/kernel/idle.c | 13 +++++++++----
+ arch/s390/kernel/irq.c | 10 ++++++++--
+ 3 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
+index 09f763b9eb40a..133059d9a949c 100644
+--- a/arch/s390/include/asm/idle.h
++++ b/arch/s390/include/asm/idle.h
+@@ -23,5 +23,6 @@ extern struct device_attribute dev_attr_idle_count;
+ extern struct device_attribute dev_attr_idle_time_us;
+
+ void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
++void update_timer_idle(void);
+
+ #endif /* _S390_IDLE_H */
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index 39cb8d0ae3480..0f9e53f0a0686 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -21,11 +21,10 @@
+
+ static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
+-void account_idle_time_irq(void)
++void update_timer_idle(void)
+ {
+ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+ struct lowcore *lc = get_lowcore();
+- unsigned long idle_time;
+ u64 cycles_new[8];
+ int i;
+
+@@ -35,13 +34,19 @@ void account_idle_time_irq(void)
+ this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
+ }
+
+- idle_time = lc->int_clock - idle->clock_idle_enter;
+-
+ lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
+ lc->last_update_clock = lc->int_clock;
+
+ lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
+ lc->last_update_timer = lc->sys_enter_timer;
++}
++
++void account_idle_time_irq(void)
++{
++ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
++ unsigned long idle_time;
++
++ idle_time = get_lowcore()->int_clock - idle->clock_idle_enter;
+
+ /* Account time spent with enabled wait psw loaded as idle time. */
+ WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
+diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
+index bdf9c7cb5685b..080e9285b3379 100644
+--- a/arch/s390/kernel/irq.c
++++ b/arch/s390/kernel/irq.c
+@@ -146,6 +146,10 @@ void noinstr do_io_irq(struct pt_regs *regs)
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ bool from_idle;
+
++ from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
++ if (from_idle)
++ update_timer_idle();
++
+ irq_enter_rcu();
+
+ if (user_mode(regs)) {
+@@ -154,7 +158,6 @@ void noinstr do_io_irq(struct pt_regs *regs)
+ current->thread.last_break = regs->last_break;
+ }
+
+- from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+ if (from_idle)
+ account_idle_time_irq();
+
+@@ -182,6 +185,10 @@ void noinstr do_ext_irq(struct pt_regs *regs)
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ bool from_idle;
+
++ from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
++ if (from_idle)
++ update_timer_idle();
++
+ irq_enter_rcu();
+
+ if (user_mode(regs)) {
+@@ -194,7 +201,6 @@ void noinstr do_ext_irq(struct pt_regs *regs)
+ regs->int_parm = get_lowcore()->ext_params;
+ regs->int_parm_long = get_lowcore()->ext_params2;
+
+- from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+ if (from_idle)
+ account_idle_time_irq();
+
+--
+2.51.0
+
--- /dev/null
+From bcea8eadaaab46e5e15059935b091eda8028b3a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 23:33:52 +0100
+Subject: s390/kexec: Disable stack protector in s390_reset_system()
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+[ Upstream commit 1623a554c68f352c17d0a358bc62580dc187f06b ]
+
+s390_reset_system() calls set_prefix(0), which switches back to the
+absolute lowcore. At that point the stack protector canary no longer
+matches the canary from the lowcore the function was entered with, so
+the stack check fails.
+
+Mark s390_reset_system() __no_stack_protector. This is safe here since
+its callers (__do_machine_kdump() and __do_machine_kexec()) are
+effectively no-return and fall back to disabled_wait() on failure.
+
+Fixes: f5730d44e05e ("s390: Add stackprotector support")
+Reported-by: Nikita Dubrovskii <nikita@linux.ibm.com>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/ipl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index dcdc7e2748486..049c557c452ff 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -2377,7 +2377,7 @@ void __init setup_ipl(void)
+ atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+ }
+
+-void s390_reset_system(void)
++void __no_stack_protector s390_reset_system(void)
+ {
+ /* Disable prefixing */
+ set_prefix(0);
+--
+2.51.0
+
--- /dev/null
+From 792656c09df1d5361db3771d05799616feb89236 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 15:20:05 +0100
+Subject: s390/vtime: Fix virtual timer forwarding
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit dbc0fb35679ed5d0adecf7d02137ac2c77244b3b ]
+
+Since delayed accounting of system time [1] the virtual timer is
+forwarded by do_account_vtime() but also vtime_account_kernel(),
+vtime_account_softirq(), and vtime_account_hardirq(). This leads
+to double accounting of system, guest, softirq, and hardirq time.
+
+Remove accounting from the vtime_account*() family to restore old behavior.
+
+There is only one user of the vtimer interface, which might explain
+why nobody noticed this so far.
+
+Fixes: b7394a5f4ce9 ("sched/cputime, s390: Implement delayed accounting of system time") [1]
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/vtime.c | 18 ++----------------
+ 1 file changed, 2 insertions(+), 16 deletions(-)
+
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 234a0ba305108..122d30b104401 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -225,10 +225,6 @@ static u64 vtime_delta(void)
+ return timer - lc->last_update_timer;
+ }
+
+-/*
+- * Update process times based on virtual cpu times stored by entry.S
+- * to the lowcore fields user_timer, system_timer & steal_clock.
+- */
+ void vtime_account_kernel(struct task_struct *tsk)
+ {
+ struct lowcore *lc = get_lowcore();
+@@ -238,27 +234,17 @@ void vtime_account_kernel(struct task_struct *tsk)
+ lc->guest_timer += delta;
+ else
+ lc->system_timer += delta;
+-
+- virt_timer_forward(delta);
+ }
+ EXPORT_SYMBOL_GPL(vtime_account_kernel);
+
+ void vtime_account_softirq(struct task_struct *tsk)
+ {
+- u64 delta = vtime_delta();
+-
+- get_lowcore()->softirq_timer += delta;
+-
+- virt_timer_forward(delta);
++ get_lowcore()->softirq_timer += vtime_delta();
+ }
+
+ void vtime_account_hardirq(struct task_struct *tsk)
+ {
+- u64 delta = vtime_delta();
+-
+- get_lowcore()->hardirq_timer += delta;
+-
+- virt_timer_forward(delta);
++ get_lowcore()->hardirq_timer += vtime_delta();
+ }
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From af772255793d2271b9e1ec6d5862d0d081c85365 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jan 2026 12:31:13 +0000
+Subject: sched/eevdf: Update se->vprot in reweight_entity()
+
+From: Wang Tao <wangtao554@huawei.com>
+
+[ Upstream commit ff38424030f98976150e42ca35f4b00e6ab8fa23 ]
+
+In the EEVDF framework with Run-to-Parity protection, `se->vprot` is an
+independent variable defining the virtual protection timestamp.
+
+When `reweight_entity()` is called (e.g., via nice/renice), it performs
+the following actions to preserve Lag consistency:
+ 1. Scales `se->vlag` based on the new weight.
+ 2. Calls `place_entity()`, which recalculates `se->vruntime` based on
+ the new weight and scaled lag.
+
+However, the current implementation fails to update `se->vprot`, leading
+to mismatches between the task's actual runtime and its expected duration.
+
+Fixes: 63304558ba5d ("sched/eevdf: Curb wakeup-preemption")
+Suggested-by: Zhang Qiao <zhangqiao22@huawei.com>
+Signed-off-by: Wang Tao <wangtao554@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20260120123113.3518950-1-wangtao554@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 6f66d4f0540ea..c8a6dac54e220 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3816,6 +3816,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long weight)
+ {
+ bool curr = cfs_rq->curr == se;
++ bool rel_vprot = false;
++ u64 vprot;
+
+ if (se->on_rq) {
+ /* commit outstanding execution time */
+@@ -3823,6 +3825,11 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ update_entity_lag(cfs_rq, se);
+ se->deadline -= se->vruntime;
+ se->rel_deadline = 1;
++ if (curr && protect_slice(se)) {
++ vprot = se->vprot - se->vruntime;
++ rel_vprot = true;
++ }
++
+ cfs_rq->nr_queued--;
+ if (!curr)
+ __dequeue_entity(cfs_rq, se);
+@@ -3838,6 +3845,9 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ if (se->rel_deadline)
+ se->deadline = div_s64(se->deadline * se->load.weight, weight);
+
++ if (rel_vprot)
++ vprot = div_s64(vprot * se->load.weight, weight);
++
+ update_load_set(&se->load, weight);
+
+ do {
+@@ -3849,6 +3859,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ enqueue_load_avg(cfs_rq, se);
+ if (se->on_rq) {
+ place_entity(cfs_rq, se, 0);
++ if (rel_vprot)
++ se->vprot = se->vruntime + vprot;
+ update_load_add(&cfs_rq->load, se->load.weight);
+ if (!curr)
+ __enqueue_entity(cfs_rq, se);
+--
+2.51.0
+
--- /dev/null
+From 359a18c732788c0ab1e921b6e5fd1132baf89702 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 12:16:28 +0200
+Subject: sched/fair: Fix lag clamp
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 6e3c0a4e1ad1e0455b7880fad02b3ee179f56c09 ]
+
+Vincent reported that he was seeing undue lag clamping in a mixed
+slice workload. Implement the max_slice tracking as per the todo
+comment.
+
+Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
+Reported-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20250422101628.GA33555@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sched.h | 1 +
+ kernel/sched/fair.c | 39 +++++++++++++++++++++++++++++++++++----
+ 2 files changed, 36 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5f00b5ed0f3b7..eb1c4c347a5cf 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -574,6 +574,7 @@ struct sched_entity {
+ u64 deadline;
+ u64 min_vruntime;
+ u64 min_slice;
++ u64 max_slice;
+
+ struct list_head group_node;
+ unsigned char on_rq;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c8a6dac54e220..a8e766eaca1f9 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -748,6 +748,8 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ return cfs_rq->zero_vruntime;
+ }
+
++static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq);
++
+ /*
+ * lag_i = S - s_i = w_i * (V - v_i)
+ *
+@@ -761,17 +763,16 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ * EEVDF gives the following limit for a steady state system:
+ *
+ * -r_max < lag < max(r_max, q)
+- *
+- * XXX could add max_slice to the augmented data to track this.
+ */
+ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
++ u64 max_slice = cfs_rq_max_slice(cfs_rq) + TICK_NSEC;
+ s64 vlag, limit;
+
+ WARN_ON_ONCE(!se->on_rq);
+
+ vlag = avg_vruntime(cfs_rq) - se->vruntime;
+- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
++ limit = calc_delta_fair(max_slice, se);
+
+ se->vlag = clamp(vlag, -limit, limit);
+ }
+@@ -829,6 +830,21 @@ static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+ return min_slice;
+ }
+
++static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq)
++{
++ struct sched_entity *root = __pick_root_entity(cfs_rq);
++ struct sched_entity *curr = cfs_rq->curr;
++ u64 max_slice = 0ULL;
++
++ if (curr && curr->on_rq)
++ max_slice = curr->slice;
++
++ if (root)
++ max_slice = max(max_slice, root->max_slice);
++
++ return max_slice;
++}
++
+ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+ {
+ return entity_before(__node_2_se(a), __node_2_se(b));
+@@ -853,6 +869,15 @@ static inline void __min_slice_update(struct sched_entity *se, struct rb_node *n
+ }
+ }
+
++static inline void __max_slice_update(struct sched_entity *se, struct rb_node *node)
++{
++ if (node) {
++ struct sched_entity *rse = __node_2_se(node);
++ if (rse->max_slice > se->max_slice)
++ se->max_slice = rse->max_slice;
++ }
++}
++
+ /*
+ * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
+ */
+@@ -860,6 +885,7 @@ static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
+ {
+ u64 old_min_vruntime = se->min_vruntime;
+ u64 old_min_slice = se->min_slice;
++ u64 old_max_slice = se->max_slice;
+ struct rb_node *node = &se->run_node;
+
+ se->min_vruntime = se->vruntime;
+@@ -870,8 +896,13 @@ static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
+ __min_slice_update(se, node->rb_right);
+ __min_slice_update(se, node->rb_left);
+
++ se->max_slice = se->slice;
++ __max_slice_update(se, node->rb_right);
++ __max_slice_update(se, node->rb_left);
++
+ return se->min_vruntime == old_min_vruntime &&
+- se->min_slice == old_min_slice;
++ se->min_slice == old_min_slice &&
++ se->max_slice == old_max_slice;
+ }
+
+ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+--
+2.51.0
+
--- /dev/null
+From f2ecaf0cff71b7fc7db5502e6beaf41ac2987d82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:28:16 +0100
+Subject: sched/fair: Fix zero_vruntime tracking
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit b3d99f43c72b56cf7a104a364e7fb34b0702828b ]
+
+It turns out that zero_vruntime tracking is broken when there is but a single
+task running. Current update paths are through __{en,de}queue_entity(), and
+when there is but a single task, pick_next_task() will always return that one
+task, and put_prev_set_next_task() will end up in neither function.
+
+This can cause entity_key() to grow indefinitely large and cause overflows,
+leading to much pain and suffering.
+
+Furthermore, doing update_zero_vruntime() from __{de,en}queue_entity(), which
+are called from {set_next,put_prev}_entity() has problems because:
+
+ - set_next_entity() calls __dequeue_entity() before it does cfs_rq->curr = se.
+ This means the avg_vruntime() will see the removal but not current, missing
+ the entity for accounting.
+
+ - put_prev_entity() calls __enqueue_entity() before it does cfs_rq->curr =
+ NULL. This means the avg_vruntime() will see the addition *and* current,
+ leading to double accounting.
+
+Both cases are incorrect/inconsistent.
+
+Noting that avg_vruntime is already called on each {en,de}queue, remove the
+explicit avg_vruntime() calls (which removes an extra 64bit division for each
+{en,de}queue) and have avg_vruntime() update zero_vruntime itself.
+
+Additionally, have the tick call avg_vruntime() -- discarding the result, but
+for the side-effect of updating zero_vruntime.
+
+While there, optimize avg_vruntime() by noting that the average of one value is
+rather trivial to compute.
+
+Test case:
+ # taskset -c -p 1 $$
+ # taskset -c 2 bash -c 'while :; do :; done&'
+ # cat /sys/kernel/debug/sched/debug | awk '/^cpu#/ {P=0} /^cpu#2,/ {P=1} {if (P) print $0}' | grep -e zero_vruntime -e "^>"
+
+PRE:
+ .zero_vruntime : 31316.407903
+ >R bash 487 50787.345112 E 50789.145972 2.800000 50780.298364 16 120 0.000000 0.000000 0.000000 /
+ .zero_vruntime : 382548.253179
+ >R bash 487 427275.204288 E 427276.003584 2.800000 427268.157540 23 120 0.000000 0.000000 0.000000 /
+
+POST:
+ .zero_vruntime : 17259.709467
+ >R bash 526 17259.709467 E 17262.509467 2.800000 16915.031624 9 120 0.000000 0.000000 0.000000 /
+ .zero_vruntime : 18702.723356
+ >R bash 526 18702.723356 E 18705.523356 2.800000 18358.045513 9 120 0.000000 0.000000 0.000000 /
+
+Fixes: 79f3f9bedd14 ("sched/eevdf: Fix min_vruntime vs avg_vruntime")
+Reported-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20260219080624.438854780%40infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 84 ++++++++++++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 27 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0fb6c3d43620f..436dec8927232 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -589,6 +589,21 @@ static inline bool entity_before(const struct sched_entity *a,
+ return vruntime_cmp(a->deadline, "<", b->deadline);
+ }
+
++/*
++ * Per avg_vruntime() below, cfs_rq::zero_vruntime is only slightly stale
++ * and this value should be no more than two lag bounds. Which puts it in the
++ * general order of:
++ *
++ * (slice + TICK_NSEC) << NICE_0_LOAD_SHIFT
++ *
++ * which is around 44 bits in size (on 64bit); that is 20 for
++ * NICE_0_LOAD_SHIFT, another 20 for NSEC_PER_MSEC and then a handful for
++ * however many msec the actual slice+tick ends up begin.
++ *
++ * (disregarding the actual divide-by-weight part makes for the worst case
++ * weight of 2, which nicely cancels vs the fuzz in zero_vruntime not actually
++ * being the zero-lag point).
++ */
+ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ return vruntime_op(se->vruntime, "-", cfs_rq->zero_vruntime);
+@@ -676,39 +691,61 @@ sum_w_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ }
+
+ static inline
+-void sum_w_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
++void update_zero_vruntime(struct cfs_rq *cfs_rq, s64 delta)
+ {
+ /*
+- * v' = v + d ==> sum_w_vruntime' = sum_runtime - d*sum_weight
++ * v' = v + d ==> sum_w_vruntime' = sum_w_vruntime - d*sum_weight
+ */
+ cfs_rq->sum_w_vruntime -= cfs_rq->sum_weight * delta;
++ cfs_rq->zero_vruntime += delta;
+ }
+
+ /*
+- * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
++ * Specifically: avg_vruntime() + 0 must result in entity_eligible() := true
+ * For this to be so, the result of this function must have a left bias.
++ *
++ * Called in:
++ * - place_entity() -- before enqueue
++ * - update_entity_lag() -- before dequeue
++ * - entity_tick()
++ *
++ * This means it is one entry 'behind' but that puts it close enough to where
++ * the bound on entity_key() is at most two lag bounds.
+ */
+ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+- s64 avg = cfs_rq->sum_w_vruntime;
+- long load = cfs_rq->sum_weight;
++ long weight = cfs_rq->sum_weight;
++ s64 delta = 0;
+
+- if (curr && curr->on_rq) {
+- unsigned long weight = scale_load_down(curr->load.weight);
++ if (curr && !curr->on_rq)
++ curr = NULL;
+
+- avg += entity_key(cfs_rq, curr) * weight;
+- load += weight;
+- }
++ if (weight) {
++ s64 runtime = cfs_rq->sum_w_vruntime;
++
++ if (curr) {
++ unsigned long w = scale_load_down(curr->load.weight);
++
++ runtime += entity_key(cfs_rq, curr) * w;
++ weight += w;
++ }
+
+- if (load) {
+ /* sign flips effective floor / ceiling */
+- if (avg < 0)
+- avg -= (load - 1);
+- avg = div_s64(avg, load);
++ if (runtime < 0)
++ runtime -= (weight - 1);
++
++ delta = div_s64(runtime, weight);
++ } else if (curr) {
++ /*
++ * When there is but one element, it is the average.
++ */
++ delta = curr->vruntime - cfs_rq->zero_vruntime;
+ }
+
+- return cfs_rq->zero_vruntime + avg;
++ update_zero_vruntime(cfs_rq, delta);
++
++ return cfs_rq->zero_vruntime;
+ }
+
+ /*
+@@ -777,16 +814,6 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ return vruntime_eligible(cfs_rq, se->vruntime);
+ }
+
+-static void update_zero_vruntime(struct cfs_rq *cfs_rq)
+-{
+- u64 vruntime = avg_vruntime(cfs_rq);
+- s64 delta = vruntime_op(vruntime, "-", cfs_rq->zero_vruntime);
+-
+- sum_w_vruntime_update(cfs_rq, delta);
+-
+- cfs_rq->zero_vruntime = vruntime;
+-}
+-
+ static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *root = __pick_root_entity(cfs_rq);
+@@ -856,7 +883,6 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ sum_w_vruntime_add(cfs_rq, se);
+- update_zero_vruntime(cfs_rq);
+ se->min_vruntime = se->vruntime;
+ se->min_slice = se->slice;
+ rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+@@ -868,7 +894,6 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ &min_vruntime_cb);
+ sum_w_vruntime_sub(cfs_rq, se);
+- update_zero_vruntime(cfs_rq);
+ }
+
+ struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
+@@ -5567,6 +5592,11 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+ update_load_avg(cfs_rq, curr, UPDATE_TG);
+ update_cfs_group(curr);
+
++ /*
++ * Pulls along cfs_rq::zero_vruntime.
++ */
++ avg_vruntime(cfs_rq);
++
+ #ifdef CONFIG_SCHED_HRTICK
+ /*
+ * queued ticks are scheduled to match the slice, so don't bother
+--
+2.51.0
+
--- /dev/null
+From cdec58c841b4c8be34bd1ecfe1f172dd40667590 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Dec 2025 16:10:32 +0100
+Subject: sched/fair: Introduce and use the vruntime_cmp() and vruntime_op()
+ wrappers for wrapped-signed arithmetics
+
+From: Ingo Molnar <mingo@kernel.org>
+
+[ Upstream commit 5758e48eefaf111d7764d8f1c8b666140fe5fa27 ]
+
+We have to be careful with vruntime comparisons and subtraction,
+due to the possibility of wrapping, so we have macros like:
+
+ #define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
+
+Which is used like this:
+
+ if (vruntime_gt(min_vruntime, se, rse))
+ se->min_vruntime = rse->min_vruntime;
+
+Replace this with an easier to read pattern that uses the regular
+arithmetics operators:
+
+ if (vruntime_cmp(se->min_vruntime, ">", rse->min_vruntime))
+ se->min_vruntime = rse->min_vruntime;
+
+Also replace vruntime subtractions with vruntime_op():
+
+ - delta = (s64)(sea->vruntime - seb->vruntime) +
+ - (s64)(cfs_rqb->zero_vruntime_fi - cfs_rqa->zero_vruntime_fi);
+ + delta = vruntime_op(sea->vruntime, "-", seb->vruntime) +
+ + vruntime_op(cfs_rqb->zero_vruntime_fi, "-", cfs_rqa->zero_vruntime_fi);
+
+In the vruntime_cmp() and vruntime_op() macros, use __builtin_strcmp(),
+because __HAVE_ARCH_STRCMP might turn off the compiler optimizations
+we rely on here to catch usage bugs.
+
+No change in functionality.
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Stable-dep-of: b3d99f43c72b ("sched/fair: Fix zero_vruntime tracking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 66 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 51 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f06a5d36106b4..0fb6c3d43620f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -524,10 +524,48 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+ * Scheduling class tree data structure manipulation methods:
+ */
+
++extern void __BUILD_BUG_vruntime_cmp(void);
++
++/* Use __builtin_strcmp() because of __HAVE_ARCH_STRCMP: */
++
++#define vruntime_cmp(A, CMP_STR, B) ({ \
++ int __res = 0; \
++ \
++ if (!__builtin_strcmp(CMP_STR, "<")) { \
++ __res = ((s64)((A)-(B)) < 0); \
++ } else if (!__builtin_strcmp(CMP_STR, "<=")) { \
++ __res = ((s64)((A)-(B)) <= 0); \
++ } else if (!__builtin_strcmp(CMP_STR, ">")) { \
++ __res = ((s64)((A)-(B)) > 0); \
++ } else if (!__builtin_strcmp(CMP_STR, ">=")) { \
++ __res = ((s64)((A)-(B)) >= 0); \
++ } else { \
++ /* Unknown operator throws linker error: */ \
++ __BUILD_BUG_vruntime_cmp(); \
++ } \
++ \
++ __res; \
++})
++
++extern void __BUILD_BUG_vruntime_op(void);
++
++#define vruntime_op(A, OP_STR, B) ({ \
++ s64 __res = 0; \
++ \
++ if (!__builtin_strcmp(OP_STR, "-")) { \
++ __res = (s64)((A)-(B)); \
++ } else { \
++ /* Unknown operator throws linker error: */ \
++ __BUILD_BUG_vruntime_op(); \
++ } \
++ \
++ __res; \
++})
++
++
+ static inline __maybe_unused u64 max_vruntime(u64 max_vruntime, u64 vruntime)
+ {
+- s64 delta = (s64)(vruntime - max_vruntime);
+- if (delta > 0)
++ if (vruntime_cmp(vruntime, ">", max_vruntime))
+ max_vruntime = vruntime;
+
+ return max_vruntime;
+@@ -535,8 +573,7 @@ static inline __maybe_unused u64 max_vruntime(u64 max_vruntime, u64 vruntime)
+
+ static inline __maybe_unused u64 min_vruntime(u64 min_vruntime, u64 vruntime)
+ {
+- s64 delta = (s64)(vruntime - min_vruntime);
+- if (delta < 0)
++ if (vruntime_cmp(vruntime, "<", min_vruntime))
+ min_vruntime = vruntime;
+
+ return min_vruntime;
+@@ -549,12 +586,12 @@ static inline bool entity_before(const struct sched_entity *a,
+ * Tiebreak on vruntime seems unnecessary since it can
+ * hardly happen.
+ */
+- return (s64)(a->deadline - b->deadline) < 0;
++ return vruntime_cmp(a->deadline, "<", b->deadline);
+ }
+
+ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- return (s64)(se->vruntime - cfs_rq->zero_vruntime);
++ return vruntime_op(se->vruntime, "-", cfs_rq->zero_vruntime);
+ }
+
+ #define __node_2_se(node) \
+@@ -732,7 +769,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
+ load += weight;
+ }
+
+- return avg >= (s64)(vruntime - cfs_rq->zero_vruntime) * load;
++ return avg >= vruntime_op(vruntime, "-", cfs_rq->zero_vruntime) * load;
+ }
+
+ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -743,7 +780,7 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ static void update_zero_vruntime(struct cfs_rq *cfs_rq)
+ {
+ u64 vruntime = avg_vruntime(cfs_rq);
+- s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
++ s64 delta = vruntime_op(vruntime, "-", cfs_rq->zero_vruntime);
+
+ sum_w_vruntime_update(cfs_rq, delta);
+
+@@ -770,13 +807,12 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+ return entity_before(__node_2_se(a), __node_2_se(b));
+ }
+
+-#define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
+-
+ static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node)
+ {
+ if (node) {
+ struct sched_entity *rse = __node_2_se(node);
+- if (vruntime_gt(min_vruntime, se, rse))
++
++ if (vruntime_cmp(se->min_vruntime, ">", rse->min_vruntime))
+ se->min_vruntime = rse->min_vruntime;
+ }
+ }
+@@ -887,7 +923,7 @@ static inline void update_protect_slice(struct cfs_rq *cfs_rq, struct sched_enti
+
+ static inline bool protect_slice(struct sched_entity *se)
+ {
+- return ((s64)(se->vprot - se->vruntime) > 0);
++ return vruntime_cmp(se->vruntime, "<", se->vprot);
+ }
+
+ static inline void cancel_protect_slice(struct sched_entity *se)
+@@ -1024,7 +1060,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
+ */
+ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- if ((s64)(se->vruntime - se->deadline) < 0)
++ if (vruntime_cmp(se->vruntime, "<", se->deadline))
+ return false;
+
+ /*
+@@ -13319,8 +13355,8 @@ bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ * zero_vruntime_fi, which would have been updated in prior calls
+ * to se_fi_update().
+ */
+- delta = (s64)(sea->vruntime - seb->vruntime) +
+- (s64)(cfs_rqb->zero_vruntime_fi - cfs_rqa->zero_vruntime_fi);
++ delta = vruntime_op(sea->vruntime, "-", seb->vruntime) +
++ vruntime_op(cfs_rqb->zero_vruntime_fi, "-", cfs_rqa->zero_vruntime_fi);
+
+ return delta > 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From e2125c1dd608830a8acbde27889504fbaf55b181 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jan 2026 16:49:09 +0100
+Subject: sched/fair: Only set slice protection at pick time
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit bcd74b2ffdd0a2233adbf26b65c62fc69a809c8e ]
+
+We should not (re)set slice protection in the sched_change pattern
+which calls put_prev_task() / set_next_task().
+
+Fixes: 63304558ba5d ("sched/eevdf: Curb wakeup-preemption")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
+Link: https://patch.msgid.link/20260219080624.561421378%40infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 436dec8927232..6f66d4f0540ea 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5488,7 +5488,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ }
+
+ static void
+-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
++set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool first)
+ {
+ clear_buddies(cfs_rq, se);
+
+@@ -5503,7 +5503,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ __dequeue_entity(cfs_rq, se);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+
+- set_protect_slice(cfs_rq, se);
++ if (first)
++ set_protect_slice(cfs_rq, se);
+ }
+
+ update_stats_curr_start(cfs_rq, se);
+@@ -9016,13 +9017,13 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ pse = parent_entity(pse);
+ }
+ if (se_depth >= pse_depth) {
+- set_next_entity(cfs_rq_of(se), se);
++ set_next_entity(cfs_rq_of(se), se, true);
+ se = parent_entity(se);
+ }
+ }
+
+ put_prev_entity(cfs_rq, pse);
+- set_next_entity(cfs_rq, se);
++ set_next_entity(cfs_rq, se, true);
+
+ __set_next_task_fair(rq, p, true);
+ }
+@@ -13621,7 +13622,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+- set_next_entity(cfs_rq, se);
++ set_next_entity(cfs_rq, se, first);
+ /* ensure bandwidth has been allocated on our new cfs_rq */
+ account_cfs_rq_runtime(cfs_rq, 0);
+ }
+--
+2.51.0
+
--- /dev/null
+From 34e89c9b2cc0e5618947dfb98000bf5c209fb083 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Nov 2025 12:09:16 +0100
+Subject: sched/fair: Rename cfs_rq::avg_load to cfs_rq::sum_weight
+
+From: Ingo Molnar <mingo@kernel.org>
+
+[ Upstream commit 4ff674fa986c27ec8a0542479258c92d361a2566 ]
+
+The ::avg_load field is a long-standing misnomer: it says it's an
+'average load', but in reality it's the momentary sum of the load
+of all currently runnable tasks. We'd have to also perform a
+division by nr_running (or use time-decay) to arrive at any sort
+of average value.
+
+This is clear from comments about the math of fair scheduling:
+
+ * \Sum w_i := cfs_rq->avg_load
+
+The sum of all weights is ... the sum of all weights, not
+the average of all weights.
+
+To make it doubly confusing, there's also an ::avg_load
+in the load-balancing struct sg_lb_stats, which *is* a
+true average.
+
+The second part of the field's name is a minor misnomer
+as well: it says 'load', and it is indeed a load_weight
+structure as it shares code with the load-balancer - but
+it's only in an SMP load-balancing context where
+load = weight, in the fair scheduling context the primary
+purpose is the weighting of different nice levels.
+
+So rename the field to ::sum_weight instead, which makes
+the terminology of the EEVDF math match up with our
+implementation of it:
+
+ * \Sum w_i := cfs_rq->sum_weight
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://patch.msgid.link/20251201064647.1851919-6-mingo@kernel.org
+Stable-dep-of: b3d99f43c72b ("sched/fair: Fix zero_vruntime tracking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 16 ++++++++--------
+ kernel/sched/sched.h | 2 +-
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 3eaeceda71b00..afb774c2f7bf7 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -608,7 +608,7 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ *
+ * v0 := cfs_rq->zero_vruntime
+ * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
+- * \Sum w_i := cfs_rq->avg_load
++ * \Sum w_i := cfs_rq->sum_weight
+ *
+ * Since zero_vruntime closely tracks the per-task service, these
+ * deltas: (v_i - v), will be in the order of the maximal (virtual) lag
+@@ -625,7 +625,7 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime += key * weight;
+- cfs_rq->avg_load += weight;
++ cfs_rq->sum_weight += weight;
+ }
+
+ static void
+@@ -635,16 +635,16 @@ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime -= key * weight;
+- cfs_rq->avg_load -= weight;
++ cfs_rq->sum_weight -= weight;
+ }
+
+ static inline
+ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ {
+ /*
+- * v' = v + d ==> avg_vruntime' = avg_runtime - d*avg_load
++ * v' = v + d ==> avg_vruntime' = avg_runtime - d*sum_weight
+ */
+- cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
++ cfs_rq->avg_vruntime -= cfs_rq->sum_weight * delta;
+ }
+
+ /*
+@@ -655,7 +655,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+- long load = cfs_rq->avg_load;
++ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+ unsigned long weight = scale_load_down(curr->load.weight);
+@@ -723,7 +723,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+- long load = cfs_rq->avg_load;
++ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+ unsigned long weight = scale_load_down(curr->load.weight);
+@@ -5175,7 +5175,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ *
+ * vl_i = (W + w_i)*vl'_i / W
+ */
+- load = cfs_rq->avg_load;
++ load = cfs_rq->sum_weight;
+ if (curr && curr->on_rq)
+ load += scale_load_down(curr->load.weight);
+
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 1b4283e9edc3b..f4e9a21cf0936 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -679,7 +679,7 @@ struct cfs_rq {
+ unsigned int h_nr_idle; /* SCHED_IDLE */
+
+ s64 avg_vruntime;
+- u64 avg_load;
++ u64 sum_weight;
+
+ u64 zero_vruntime;
+ #ifdef CONFIG_SCHED_CORE
+--
+2.51.0
+
--- /dev/null
+From f7b40a9e60913dcd886e409d53633cc8b3aaf7b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Dec 2025 16:09:23 +0100
+Subject: sched/fair: Rename cfs_rq::avg_vruntime to ::sum_w_vruntime, and
+ helper functions
+
+From: Ingo Molnar <mingo@kernel.org>
+
+[ Upstream commit dcbc9d3f0e594223275a18f7016001889ad35eff ]
+
+The ::avg_vruntime field is a misnomer: it says it's an
+'average vruntime', but in reality it's the momentary sum
+of the weighted vruntimes of all queued tasks, which is
+at least a division away from being an average.
+
+This is clear from comments about the math of fair scheduling:
+
+ * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
+
+This confusion is increased by the cfs_avg_vruntime() function,
+which does perform the division and returns a true average.
+
+The sum of all weighted vruntimes should be named thusly,
+so rename the field to ::sum_w_vruntime. (As arguably
+::sum_weighted_vruntime would be a bit of a mouthful.)
+
+Understanding the scheduler is hard enough already, without
+extra layers of obfuscated naming. ;-)
+
+Also rename related helper functions:
+
+ sum_vruntime_add() => sum_w_vruntime_add()
+ sum_vruntime_sub() => sum_w_vruntime_sub()
+ sum_vruntime_update() => sum_w_vruntime_update()
+
+With the notable exception of cfs_avg_vruntime(), which
+was named accurately.
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://patch.msgid.link/20251201064647.1851919-7-mingo@kernel.org
+Stable-dep-of: b3d99f43c72b ("sched/fair: Fix zero_vruntime tracking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 26 +++++++++++++-------------
+ kernel/sched/sched.h | 2 +-
+ 2 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index afb774c2f7bf7..f06a5d36106b4 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -607,7 +607,7 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ * Which we track using:
+ *
+ * v0 := cfs_rq->zero_vruntime
+- * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
++ * \Sum (v_i - v0) * w_i := cfs_rq->sum_w_vruntime
+ * \Sum w_i := cfs_rq->sum_weight
+ *
+ * Since zero_vruntime closely tracks the per-task service, these
+@@ -619,32 +619,32 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ * As measured, the max (key * weight) value was ~44 bits for a kernel build.
+ */
+ static void
+-avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
++sum_w_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ unsigned long weight = scale_load_down(se->load.weight);
+ s64 key = entity_key(cfs_rq, se);
+
+- cfs_rq->avg_vruntime += key * weight;
++ cfs_rq->sum_w_vruntime += key * weight;
+ cfs_rq->sum_weight += weight;
+ }
+
+ static void
+-avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
++sum_w_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ unsigned long weight = scale_load_down(se->load.weight);
+ s64 key = entity_key(cfs_rq, se);
+
+- cfs_rq->avg_vruntime -= key * weight;
++ cfs_rq->sum_w_vruntime -= key * weight;
+ cfs_rq->sum_weight -= weight;
+ }
+
+ static inline
+-void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
++void sum_w_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ {
+ /*
+- * v' = v + d ==> avg_vruntime' = avg_runtime - d*sum_weight
++ * v' = v + d ==> sum_w_vruntime' = sum_runtime - d*sum_weight
+ */
+- cfs_rq->avg_vruntime -= cfs_rq->sum_weight * delta;
++ cfs_rq->sum_w_vruntime -= cfs_rq->sum_weight * delta;
+ }
+
+ /*
+@@ -654,7 +654,7 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+- s64 avg = cfs_rq->avg_vruntime;
++ s64 avg = cfs_rq->sum_w_vruntime;
+ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+@@ -722,7 +722,7 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+- s64 avg = cfs_rq->avg_vruntime;
++ s64 avg = cfs_rq->sum_w_vruntime;
+ long load = cfs_rq->sum_weight;
+
+ if (curr && curr->on_rq) {
+@@ -745,7 +745,7 @@ static void update_zero_vruntime(struct cfs_rq *cfs_rq)
+ u64 vruntime = avg_vruntime(cfs_rq);
+ s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
+
+- avg_vruntime_update(cfs_rq, delta);
++ sum_w_vruntime_update(cfs_rq, delta);
+
+ cfs_rq->zero_vruntime = vruntime;
+ }
+@@ -819,7 +819,7 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+ */
+ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- avg_vruntime_add(cfs_rq, se);
++ sum_w_vruntime_add(cfs_rq, se);
+ update_zero_vruntime(cfs_rq);
+ se->min_vruntime = se->vruntime;
+ se->min_slice = se->slice;
+@@ -831,7 +831,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ &min_vruntime_cb);
+- avg_vruntime_sub(cfs_rq, se);
++ sum_w_vruntime_sub(cfs_rq, se);
+ update_zero_vruntime(cfs_rq);
+ }
+
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index f4e9a21cf0936..d3485d48be281 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -678,7 +678,7 @@ struct cfs_rq {
+ unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
+ unsigned int h_nr_idle; /* SCHED_IDLE */
+
+- s64 avg_vruntime;
++ s64 sum_w_vruntime;
+ u64 sum_weight;
+
+ u64 zero_vruntime;
+--
+2.51.0
+
--- /dev/null
+From 433686ab4a069360e8885d20fb2bde9e803ccecc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 12:45:17 +0000
+Subject: sched_ext: Fix SCX_EFLAG_INITIALIZED being a no-op flag
+
+From: David Carlier <devnexen@gmail.com>
+
+[ Upstream commit 749989b2d90ddc7dd253ad3b11a77cf882721acf ]
+
+SCX_EFLAG_INITIALIZED is the sole member of enum scx_exit_flags with no
+explicit value, so the compiler assigns it 0. This makes the bitwise OR
+in scx_ops_init() a no-op:
+
+ sch->exit_info->flags |= SCX_EFLAG_INITIALIZED; /* |= 0 */
+
+As a result, BPF schedulers cannot distinguish whether ops.init()
+completed successfully by inspecting exit_info->flags.
+
+Assign the value 1LLU << 0 so the flag is actually set.
+
+Fixes: f3aec2adce8d ("sched_ext: Add SCX_EFLAG_INITIALIZED to indicate successful ops.init()")
+Signed-off-by: David Carlier <devnexen@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/ext_internal.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
+index 386c677e4c9a0..11ebb744d8931 100644
+--- a/kernel/sched/ext_internal.h
++++ b/kernel/sched/ext_internal.h
+@@ -74,7 +74,7 @@ enum scx_exit_flags {
+ * info communication. The following flag indicates whether ops.init()
+ * finished successfully.
+ */
+- SCX_EFLAG_INITIALIZED,
++ SCX_EFLAG_INITIALIZED = 1LLU << 0,
+ };
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 85fbd32a788ac377a76abf7a52441a1fa8d9d43f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 11:23:27 -0800
+Subject: scsi: lpfc: Properly set WC for DPP mapping
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit bffda93a51b40afd67c11bf558dc5aae83ca0943 ]
+
+Using set_memory_wc() to enable write-combining for the DPP portion of
+the MMIO mapping is wrong as set_memory_*() is meant to operate on RAM
+only, not MMIO mappings. In fact, as used currently triggers a BUG_ON()
+with enabled CONFIG_DEBUG_VIRTUAL.
+
+Simply map the DPP region separately and in addition to the already
+existing mappings, avoiding any possible negative side effects for
+these.
+
+Fixes: 1351e69fc6db ("scsi: lpfc: Add push-to-adapter support to sli4")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Reviewed-by: Mathias Krause <minipli@grsecurity.net>
+Link: https://patch.msgid.link/20260212192327.141104-1-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 ++
+ drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++++++++++++++++++++------
+ drivers/scsi/lpfc/lpfc_sli4.h | 3 +++
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index b1460b16dd91d..c6bb45c3d4c4a 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -12034,6 +12034,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
++ if (phba->sli4_hba.dpp_regs_memmap_wc_p)
++ iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ break;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 73d77cfab5f82..bddfc412b04b5 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -15981,6 +15981,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+ return NULL;
+ }
+
++static __maybe_unused void __iomem *
++lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
++{
++
++ /* DPP region is supposed to cover 64-bit BAR2 */
++ if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
++ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
++ "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
++ dpp_barset);
++ return NULL;
++ }
++
++ if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
++ void __iomem *dpp_map;
++
++ dpp_map = ioremap_wc(phba->pci_bar2_map,
++ pci_resource_len(phba->pcidev,
++ PCI_64BIT_BAR4));
++
++ if (dpp_map)
++ phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
++ }
++
++ return phba->sli4_hba.dpp_regs_memmap_wc_p;
++}
++
+ /**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+@@ -16944,9 +16970,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ uint8_t dpp_barset;
+ uint32_t dpp_offset;
+ uint8_t wq_create_version;
+-#ifdef CONFIG_X86
+- unsigned long pg_addr;
+-#endif
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+@@ -17132,14 +17155,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+
+ #ifdef CONFIG_X86
+ /* Enable combined writes for DPP aperture */
+- pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
+- rc = set_memory_wc(pg_addr, 1);
+- if (rc) {
++ bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
++ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3272 Cannot setup Combined "
+ "Write on WQ[%d] - disable DPP\n",
+ wq->queue_id);
+ phba->cfg_enable_dpp = 0;
++ } else {
++ wq->dpp_regaddr = bar_memmap_p + dpp_offset;
+ }
+ #else
+ phba->cfg_enable_dpp = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index fd6dab1578872..40f313e2769fc 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -785,6 +785,9 @@ struct lpfc_sli4_hba {
+ void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
+ * dpp registers
+ */
++ void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
++ * dpp registers with write combining
++ */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+--
+2.51.0
+
--- /dev/null
+From 9eacfa2be5d2ad25ba8cfc100df4331569905adb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 19:28:06 +0000
+Subject: scsi: pm8001: Fix use-after-free in pm8001_queue_command()
+
+From: Salomon Dushimirimana <salomondush@google.com>
+
+[ Upstream commit 38353c26db28efd984f51d426eac2396d299cca7 ]
+
+Commit e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()") refactors
+pm8001_queue_command(); however, it introduces a potential double-free
+scenario by changing the function to return -ENODEV in the case of a
+phy down/device gone state.
+
+In this path, pm8001_queue_command() updates task status and calls
+task_done to indicate to upper layer that the task has been handled.
+However, this also frees the underlying SAS task. A -ENODEV is then
+returned to the caller. When libsas sas_ata_qc_issue() receives this error
+value, it assumes the task wasn't handled/queued by LLDD and proceeds to
+clean up and free the task again, resulting in a double free.
+
+Since pm8001_queue_command() handles the SAS task in this case, it should
+return 0 to the caller indicating that the task has been handled.
+
+Fixes: e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()")
+Signed-off-by: Salomon Dushimirimana <salomondush@google.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://patch.msgid.link/20260213192806.439432-1-salomondush@google.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/pm8001/pm8001_sas.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 6a8d35aea93a5..645524f3fe2d0 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -525,8 +525,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
+ } else {
+ task->task_done(task);
+ }
+- rc = -ENODEV;
+- goto err_out;
++ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n");
++ return 0;
+ }
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+--
+2.51.0
+
--- /dev/null
+From e548a1425b592226ec68067db33bf5963a9e7c66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 18:37:57 +0800
+Subject: scsi: ufs: core: Move link recovery for hibern8 exit failure to
+ wl_resume
+
+From: Peter Wang <peter.wang@mediatek.com>
+
+[ Upstream commit 62c015373e1cdb1cdca824bd2dbce2dac0819467 ]
+
+Move the link recovery trigger from ufshcd_uic_pwr_ctrl() to
+__ufshcd_wl_resume(). Ensure link recovery is only attempted when hibern8
+exit fails during resume, not during hibern8 enter in suspend. Improve
+error handling and prevent unnecessary link recovery attempts.
+
+Fixes: 35dabf4503b9 ("scsi: ufs: core: Use link recovery when h8 exit fails during runtime resume")
+Signed-off-by: Peter Wang <peter.wang@mediatek.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223103906.2533654-1-peter.wang@mediatek.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 09f0d77d57f02..d6e4e99a571f1 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4385,14 +4385,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+- /*
+- * If the h8 exit fails during the runtime resume process, it becomes
+- * stuck and cannot be recovered through the error handler. To fix
+- * this, use link recovery instead of the error handler.
+- */
+- if (ret && hba->pm_op_in_progress)
+- ret = ufshcd_link_recovery(hba);
+-
+ return ret;
+ }
+
+@@ -10174,7 +10166,15 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+- goto vendor_suspend;
++ /*
++ * If the h8 exit fails during the runtime resume
++ * process, it becomes stuck and cannot be recovered
++ * through the error handler. To fix this, use link
++ * recovery instead of the error handler.
++ */
++ ret = ufshcd_link_recovery(hba);
++ if (ret)
++ goto vendor_suspend;
+ }
+ } else if (ufshcd_is_link_off(hba)) {
+ /*
+--
+2.51.0
+
--- /dev/null
+From ca1c8982d29b626eca4e10ed842c154f56aa1d12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 16:33:48 -0800
+Subject: selftests/bpf: Fix OOB read in dmabuf_collector
+
+From: T.J. Mercier <tjmercier@google.com>
+
+[ Upstream commit 6881af27f9ea0f5ca8f606f573ef5cc25ca31fe4 ]
+
+Dmabuf name allocations can be less than DMA_BUF_NAME_LEN characters,
+but bpf_probe_read_kernel always tries to read exactly that many bytes.
+If a name is less than DMA_BUF_NAME_LEN characters,
+bpf_probe_read_kernel will read past the end. bpf_probe_read_kernel_str
+stops at the first NUL terminator so use it instead, like
+iter_dmabuf_for_each already does.
+
+Fixes: ae5d2c59ecd7 ("selftests/bpf: Add test for dmabuf_iter")
+Reported-by: Jerome Lee <jaewookl@quicinc.com>
+Signed-off-by: T.J. Mercier <tjmercier@google.com>
+Link: https://lore.kernel.org/r/20260225003349.113746-1-tjmercier@google.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/progs/dmabuf_iter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
+index 13cdb11fdeb2b..9cbb7442646e5 100644
+--- a/tools/testing/selftests/bpf/progs/dmabuf_iter.c
++++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
+@@ -48,7 +48,7 @@ int dmabuf_collector(struct bpf_iter__dmabuf *ctx)
+
+ /* Buffers are not required to be named */
+ if (pname) {
+- if (bpf_probe_read_kernel(name, sizeof(name), pname))
++ if (bpf_probe_read_kernel_str(name, sizeof(name), pname) < 0)
+ return 1;
+
+ /* Name strings can be provided by userspace */
+--
+2.51.0
+
perf-core-fix-refcount-bug-and-potential-uaf-in-perf_mmap.patch
+drm-vmwgfx-fix-invalid-kref_put-callback-in-vmw_bo_d.patch
+drm-vmwgfx-return-the-correct-value-in-vmw_translate.patch
+debugobject-make-it-work-with-deferred-page-initiali.patch
+drm-logicvc-fix-device-node-reference-leak-in-logicv.patch
+kvm-arm64-hide-s1poe-from-guests-when-not-supported-.patch
+kvm-arm64-fix-id-register-initialization-for-non-pro.patch
+drm-fourcc-fix-plane-order-for-10-12-16-bit-ycbcr-fo.patch
+drm-tiny-sharp-memory-fix-pointer-error-dereference.patch
+irqchip-sifive-plic-fix-frozen-interrupt-due-to-affi.patch
+scsi-lpfc-properly-set-wc-for-dpp-mapping.patch
+scsi-pm8001-fix-use-after-free-in-pm8001_queue_comma.patch
+accel-ethosu-fix-shift-overflow-in-cmd_to_addr.patch
+drm-imx-parallel-display-check-return-value-of-devm_.patch
+drm-bridge-synopsys-dw-dp-check-return-value-of-devm.patch
+alsa-scarlett2-fix-dsp-filter-control-array-handling.patch
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch
+alsa-usb-audio-add-quirk_flag_skip_iface_setup.patch
+gpio-shared-fix-memory-leaks.patch
+x86-fred-correct-speculative-safety-in-fred_extint.patch
+x86-bug-handle-__warn_printf-trap-in-early_fixup_exc.patch
+x86-cfi-fix-cfi-rewrite-for-odd-alignments.patch
+sched-fair-rename-cfs_rq-avg_load-to-cfs_rq-sum_weig.patch
+sched-fair-rename-cfs_rq-avg_vruntime-to-sum_w_vrunt.patch
+sched-fair-introduce-and-use-the-vruntime_cmp-and-vr.patch
+sched-fair-fix-zero_vruntime-tracking.patch
+sched-fair-only-set-slice-protection-at-pick-time.patch
+sched-eevdf-update-se-vprot-in-reweight_entity.patch
+sched-fair-fix-lag-clamp.patch
+rseq-clarify-rseq-registration-rseq_size-bound-check.patch
+perf-core-fix-invalid-wait-context-in-ctx_sched_in.patch
+accel-amdxdna-remove-buffer-size-check-when-creating.patch
+accel-amdxdna-switch-to-always-use-chained-command.patch
+accel-amdxdna-fix-crash-when-destroying-a-suspended-.patch
+accel-amdxdna-reduce-log-noise-during-process-termin.patch
+accel-amdxdna-fix-dead-lock-for-suspend-and-resume.patch
+accel-amdxdna-fix-suspend-failure-after-enabling-tur.patch
+accel-amdxdna-fix-command-hang-on-suspended-hardware.patch
+accel-amdxdna-fix-out-of-bounds-memset-in-command-sl.patch
+accel-amdxdna-prevent-ubuf-size-overflow.patch
+accel-amdxdna-validate-command-buffer-payload-count.patch
+drm-xe-wa-steer-rmw-of-mcr-registers-while-building-.patch
+cgroup-cpuset-fix-incorrect-change-to-effective_xcpu.patch
+cgroup-cpuset-fix-incorrect-use-of-cpuset_update_tas.patch
+clk-scu-imx8qxp-do-not-register-driver-in-probe.patch
+cxl-move-devm_cxl_add_nvdimm_bridge-to-cxl_pmem.ko.patch
+cxl-fix-race-of-nvdimm_bus-object-when-creating-nvdi.patch
+cxl-mbox-validate-payload-size-before-accessing-cont.patch
+scsi-ufs-core-move-link-recovery-for-hibern8-exit-fa.patch
+regulator-fp9931-fix-pm-runtime-reference-leak-in-fp.patch
+regulator-bq257xx-fix-device-node-reference-leak-in-.patch
+irqchip-ls-extirq-fix-devm_of_iomap-error-check.patch
+io_uring-cmd_net-use-read_once-for-addr3-read.patch
+zloop-advertise-a-volatile-write-cache.patch
+zloop-check-for-spurious-options-passed-to-remove.patch
+drm-client-do-not-destroy-null-modes.patch
+alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
+alsa-usb-audio-use-inclusive-terms.patch
+perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
+s390-idle-fix-cpu-idle-exit-cpu-time-accounting.patch
+s390-vtime-fix-virtual-timer-forwarding.patch
+s390-kexec-disable-stack-protector-in-s390_reset_sys.patch
+arm64-io-rename-ioremap_prot-to-__ioremap_prot.patch
+arm64-io-extract-user-memory-type-in-ioremap_prot.patch
+pci-dwc-ep-refresh-msi-message-address-cache-on-chan.patch
+pci-dwc-ep-flush-msi-x-write-before-unmapping-its-at.patch
+drm-amdgpu-userq-do-not-allow-userspace-to-trivially.patch
+drm-amdgpu-unlock-a-mutex-before-destroying-it.patch
+drm-amdgpu-fix-locking-bugs-in-error-paths.patch
+drm-amdgpu-fix-error-handling-in-slot-reset.patch
+alsa-hda-cs35l56-fix-signedness-error-in-cs35l56_hda.patch
+btrfs-free-pages-on-error-in-btrfs_uring_read_extent.patch
+btrfs-fix-error-message-order-of-parameters-in-btrfs.patch
+btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
+btrfs-fix-objectid-value-in-error-message-in-check_e.patch
+btrfs-fix-warning-in-scrub_verify_one_metadata.patch
+btrfs-print-correct-subvol-num-if-active-swapfile-pr.patch
+btrfs-fix-compat-mask-in-error-messages-in-btrfs_che.patch
+alsa-usb-qcom-correct-parameter-comment-for-uaudio_t.patch
+mm-slab-pass-__gfp_nowarn-to-refill_sheaf-if-fallbac.patch
+asoc-sdca-fix-comments-for-sdca_irq_request.patch
+bpf-arm64-force-8-byte-alignment-for-jit-buffer-to-p.patch
+bpf-fix-stack-out-of-bounds-write-in-devmap.patch
+selftests-bpf-fix-oob-read-in-dmabuf_collector.patch
+sched_ext-fix-scx_eflag_initialized-being-a-no-op-fl.patch
+spi-stm32-fix-missing-pointer-assignment-in-case-of-.patch
+pci-correct-pci_cap_exp_endpoint_sizeof_v2-value.patch
+bpf-fix-race-in-cpumap-on-preempt_rt.patch
+bpf-fix-race-in-devmap-on-preempt_rt.patch
+bpf-add-bitwise-tracking-for-bpf_end.patch
+bpf-introduce-tnum_step-to-step-through-tnum-s-membe.patch
+bpf-improve-bounds-when-tnum-has-a-single-possible-v.patch
+uaccess-fix-scoped_user_read_access-for-pointer-to-c.patch
--- /dev/null
+From 2f014ef41cda5e2c62db117183a17bf7449a8c89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 16:09:22 +0100
+Subject: spi: stm32: fix missing pointer assignment in case of dma chaining
+
+From: Alain Volmat <alain.volmat@foss.st.com>
+
+[ Upstream commit e96493229a6399e902062213c6381162464cdd50 ]
+
+Commit c4f2c05ab029 ("spi: stm32: fix pointer-to-pointer variables usage")
+introduced a regression since dma descriptors generated as part of the
+stm32_spi_prepare_rx_dma_mdma_chaining function are not well propagated
+to the caller function, leading to mdma-dma chaining being no more
+functional.
+
+Fixes: c4f2c05ab029 ("spi: stm32: fix pointer-to-pointer variables usage")
+Signed-off-by: Alain Volmat <alain.volmat@foss.st.com>
+Acked-by: Antonio Quartulli <antonio@mandelbit.com>
+Link: https://patch.msgid.link/20260224-spi-stm32-chaining-fix-v1-1-5da7a4851b66@foss.st.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-stm32.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 80986bd251d29..7a6ee93be9bd4 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1570,6 +1570,9 @@ static int stm32_spi_prepare_rx_dma_mdma_chaining(struct stm32_spi *spi,
+ return -EINVAL;
+ }
+
++ *rx_mdma_desc = _mdma_desc;
++ *rx_dma_desc = _dma_desc;
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7d361fc9e4fbbf38a5c897f9e8e64cbd71c7c273 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 13:27:51 +0000
+Subject: uaccess: Fix scoped_user_read_access() for 'pointer to const'
+
+From: David Laight <david.laight.linux@gmail.com>
+
+[ Upstream commit af4e9ef3d78420feb8fe58cd9a1ab80c501b3c08 ]
+
+If a 'const struct foo __user *ptr' is used for the address passed to
+scoped_user_read_access() then you get a warning/error
+
+ uaccess.h:691:1: error: initialization discards 'const' qualifier from pointer target type [-Werror=discarded-qualifiers]
+
+for the
+
+ void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl)
+
+assignment.
+
+Fix by using 'auto' for both _tmpptr and the redeclaration of uptr.
+Replace the CLASS() with explicit __cleanup() functions on uptr.
+
+Fixes: e497310b4ffb ("uaccess: Provide scoped user access regions")
+Signed-off-by: David Laight <david.laight.linux@gmail.com>
+Reviewed-and-tested-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/uaccess.h | 54 +++++++++++++++--------------------------
+ 1 file changed, 20 insertions(+), 34 deletions(-)
+
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index 1f3804245c066..809e4f7dfdbd4 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -647,36 +647,22 @@ static inline void user_access_restore(unsigned long flags) { }
+ /* Define RW variant so the below _mode macro expansion works */
+ #define masked_user_rw_access_begin(u) masked_user_access_begin(u)
+ #define user_rw_access_begin(u, s) user_access_begin(u, s)
+-#define user_rw_access_end() user_access_end()
+
+ /* Scoped user access */
+-#define USER_ACCESS_GUARD(_mode) \
+-static __always_inline void __user * \
+-class_user_##_mode##_begin(void __user *ptr) \
+-{ \
+- return ptr; \
+-} \
+- \
+-static __always_inline void \
+-class_user_##_mode##_end(void __user *ptr) \
+-{ \
+- user_##_mode##_access_end(); \
+-} \
+- \
+-DEFINE_CLASS(user_ ##_mode## _access, void __user *, \
+- class_user_##_mode##_end(_T), \
+- class_user_##_mode##_begin(ptr), void __user *ptr) \
+- \
+-static __always_inline class_user_##_mode##_access_t \
+-class_user_##_mode##_access_ptr(void __user *scope) \
+-{ \
+- return scope; \
+-}
+
+-USER_ACCESS_GUARD(read)
+-USER_ACCESS_GUARD(write)
+-USER_ACCESS_GUARD(rw)
+-#undef USER_ACCESS_GUARD
++/* Cleanup wrapper functions */
++static __always_inline void __scoped_user_read_access_end(const void *p)
++{
++ user_read_access_end();
++};
++static __always_inline void __scoped_user_write_access_end(const void *p)
++{
++ user_write_access_end();
++};
++static __always_inline void __scoped_user_rw_access_end(const void *p)
++{
++ user_access_end();
++};
+
+ /**
+ * __scoped_user_access_begin - Start a scoped user access
+@@ -750,13 +736,13 @@ USER_ACCESS_GUARD(rw)
+ *
+ * Don't use directly. Use scoped_masked_user_$MODE_access() instead.
+ */
+-#define __scoped_user_access(mode, uptr, size, elbl) \
+-for (bool done = false; !done; done = true) \
+- for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
+- !done; done = true) \
+- for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true) \
+- /* Force modified pointer usage within the scope */ \
+- for (const typeof(uptr) uptr = _tmpptr; !done; done = true)
++#define __scoped_user_access(mode, uptr, size, elbl) \
++for (bool done = false; !done; done = true) \
++ for (auto _tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
++ !done; done = true) \
++ /* Force modified pointer usage within the scope */ \
++ for (const auto uptr __cleanup(__scoped_user_##mode##_access_end) = \
++ _tmpptr; !done; done = true)
+
+ /**
+ * scoped_user_read_access_size - Start a scoped user read access with given size
+--
+2.51.0
+
--- /dev/null
+From acff22e89e18c4b1e852280c37a4bd0b5dd820d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Jan 2026 11:47:37 +0800
+Subject: x86/bug: Handle __WARN_printf() trap in early_fixup_exception()
+
+From: Hou Wenlong <houwenlong.hwl@antgroup.com>
+
+[ Upstream commit a0cb371b521dde44f32cfe954b6ef6f82b407393 ]
+
+The commit 5b472b6e5bd9 ("x86_64/bug: Implement __WARN_printf()")
+implemented __WARN_printf(), which changed the mechanism to use UD1
+instead of UD2. However, it only handles the trap in the runtime IDT
+handler, while the early booting IDT handler lacks this handling. As a
+result, the usage of WARN() before the runtime IDT setup can lead to
+kernel crashes. Since KMSAN is enabled after the runtime IDT setup, it
+is safe to use handle_bug() directly in early_fixup_exception() to
+address this issue.
+
+Fixes: 5b472b6e5bd9 ("x86_64/bug: Implement __WARN_printf()")
+Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/c4fb3645f60d3a78629d9870e8fcc8535281c24f.1768016713.git.houwenlong.hwl@antgroup.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/traps.h | 2 ++
+ arch/x86/kernel/traps.c | 2 +-
+ arch/x86/mm/extable.c | 7 ++-----
+ 3 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
+index 869b880618018..3f24cc472ce9b 100644
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -25,6 +25,8 @@ extern int ibt_selftest_noendbr(void);
+ void handle_invalid_op(struct pt_regs *regs);
+ #endif
+
++noinstr bool handle_bug(struct pt_regs *regs);
++
+ static inline int get_si_code(unsigned long condition)
+ {
+ if (condition & DR_STEP)
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index bcf1dedc1d008..aca1eca5daffa 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -397,7 +397,7 @@ static inline void handle_invalid_op(struct pt_regs *regs)
+ ILL_ILLOPN, error_get_trap_addr(regs));
+ }
+
+-static noinstr bool handle_bug(struct pt_regs *regs)
++noinstr bool handle_bug(struct pt_regs *regs)
+ {
+ unsigned long addr = regs->ip;
+ bool handled = false;
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+index 2fdc1f1f5adb9..6b9ff1c6cafa2 100644
+--- a/arch/x86/mm/extable.c
++++ b/arch/x86/mm/extable.c
+@@ -411,14 +411,11 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
+ return;
+
+ if (trapnr == X86_TRAP_UD) {
+- if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+- /* Skip the ud2. */
+- regs->ip += LEN_UD2;
++ if (handle_bug(regs))
+ return;
+- }
+
+ /*
+- * If this was a BUG and report_bug returns or if this
++ * If this was a BUG and handle_bug returns or if this
+ * was just a normal #UD, we want to continue onward and
+ * crash.
+ */
+--
+2.51.0
+
--- /dev/null
+From 1ffd506b16c4012d711be261a95d29faa9acff20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 13:59:43 +0100
+Subject: x86/cfi: Fix CFI rewrite for odd alignments
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 24c8147abb39618d74fcc36e325765e8fe7bdd7a ]
+
+Rustam reported his clang builds did not boot properly; turns out his
+.config has: CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y set.
+
+Fix up the FineIBT code to deal with this unusual alignment.
+
+Fixes: 931ab63664f0 ("x86/ibt: Implement FineIBT")
+Reported-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/cfi.h | 12 ++++++++----
+ arch/x86/include/asm/linkage.h | 4 ++--
+ arch/x86/kernel/alternative.c | 29 ++++++++++++++++++++++-------
+ arch/x86/net/bpf_jit_comp.c | 13 ++-----------
+ 4 files changed, 34 insertions(+), 24 deletions(-)
+
+diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h
+index c40b9ebc1fb40..ab3fbbd947ed9 100644
+--- a/arch/x86/include/asm/cfi.h
++++ b/arch/x86/include/asm/cfi.h
+@@ -111,6 +111,12 @@ extern bhi_thunk __bhi_args_end[];
+
+ struct pt_regs;
+
++#ifdef CONFIG_CALL_PADDING
++#define CFI_OFFSET (CONFIG_FUNCTION_PADDING_CFI+5)
++#else
++#define CFI_OFFSET 5
++#endif
++
+ #ifdef CONFIG_CFI
+ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+ #define __bpfcall
+@@ -119,11 +125,9 @@ static inline int cfi_get_offset(void)
+ {
+ switch (cfi_mode) {
+ case CFI_FINEIBT:
+- return 16;
++ return /* fineibt_prefix_size */ 16;
+ case CFI_KCFI:
+- if (IS_ENABLED(CONFIG_CALL_PADDING))
+- return 16;
+- return 5;
++ return CFI_OFFSET;
+ default:
+ return 0;
+ }
+diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
+index 9d38ae744a2e4..a7294656ad908 100644
+--- a/arch/x86/include/asm/linkage.h
++++ b/arch/x86/include/asm/linkage.h
+@@ -68,7 +68,7 @@
+ * Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_PADDING) the
+ * CFI symbol layout changes.
+ *
+- * Without CALL_THUNKS:
++ * Without CALL_PADDING:
+ *
+ * .align FUNCTION_ALIGNMENT
+ * __cfi_##name:
+@@ -77,7 +77,7 @@
+ * .long __kcfi_typeid_##name
+ * name:
+ *
+- * With CALL_THUNKS:
++ * With CALL_PADDING:
+ *
+ * .align FUNCTION_ALIGNMENT
+ * __cfi_##name:
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 28518371d8bf3..a3f81cde2bb59 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -1147,7 +1147,7 @@ void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
+
+ poison_endbr(addr);
+ if (IS_ENABLED(CONFIG_FINEIBT))
+- poison_cfi(addr - 16);
++ poison_cfi(addr - CFI_OFFSET);
+ }
+ }
+
+@@ -1354,6 +1354,8 @@ extern u8 fineibt_preamble_end[];
+ #define fineibt_preamble_ud 0x13
+ #define fineibt_preamble_hash 5
+
++#define fineibt_prefix_size (fineibt_preamble_size - ENDBR_INSN_SIZE)
++
+ /*
+ * <fineibt_caller_start>:
+ * 0: b8 78 56 34 12 mov $0x12345678, %eax
+@@ -1599,7 +1601,7 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
+ * have determined there are no indirect calls to it and we
+ * don't need no CFI either.
+ */
+- if (!is_endbr(addr + 16))
++ if (!is_endbr(addr + CFI_OFFSET))
+ continue;
+
+ hash = decode_preamble_hash(addr, &arity);
+@@ -1607,6 +1609,15 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
+ addr, addr, 5, addr))
+ return -EINVAL;
+
++ /*
++ * FineIBT relies on being at func-16, so if the preamble is
++ * actually larger than that, place it the tail end.
++ *
++ * NOTE: this is possible with things like DEBUG_CALL_THUNKS
++ * and DEBUG_FORCE_FUNCTION_ALIGN_64B.
++ */
++ addr += CFI_OFFSET - fineibt_prefix_size;
++
+ text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
+ WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
+ text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
+@@ -1629,10 +1640,10 @@ static void cfi_rewrite_endbr(s32 *start, s32 *end)
+ for (s = start; s < end; s++) {
+ void *addr = (void *)s + *s;
+
+- if (!exact_endbr(addr + 16))
++ if (!exact_endbr(addr + CFI_OFFSET))
+ continue;
+
+- poison_endbr(addr + 16);
++ poison_endbr(addr + CFI_OFFSET);
+ }
+ }
+
+@@ -1737,7 +1748,8 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
+ if (FINEIBT_WARN(fineibt_preamble_size, 20) ||
+ FINEIBT_WARN(fineibt_preamble_bhi + fineibt_bhi1_size, 20) ||
+ FINEIBT_WARN(fineibt_caller_size, 14) ||
+- FINEIBT_WARN(fineibt_paranoid_size, 20))
++ FINEIBT_WARN(fineibt_paranoid_size, 20) ||
++ WARN_ON_ONCE(CFI_OFFSET < fineibt_prefix_size))
+ return;
+
+ if (cfi_mode == CFI_AUTO) {
+@@ -1850,6 +1862,11 @@ static void poison_cfi(void *addr)
+ */
+ switch (cfi_mode) {
+ case CFI_FINEIBT:
++ /*
++ * FineIBT preamble is at func-16.
++ */
++ addr += CFI_OFFSET - fineibt_prefix_size;
++
+ /*
+ * FineIBT prefix should start with an ENDBR.
+ */
+@@ -1888,8 +1905,6 @@ static void poison_cfi(void *addr)
+ }
+ }
+
+-#define fineibt_prefix_size (fineibt_preamble_size - ENDBR_INSN_SIZE)
+-
+ /*
+ * When regs->ip points to a 0xD6 byte in the FineIBT preamble,
+ * return true and fill out target and type.
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index b0bac2a66eff3..ea76949ddda5e 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -438,17 +438,8 @@ static void emit_kcfi(u8 **pprog, u32 hash)
+
+ EMIT1_off32(0xb8, hash); /* movl $hash, %eax */
+ #ifdef CONFIG_CALL_PADDING
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
+- EMIT1(0x90);
++ for (int i = 0; i < CONFIG_FUNCTION_PADDING_CFI; i++)
++ EMIT1(0x90);
+ #endif
+ EMIT_ENDBR();
+
+--
+2.51.0
+
--- /dev/null
+From fc09b739a060007b0aeaa6a107b7d8ea6f1a6436 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 13:15:04 +0000
+Subject: x86/fred: Correct speculative safety in fred_extint()
+
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+
+[ Upstream commit aa280a08e7d8fae58557acc345b36b3dc329d595 ]
+
+array_index_nospec() is no use if the result gets spilled to the stack, as
+it makes the believed safe-under-speculation value subject to memory
+predictions.
+
+For all practical purposes, this means array_index_nospec() must be used in
+the expression that accesses the array.
+
+As the code currently stands, it's the wrong side of irqentry_enter(), and
+'index' is put into %ebp across the function call.
+
+Remove the index variable and reposition array_index_nospec(), so it's
+calculated immediately before the array access.
+
+Fixes: 14619d912b65 ("x86/fred: FRED entry/exit and dispatch code")
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260106131504.679932-1-andrew.cooper3@citrix.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/entry_fred.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/entry/entry_fred.c b/arch/x86/entry/entry_fred.c
+index 94e626cc6a074..4fc5b176d3edb 100644
+--- a/arch/x86/entry/entry_fred.c
++++ b/arch/x86/entry/entry_fred.c
+@@ -159,8 +159,6 @@ void __init fred_complete_exception_setup(void)
+ static noinstr void fred_extint(struct pt_regs *regs)
+ {
+ unsigned int vector = regs->fred_ss.vector;
+- unsigned int index = array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
+- NR_SYSTEM_VECTORS);
+
+ if (WARN_ON_ONCE(vector < FIRST_EXTERNAL_VECTOR))
+ return;
+@@ -169,7 +167,8 @@ static noinstr void fred_extint(struct pt_regs *regs)
+ irqentry_state_t state = irqentry_enter(regs);
+
+ instrumentation_begin();
+- sysvec_table[index](regs);
++ sysvec_table[array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
++ NR_SYSTEM_VECTORS)](regs);
+ instrumentation_end();
+ irqentry_exit(regs, state);
+ } else {
+--
+2.51.0
+
--- /dev/null
+From 847d7833656e9844cffddaf582e26aaa2ae487a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 06:21:44 -0800
+Subject: zloop: advertise a volatile write cache
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 6acf7860dcc79ed045cc9e6a79c8a8bb6959dba7 ]
+
+Zloop is file system backed and thus needs to sync the underlying file
+system to persist data. Set BLK_FEAT_WRITE_CACHE so that the block
+layer actually sends flush commands, and fix the flush implementation
+as sync_filesystem requires s_umount to be held and the code currently
+misses that.
+
+Fixes: eb0570c7df23 ("block: new zoned loop block device driver")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/zloop.c | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
+index 8e334f5025fc0..ae9bf2a85c21c 100644
+--- a/drivers/block/zloop.c
++++ b/drivers/block/zloop.c
+@@ -542,6 +542,21 @@ static void zloop_rw(struct zloop_cmd *cmd)
+ zloop_put_cmd(cmd);
+ }
+
++/*
++ * Sync the entire FS containing the zone files instead of walking all files.
++ */
++static int zloop_flush(struct zloop_device *zlo)
++{
++ struct super_block *sb = file_inode(zlo->data_dir)->i_sb;
++ int ret;
++
++ down_read(&sb->s_umount);
++ ret = sync_filesystem(sb);
++ up_read(&sb->s_umount);
++
++ return ret;
++}
++
+ static void zloop_handle_cmd(struct zloop_cmd *cmd)
+ {
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+@@ -562,11 +577,7 @@ static void zloop_handle_cmd(struct zloop_cmd *cmd)
+ zloop_rw(cmd);
+ return;
+ case REQ_OP_FLUSH:
+- /*
+- * Sync the entire FS containing the zone files instead of
+- * walking all files
+- */
+- cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
++ cmd->ret = zloop_flush(zlo);
+ break;
+ case REQ_OP_ZONE_RESET:
+ cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
+@@ -981,7 +992,8 @@ static int zloop_ctl_add(struct zloop_options *opts)
+ struct queue_limits lim = {
+ .max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
+ .chunk_sectors = opts->zone_size,
+- .features = BLK_FEAT_ZONED,
++ .features = BLK_FEAT_ZONED | BLK_FEAT_WRITE_CACHE,
++
+ };
+ unsigned int nr_zones, i, j;
+ struct zloop_device *zlo;
+--
+2.51.0
+
--- /dev/null
+From 3191a433ea2a6759d83b9226452ae8b6fdbe00ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 06:21:45 -0800
+Subject: zloop: check for spurious options passed to remove
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 3c4617117a2b7682cf037be5e5533e379707f050 ]
+
+Zloop uses a command option parser for all control commands,
+but most options are only valid for adding a new device. Check
+for incorrectly specified options in the remove handler.
+
+Fixes: eb0570c7df23 ("block: new zoned loop block device driver")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/zloop.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
+index ae9bf2a85c21c..9e3bb538d5fcf 100644
+--- a/drivers/block/zloop.c
++++ b/drivers/block/zloop.c
+@@ -1174,7 +1174,12 @@ static int zloop_ctl_remove(struct zloop_options *opts)
+ int ret;
+
+ if (!(opts->mask & ZLOOP_OPT_ID)) {
+- pr_err("No ID specified\n");
++ pr_err("No ID specified for remove\n");
++ return -EINVAL;
++ }
++
++ if (opts->mask & ~ZLOOP_OPT_ID) {
++ pr_err("Invalid option specified for remove\n");
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 54a525fe6dfbb8abd82eeda2b65f41d78bd7b39f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 11:17:28 +0000
+Subject: ALSA: hda: cs35l56: Fix signedness error in cs35l56_hda_posture_put()
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 003ce8c9b2ca28fbb4860651e76fb1c9a91f2ea1 ]
+
+In cs35l56_hda_posture_put() assign ucontrol->value.integer.value[0] to
+a long instead of an unsigned long. ucontrol->value.integer.value[0] is
+a long.
+
+This fixes the sparse warning:
+
+sound/hda/codecs/side-codecs/cs35l56_hda.c:256:20: warning: unsigned value
+that used to be signed checked against zero?
+sound/hda/codecs/side-codecs/cs35l56_hda.c:252:29: signed value source
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Fixes: 73cfbfa9caea8 ("ALSA: hda/cs35l56: Add driver for Cirrus Logic CS35L56 amplifier")
+Link: https://patch.msgid.link/20260226111728.1700431-1-rf@opensource.cirrus.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/cs35l56_hda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 03b2a6a919b4d..8d86a13b8a960 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -238,7 +238,7 @@ static int cs35l56_hda_posture_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+- unsigned long pos = ucontrol->value.integer.value[0];
++ long pos = ucontrol->value.integer.value[0];
+ bool changed;
+ int ret;
+
+--
+2.51.0
+
--- /dev/null
+From f655638864ad1bb89eb1fcbe2749e58e1854a7b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 04:59:45 +0000
+Subject: ALSA: pci: hda: use snd_kcontrol_chip()
+
+From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+[ Upstream commit 483dd12dbe34c6d4e71d4d543bcb1292bcb62d08 ]
+
+We can use snd_kcontrol_chip(). Let's use it.
+
+Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/87plglauda.wl-kuninori.morimoto.gx@renesas.com
+Stable-dep-of: 003ce8c9b2ca ("ALSA: hda: cs35l56: Fix signedness error in cs35l56_hda_posture_put()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/cs35l56_hda.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index b84f3b3eb1409..03b2a6a919b4d 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -174,7 +174,7 @@ static int cs35l56_hda_mixer_info(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_mixer_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int reg_val;
+ int i;
+
+@@ -194,7 +194,7 @@ static int cs35l56_hda_mixer_get(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_mixer_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int item = ucontrol->value.enumerated.item[0];
+ bool changed;
+
+@@ -221,7 +221,7 @@ static int cs35l56_hda_posture_info(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_posture_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int pos;
+ int ret;
+
+@@ -237,7 +237,7 @@ static int cs35l56_hda_posture_get(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_posture_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned long pos = ucontrol->value.integer.value[0];
+ bool changed;
+ int ret;
+@@ -284,7 +284,7 @@ static int cs35l56_hda_vol_info(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ unsigned int raw_vol;
+ int vol;
+ int ret;
+@@ -308,7 +308,7 @@ static int cs35l56_hda_vol_get(struct snd_kcontrol *kcontrol,
+ static int cs35l56_hda_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct cs35l56_hda *cs35l56 = (struct cs35l56_hda *)kcontrol->private_data;
++ struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol);
+ long vol = ucontrol->value.integer.value[0];
+ unsigned int raw_vol;
+ bool changed;
+--
+2.51.0
+
--- /dev/null
+From 004f43b6fbed9c4b8a6cd3e414a495b20930ceea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 +0100
+Subject: ALSA: usb-audio: Cap the packet size pre-calculations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7fe8dec3f628e9779f1631576f8e693370050348 ]
+
+We calculate the possible packet sizes beforehand for adaptive and
+synchronous endpoints, but we didn't take care of the max frame size
+for those pre-calculated values. When a device or a bus limits the
+packet size, a high sample rate or a high number of channels may lead
+to the packet sizes that are larger than the given limit, which
+results in an error from the USB core at submitting URBs.
+
+As a simple workaround, just add the sanity checks of pre-calculated
+packet sizes to have the upper boundary of ep->maxframesize.
+
+Fixes: f0bd62b64016 ("ALSA: usb-audio: Improve frames size computation")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221076
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 1092b964167e9..d035b25f67b64 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1396,6 +1396,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ goto unlock;
+ }
+
++ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
++ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
++
+ /* calculate the frequency in 16.16 format */
+ ep->freqm = ep->freqn;
+ ep->freqshift = INT_MIN;
+--
+2.51.0
+
--- /dev/null
+From a7fe4db1ff95a0ff580dad6010edad2fded9fb33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) the Air and
+Safe modes get disabled.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index ff2bbe761ee3a..15e72c419dbc2 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2308,7 +2308,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.51.0
+
--- /dev/null
+From 85960ff4067b7c727ab0baac8fb51ff61cab1a3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:31 +0100
+Subject: ALSA: usb-audio: Use inclusive terms
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 4e9113c533acee2ba1f72fd68ee6ecd36b64484e ]
+
+Replace the remaining with inclusive terms; it's only this function
+name we overlooked at the previous conversion.
+
+Fixes: 53837b4ac2bd ("ALSA: usb-audio: Replace slave/master terms")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20260225085233.316306-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index d035b25f67b64..806755a65fc05 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -160,8 +160,8 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
+ * This won't be used for implicit feedback which takes the packet size
+ * returned from the sync source
+ */
+-static int slave_next_packet_size(struct snd_usb_endpoint *ep,
+- unsigned int avail)
++static int synced_next_packet_size(struct snd_usb_endpoint *ep,
++ unsigned int avail)
+ {
+ unsigned long flags;
+ unsigned int phase;
+@@ -230,7 +230,7 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ }
+
+ if (ep->sync_source)
+- return slave_next_packet_size(ep, avail);
++ return synced_next_packet_size(ep, avail);
+ else
+ return next_packet_size(ep, avail);
+ }
+--
+2.51.0
+
--- /dev/null
+From c9cea876c6119ea2c155c902286c4fa7a0b3ec58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 05:34:44 +0000
+Subject: bpf: Fix stack-out-of-bounds write in devmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b7bf516c3ecd9a2aae2dc2635178ab87b734fef1 ]
+
+get_upper_ifindexes() iterates over all upper devices and writes their
+indices into an array without checking bounds.
+
+Also the callers assume that the max number of upper devices is
+MAX_NEST_DEV and allocate excluded_devices[1+MAX_NEST_DEV] on the stack,
+but that assumption is not correct and the number of upper devices could
+be larger than MAX_NEST_DEV (e.g., many macvlans), causing a
+stack-out-of-bounds write.
+
+Add a max parameter to get_upper_ifindexes() to avoid the issue.
+When there are too many upper devices, return -EOVERFLOW and abort the
+redirect.
+
+To reproduce, create more than MAX_NEST_DEV(8) macvlans on a device with
+an XDP program attached using BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS.
+Then send a packet to the device to trigger the XDP redirect path.
+
+Reported-by: syzbot+10cc7f13760b31bd2e61@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698c4ce3.050a0220.340abe.000b.GAE@google.com/T/
+Fixes: aeea1b86f936 ("bpf, devmap: Exclude XDP broadcast to master device")
+Reviewed-by: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Link: https://lore.kernel.org/r/20260225053506.4738-1-kohei@enjuk.jp
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 5f2356b47b2dd..3bdec239be610 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -577,18 +577,22 @@ static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifin
+ }
+
+ /* Get ifindex of each upper device. 'indexes' must be able to hold at
+- * least MAX_NEST_DEV elements.
+- * Returns the number of ifindexes added.
++ * least 'max' elements.
++ * Returns the number of ifindexes added, or -EOVERFLOW if there are too
++ * many upper devices.
+ */
+-static int get_upper_ifindexes(struct net_device *dev, int *indexes)
++static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
+ {
+ struct net_device *upper;
+ struct list_head *iter;
+ int n = 0;
+
+ netdev_for_each_upper_dev_rcu(dev, upper, iter) {
++ if (n >= max)
++ return -EOVERFLOW;
+ indexes[n++] = upper->ifindex;
+ }
++
+ return n;
+ }
+
+@@ -604,7 +608,11 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev_rx->ifindex;
+ }
+
+@@ -722,7 +730,11 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ int err;
+
+ if (exclude_ingress) {
+- num_excluded = get_upper_ifindexes(dev, excluded_devices);
++ num_excluded = get_upper_ifindexes(dev, excluded_devices,
++ ARRAY_SIZE(excluded_devices) - 1);
++ if (num_excluded < 0)
++ return num_excluded;
++
+ excluded_devices[num_excluded++] = dev->ifindex;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From cb20e8d6063700ac871bddb35cbff3ae681400fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 09:06:56 -0700
+Subject: btrfs: add raid stripe tree definitions
+
+From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+
+[ Upstream commit ee1293308e01d359688243d665138f35a6f1f9b8 ]
+
+Add definitions for the raid stripe tree. This tree will hold information
+about the on-disk layout of the stripes in a RAID set.
+
+Each stripe extent has a 1:1 relationship with an on-disk extent item and
+is doing the logical to per-drive physical address translation for the
+extent item in question.
+
+Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 511dc8912ae3 ("btrfs: fix incorrect key offset in error message in check_dev_extent_item()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/accessors.h | 8 ++++++++
+ fs/btrfs/locking.c | 1 +
+ include/uapi/linux/btrfs_tree.h | 29 +++++++++++++++++++++++++++++
+ 3 files changed, 38 insertions(+)
+
+diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
+index 8cfc8214109ca..341c07b4c2272 100644
+--- a/fs/btrfs/accessors.h
++++ b/fs/btrfs/accessors.h
+@@ -305,6 +305,14 @@ BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
+ BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
+ BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
+
++BTRFS_SETGET_FUNCS(stripe_extent_encoding, struct btrfs_stripe_extent, encoding, 8);
++BTRFS_SETGET_FUNCS(raid_stride_devid, struct btrfs_raid_stride, devid, 64);
++BTRFS_SETGET_FUNCS(raid_stride_physical, struct btrfs_raid_stride, physical, 64);
++BTRFS_SETGET_STACK_FUNCS(stack_stripe_extent_encoding,
++ struct btrfs_stripe_extent, encoding, 8);
++BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_devid, struct btrfs_raid_stride, devid, 64);
++BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_physical, struct btrfs_raid_stride, physical, 64);
++
+ /* struct btrfs_dev_extent */
+ BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, chunk_tree, 64);
+ BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
+diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
+index 7979449a58d6b..51737e7350669 100644
+--- a/fs/btrfs/locking.c
++++ b/fs/btrfs/locking.c
+@@ -73,6 +73,7 @@ static struct btrfs_lockdep_keyset {
+ { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
+ { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
+ { .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
++ { .id = BTRFS_RAID_STRIPE_TREE_OBJECTID, DEFINE_NAME("raid-stripe") },
+ { .id = 0, DEFINE_NAME("tree") },
+ };
+
+diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
+index fc3c32186d7eb..ca65d7b7a6ca1 100644
+--- a/include/uapi/linux/btrfs_tree.h
++++ b/include/uapi/linux/btrfs_tree.h
+@@ -73,6 +73,9 @@
+ /* Holds the block group items for extent tree v2. */
+ #define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
+
++/* Tracks RAID stripes in block groups. */
++#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
++
+ /* device stats in the device tree */
+ #define BTRFS_DEV_STATS_OBJECTID 0ULL
+
+@@ -261,6 +264,8 @@
+ #define BTRFS_DEV_ITEM_KEY 216
+ #define BTRFS_CHUNK_ITEM_KEY 228
+
++#define BTRFS_RAID_STRIPE_KEY 230
++
+ /*
+ * Records the overall state of the qgroups.
+ * There's only one instance of this key present,
+@@ -719,6 +724,30 @@ struct btrfs_free_space_header {
+ __le64 num_bitmaps;
+ } __attribute__ ((__packed__));
+
++struct btrfs_raid_stride {
++ /* The id of device this raid extent lives on. */
++ __le64 devid;
++ /* The physical location on disk. */
++ __le64 physical;
++} __attribute__ ((__packed__));
++
++/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
++#define BTRFS_STRIPE_RAID0 1
++#define BTRFS_STRIPE_RAID1 2
++#define BTRFS_STRIPE_DUP 3
++#define BTRFS_STRIPE_RAID10 4
++#define BTRFS_STRIPE_RAID5 5
++#define BTRFS_STRIPE_RAID6 6
++#define BTRFS_STRIPE_RAID1C3 7
++#define BTRFS_STRIPE_RAID1C4 8
++
++struct btrfs_stripe_extent {
++ __u8 encoding;
++ __u8 reserved[7];
++ /* An array of raid strides this stripe is composed of. */
++ struct btrfs_raid_stride strides[];
++} __attribute__ ((__packed__));
++
+ #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
+ #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
+
+--
+2.51.0
+
--- /dev/null
+From b3b8e571a4066f115490997a7a8f504e68ba5bd6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 09:06:58 -0700
+Subject: btrfs: add support for inserting raid stripe extents
+
+From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+
+[ Upstream commit 02c372e1f016e5113217597ab37b399c4e407477 ]
+
+Add support for inserting stripe extents into the raid stripe tree on
+completion of every write that needs an extra logical-to-physical
+translation when using RAID.
+
+Inserting the stripe extents happens after the data I/O has completed,
+this is done to
+
+ a) support zone-append and
+ b) rule out the possibility of a RAID-write-hole.
+
+Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 511dc8912ae3 ("btrfs: fix incorrect key offset in error message in check_dev_extent_item()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/Makefile | 2 +-
+ fs/btrfs/bio.c | 21 +++++++++
+ fs/btrfs/extent-tree.c | 1 +
+ fs/btrfs/inode.c | 8 +++-
+ fs/btrfs/ordered-data.c | 1 +
+ fs/btrfs/ordered-data.h | 2 +
+ fs/btrfs/raid-stripe-tree.c | 87 +++++++++++++++++++++++++++++++++++++
+ fs/btrfs/raid-stripe-tree.h | 35 +++++++++++++++
+ fs/btrfs/volumes.c | 4 +-
+ fs/btrfs/volumes.h | 16 ++++---
+ 10 files changed, 168 insertions(+), 9 deletions(-)
+ create mode 100644 fs/btrfs/raid-stripe-tree.c
+ create mode 100644 fs/btrfs/raid-stripe-tree.h
+
+diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
+index 90d53209755bf..3bb869a84e541 100644
+--- a/fs/btrfs/Makefile
++++ b/fs/btrfs/Makefile
+@@ -33,7 +33,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
+ uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
+ block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
+ subpage.o tree-mod-log.o extent-io-tree.o fs.o messages.o bio.o \
+- lru_cache.o
++ lru_cache.o raid-stripe-tree.o
+
+ btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
+ btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 650972895652d..6fa13be15f301 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -15,6 +15,7 @@
+ #include "rcu-string.h"
+ #include "zoned.h"
+ #include "file-item.h"
++#include "raid-stripe-tree.h"
+
+ static struct bio_set btrfs_bioset;
+ static struct bio_set btrfs_clone_bioset;
+@@ -416,6 +417,9 @@ static void btrfs_orig_write_end_io(struct bio *bio)
+ else
+ bio->bi_status = BLK_STS_OK;
+
++ if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
++ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
++
+ btrfs_orig_bbio_end_io(bbio);
+ btrfs_put_bioc(bioc);
+ }
+@@ -427,6 +431,8 @@ static void btrfs_clone_write_end_io(struct bio *bio)
+ if (bio->bi_status) {
+ atomic_inc(&stripe->bioc->error);
+ btrfs_log_dev_io_error(bio, stripe->dev);
++ } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
++ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ }
+
+ /* Pass on control to the original bio this one was cloned from */
+@@ -490,6 +496,7 @@ static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
+ bio->bi_private = &bioc->stripes[dev_nr];
+ bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
+ bioc->stripes[dev_nr].bioc = bioc;
++ bioc->size = bio->bi_iter.bi_size;
+ btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
+ }
+
+@@ -499,6 +506,8 @@ static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
+ if (!bioc) {
+ /* Single mirror read/write fast path. */
+ btrfs_bio(bio)->mirror_num = mirror_num;
++ if (bio_op(bio) != REQ_OP_READ)
++ btrfs_bio(bio)->orig_physical = smap->physical;
+ bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
+ if (bio_op(bio) != REQ_OP_READ)
+ btrfs_bio(bio)->orig_physical = smap->physical;
+@@ -690,6 +699,18 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ bio->bi_opf |= REQ_OP_ZONE_APPEND;
+ }
+
++ if (is_data_bbio(bbio) && bioc &&
++ btrfs_need_stripe_tree_update(bioc->fs_info, bioc->map_type)) {
++ /*
++ * No locking for the list update, as we only add to
++ * the list in the I/O submission path, and list
++ * iteration only happens in the completion path, which
++ * can't happen until after the last submission.
++ */
++ btrfs_get_bioc(bioc);
++ list_add_tail(&bioc->rst_ordered_entry, &bbio->ordered->bioc_list);
++ }
++
+ /*
+ * Csum items for reloc roots have already been cloned at this
+ * point, so they are handled as part of the no-checksum case.
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 1528a81b2c307..04ea2b2a9383e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -42,6 +42,7 @@
+ #include "file-item.h"
+ #include "orphan.h"
+ #include "tree-checker.h"
++#include "raid-stripe-tree.h"
+
+ #undef SCRAMBLE_DELAYED_REFS
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 96edac307408c..91df180e61e9b 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -71,6 +71,7 @@
+ #include "super.h"
+ #include "orphan.h"
+ #include "backref.h"
++#include "raid-stripe-tree.h"
+
+ struct btrfs_iget_args {
+ u64 ino;
+@@ -3104,6 +3105,10 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
+
+ trans->block_rsv = &inode->block_rsv;
+
++ ret = btrfs_insert_raid_extent(trans, ordered_extent);
++ if (ret)
++ goto out;
++
+ if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
+ compress_type = ordered_extent->compress_type;
+ if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
+@@ -3252,7 +3257,8 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
+ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
+ {
+ if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
+- !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
++ !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
++ list_empty(&ordered->bioc_list))
+ btrfs_finish_ordered_zoned(ordered);
+ return btrfs_finish_one_ordered(ordered);
+ }
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index c68e9ecbc438c..e0a2d0cfd5ebe 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -198,6 +198,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ INIT_LIST_HEAD(&entry->log_list);
+ INIT_LIST_HEAD(&entry->root_extent_list);
+ INIT_LIST_HEAD(&entry->work_list);
++ INIT_LIST_HEAD(&entry->bioc_list);
+ init_completion(&entry->completion);
+
+ /*
+diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
+index 173bd5c5df262..1c51ac57e5dfd 100644
+--- a/fs/btrfs/ordered-data.h
++++ b/fs/btrfs/ordered-data.h
+@@ -151,6 +151,8 @@ struct btrfs_ordered_extent {
+ struct completion completion;
+ struct btrfs_work flush_work;
+ struct list_head work_list;
++
++ struct list_head bioc_list;
+ };
+
+ static inline void
+diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
+new file mode 100644
+index 0000000000000..c093e0bbb7be3
+--- /dev/null
++++ b/fs/btrfs/raid-stripe-tree.c
+@@ -0,0 +1,87 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
++ */
++
++#include <linux/btrfs_tree.h>
++#include "ctree.h"
++#include "fs.h"
++#include "accessors.h"
++#include "transaction.h"
++#include "disk-io.h"
++#include "raid-stripe-tree.h"
++#include "volumes.h"
++#include "misc.h"
++#include "print-tree.h"
++
++static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
++ struct btrfs_io_context *bioc)
++{
++ struct btrfs_fs_info *fs_info = trans->fs_info;
++ struct btrfs_key stripe_key;
++ struct btrfs_root *stripe_root = fs_info->stripe_root;
++ const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type);
++ u8 encoding = btrfs_bg_flags_to_raid_index(bioc->map_type);
++ struct btrfs_stripe_extent *stripe_extent;
++ const size_t item_size = struct_size(stripe_extent, strides, num_stripes);
++ int ret;
++
++ stripe_extent = kzalloc(item_size, GFP_NOFS);
++ if (!stripe_extent) {
++ btrfs_abort_transaction(trans, -ENOMEM);
++ btrfs_end_transaction(trans);
++ return -ENOMEM;
++ }
++
++ btrfs_set_stack_stripe_extent_encoding(stripe_extent, encoding);
++ for (int i = 0; i < num_stripes; i++) {
++ u64 devid = bioc->stripes[i].dev->devid;
++ u64 physical = bioc->stripes[i].physical;
++ u64 length = bioc->stripes[i].length;
++ struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i];
++
++ if (length == 0)
++ length = bioc->size;
++
++ btrfs_set_stack_raid_stride_devid(raid_stride, devid);
++ btrfs_set_stack_raid_stride_physical(raid_stride, physical);
++ }
++
++ stripe_key.objectid = bioc->logical;
++ stripe_key.type = BTRFS_RAID_STRIPE_KEY;
++ stripe_key.offset = bioc->size;
++
++ ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent,
++ item_size);
++ if (ret)
++ btrfs_abort_transaction(trans, ret);
++
++ kfree(stripe_extent);
++
++ return ret;
++}
++
++int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
++ struct btrfs_ordered_extent *ordered_extent)
++{
++ struct btrfs_io_context *bioc;
++ int ret;
++
++ if (!btrfs_fs_incompat(trans->fs_info, RAID_STRIPE_TREE))
++ return 0;
++
++ list_for_each_entry(bioc, &ordered_extent->bioc_list, rst_ordered_entry) {
++ ret = btrfs_insert_one_raid_extent(trans, bioc);
++ if (ret)
++ return ret;
++ }
++
++ while (!list_empty(&ordered_extent->bioc_list)) {
++ bioc = list_first_entry(&ordered_extent->bioc_list,
++ typeof(*bioc), rst_ordered_entry);
++ list_del(&bioc->rst_ordered_entry);
++ btrfs_put_bioc(bioc);
++ }
++
++ return ret;
++}
+diff --git a/fs/btrfs/raid-stripe-tree.h b/fs/btrfs/raid-stripe-tree.h
+new file mode 100644
+index 0000000000000..7a169e75ad6df
+--- /dev/null
++++ b/fs/btrfs/raid-stripe-tree.h
+@@ -0,0 +1,35 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
++ */
++
++#ifndef BTRFS_RAID_STRIPE_TREE_H
++#define BTRFS_RAID_STRIPE_TREE_H
++
++struct btrfs_io_context;
++struct btrfs_io_stripe;
++struct btrfs_ordered_extent;
++struct btrfs_trans_handle;
++
++int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
++ struct btrfs_ordered_extent *ordered_extent);
++
++static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info,
++ u64 map_type)
++{
++ u64 type = map_type & BTRFS_BLOCK_GROUP_TYPE_MASK;
++ u64 profile = map_type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
++
++ if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE))
++ return false;
++
++ if (type != BTRFS_BLOCK_GROUP_DATA)
++ return false;
++
++ if (profile & BTRFS_BLOCK_GROUP_RAID1_MASK)
++ return true;
++
++ return false;
++}
++
++#endif
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 6ce083a6ed61f..23756f1464013 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -5943,6 +5943,7 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
+ }
+
+ static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
++ u64 logical,
+ u16 total_stripes)
+ {
+ struct btrfs_io_context *bioc;
+@@ -5962,6 +5963,7 @@ static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_
+ bioc->fs_info = fs_info;
+ bioc->replace_stripe_src = -1;
+ bioc->full_stripe_logical = (u64)-1;
++ bioc->logical = logical;
+
+ return bioc;
+ }
+@@ -6498,7 +6500,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
+ goto out;
+ }
+
+- bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes);
++ bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
+ if (!bioc) {
+ ret = -ENOMEM;
+ goto out;
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 5203095318b02..c6c5253bf5064 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -387,12 +387,11 @@ struct btrfs_fs_devices {
+
+ struct btrfs_io_stripe {
+ struct btrfs_device *dev;
+- union {
+- /* Block mapping */
+- u64 physical;
+- /* For the endio handler */
+- struct btrfs_io_context *bioc;
+- };
++ /* Block mapping. */
++ u64 physical;
++ u64 length;
++ /* For the endio handler. */
++ struct btrfs_io_context *bioc;
+ };
+
+ struct btrfs_discard_stripe {
+@@ -425,6 +424,11 @@ struct btrfs_io_context {
+ atomic_t error;
+ u16 max_errors;
+
++ u64 logical;
++ u64 size;
++ /* Raid stripe tree ordered entry. */
++ struct list_head rst_ordered_entry;
++
+ /*
+ * The total number of stripes, including the extra duplicated
+ * stripe for replace.
+--
+2.51.0
+
--- /dev/null
+From 0af379d8518bc1e306922ef947a69e4034abd856 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:41 +0000
+Subject: btrfs: fix compat mask in error messages in btrfs_check_features()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 587bb33b10bda645a1028c1737ad3992b3d7cf61 ]
+
+Commit d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency
+checks") introduced a regression when it comes to handling unsupported
+incompat or compat_ro flags. Beforehand we only printed the flags that
+we didn't recognize, afterwards we printed them all, which is less
+useful. Fix the error handling so it behaves like it used to.
+
+Fixes: d7f67ac9a928 ("btrfs: relax block-group-tree feature dependency checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 89e98f9cc2026..23431bc81c64a 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3094,7 +3094,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ btrfs_err(fs_info,
+ "cannot mount because of unknown incompat features (0x%llx)",
+- incompat);
++ incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP);
+ return -EINVAL;
+ }
+
+@@ -3126,7 +3126,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ if (compat_ro_unsupp && is_rw_mount) {
+ btrfs_err(fs_info,
+ "cannot mount read-write because of unknown compat_ro features (0x%llx)",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+@@ -3139,7 +3139,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
+ btrfs_err(fs_info,
+ "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+- compat_ro);
++ compat_ro_unsupp);
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 36f069c3ca1300aebdd7261a484583dcbb56aad6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 10:21:44 +0000
+Subject: btrfs: fix incorrect key offset in error message in
+ check_dev_extent_item()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 511dc8912ae3e929c1a182f5e6b2326516fd42a0 ]
+
+Fix the error message in check_dev_extent_item(), when an overlapping
+stripe is encountered. For dev extents, objectid is the disk number and
+offset the physical address, so prev_key->objectid should actually be
+prev_key->offset.
+
+(I can't take any credit for this one - this was discovered by Chris and
+his friend Claude.)
+
+Reported-by: Chris Mason <clm@fb.com>
+Fixes: 008e2512dc56 ("btrfs: tree-checker: add dev extent item checks")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 0d93368c1691a..bca5ec4c26630 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1802,7 +1802,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+- prev_key->objectid, prev_len, key->offset);
++ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From d134f43f561ecd07bc467bbf10993e57d201ba3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 14:39:46 +0000
+Subject: btrfs: fix objectid value in error message in check_extent_data_ref()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit a10172780526c2002e062102ad4f2aabac495889 ]
+
+Fix a copy-paste error in check_extent_data_ref(): we're printing root
+as in the message above, we should be printing objectid.
+
+Fixes: f333a3c7e832 ("btrfs: tree-checker: validate dref root and objectid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-checker.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index bca5ec4c26630..e38994ac14848 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1673,7 +1673,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref objectid value %llu",
+- root);
++ objectid);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+--
+2.51.0
+
--- /dev/null
+From 8b276adc0621875677405c2163a9d84b184269b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:46:13 +0000
+Subject: btrfs: fix warning in scrub_verify_one_metadata()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit 44e2fda66427a0442d8d2c0e6443256fb458ab6b ]
+
+Commit b471965fdb2d ("btrfs: fix replace/scrub failure with
+metadata_uuid") fixed the comparison in scrub_verify_one_metadata() to
+use metadata_uuid rather than fsid, but left the warning as it was. Fix
+it so it matches what we're doing.
+
+Fixes: b471965fdb2d ("btrfs: fix replace/scrub failure with metadata_uuid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/scrub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 3338e2e7a9a02..d2d2548eea05a 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -635,7 +635,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ btrfs_warn_rl(fs_info,
+ "tree block %llu mirror %u has bad fsid, has %pU want %pU",
+ logical, stripe->mirror_num,
+- header->fsid, fs_info->fs_devices->fsid);
++ header->fsid, fs_info->fs_devices->metadata_uuid);
+ return;
+ }
+ if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+--
+2.51.0
+
--- /dev/null
+From 28bdfd06bcb902d956b328cc32118574fa68b62e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Aug 2023 16:19:19 -0400
+Subject: btrfs: move btrfs_crc32c_final into free-space-cache.c
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 102f2640a346e84cb5c2d19805a9dd38a776013c ]
+
+This is the only place this helper is used, take it out of ctree.h and
+move it into free-space-cache.c.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 511dc8912ae3 ("btrfs: fix incorrect key offset in error message in check_dev_extent_item()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.h | 5 -----
+ fs/btrfs/free-space-cache.c | 5 +++++
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 834af67fac231..3108852ff47d7 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -477,11 +477,6 @@ static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
+ return crc32c(crc, address, length);
+ }
+
+-static inline void btrfs_crc32c_final(u32 crc, u8 *result)
+-{
+- put_unaligned_le32(~crc, result);
+-}
+-
+ static inline u64 btrfs_name_hash(const char *name, int len)
+ {
+ return crc32c((u32)~1, name, len);
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 9a6ec9344c3e0..edf3612ba3108 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -57,6 +57,11 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes, bool update_stats);
+
++static void btrfs_crc32c_final(u32 crc, u8 *result)
++{
++ put_unaligned_le32(~crc, result);
++}
++
+ static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
+ {
+ struct btrfs_free_space *info;
+--
+2.51.0
+
--- /dev/null
+From bd2e1a7cb8ae2260290d1aa4a483ace9ddfc8502 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Aug 2023 16:19:21 -0400
+Subject: btrfs: move btrfs_extref_hash into inode-item.h
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 98e4f060c4f565a3b62e8cdfe6b89f59167312b6 ]
+
+Ideally this would be un-inlined, but that is a cleanup for later. For
+now move this into inode-item.h, which is where the extref code lives.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 511dc8912ae3 ("btrfs: fix incorrect key offset in error message in check_dev_extent_item()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.h | 9 ---------
+ fs/btrfs/inode-item.h | 7 +++++++
+ 2 files changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 11691c70ba791..1743aa21fa6e5 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -477,15 +477,6 @@ static inline u64 btrfs_name_hash(const char *name, int len)
+ return crc32c((u32)~1, name, len);
+ }
+
+-/*
+- * Figure the key offset of an extended inode ref
+- */
+-static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
+- int len)
+-{
+- return (u64) crc32c(parent_objectid, name, len);
+-}
+-
+ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
+ {
+ return mapping_gfp_constraint(mapping, ~__GFP_FS);
+diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h
+index d43633d5620f2..0f1730dabce6d 100644
+--- a/fs/btrfs/inode-item.h
++++ b/fs/btrfs/inode-item.h
+@@ -4,6 +4,7 @@
+ #define BTRFS_INODE_ITEM_H
+
+ #include <linux/types.h>
++#include <linux/crc32c.h>
+
+ struct btrfs_trans_handle;
+ struct btrfs_root;
+@@ -76,6 +77,12 @@ static inline void btrfs_inode_split_flags(u64 inode_item_flags,
+ *ro_flags = (u32)(inode_item_flags >> 32);
+ }
+
++/* Figure the key offset of an extended inode ref. */
++static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name, int len)
++{
++ return (u64)crc32c(parent_objectid, name, len);
++}
++
+ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_truncate_control *control);
+--
+2.51.0
+
--- /dev/null
+From 871ea1983902a6bf81bfbc89b115d63f163d4daf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 09:06:57 -0700
+Subject: btrfs: read raid stripe tree from disk
+
+From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+
+[ Upstream commit 515020900d447796bc2f0f57064663617a11b65d ]
+
+If we find the raid-stripe-tree on mount, read it from disk. This is
+a backward incompatible feature. The rescue=ignorebadroots mount option
+will skip this tree.
+
+Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 511dc8912ae3 ("btrfs: fix incorrect key offset in error message in check_dev_extent_item()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-rsv.c | 6 ++++++
+ fs/btrfs/disk-io.c | 18 ++++++++++++++++++
+ fs/btrfs/fs.h | 1 +
+ include/uapi/linux/btrfs.h | 1 +
+ 4 files changed, 26 insertions(+)
+
+diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
+index 97084ea3af0cc..07bf07431a7f4 100644
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -354,6 +354,11 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
+ min_items++;
+ }
+
++ if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
++ num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
++ min_items++;
++ }
++
+ /*
+ * But we also want to reserve enough space so we can do the fallback
+ * global reserve for an unlink, which is an additional
+@@ -405,6 +410,7 @@ void btrfs_init_root_block_rsv(struct btrfs_root *root)
+ case BTRFS_EXTENT_TREE_OBJECTID:
+ case BTRFS_FREE_SPACE_TREE_OBJECTID:
+ case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
++ case BTRFS_RAID_STRIPE_TREE_OBJECTID:
+ root->block_rsv = &fs_info->delayed_refs_rsv;
+ break;
+ case BTRFS_ROOT_TREE_OBJECTID:
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 3c26e91a8055f..89e98f9cc2026 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1179,6 +1179,8 @@ static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
+ return btrfs_grab_root(fs_info->block_group_root);
+ case BTRFS_FREE_SPACE_TREE_OBJECTID:
+ return btrfs_grab_root(btrfs_global_root(fs_info, &key));
++ case BTRFS_RAID_STRIPE_TREE_OBJECTID:
++ return btrfs_grab_root(fs_info->stripe_root);
+ default:
+ return NULL;
+ }
+@@ -1259,6 +1261,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
+ btrfs_put_root(fs_info->fs_root);
+ btrfs_put_root(fs_info->data_reloc_root);
+ btrfs_put_root(fs_info->block_group_root);
++ btrfs_put_root(fs_info->stripe_root);
+ btrfs_check_leaked_roots(fs_info);
+ btrfs_extent_buffer_leak_debug_check(fs_info);
+ kfree(fs_info->super_copy);
+@@ -1812,6 +1815,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
+ free_root_extent_buffers(info->fs_root);
+ free_root_extent_buffers(info->data_reloc_root);
+ free_root_extent_buffers(info->block_group_root);
++ free_root_extent_buffers(info->stripe_root);
+ if (free_chunk_root)
+ free_root_extent_buffers(info->chunk_root);
+ }
+@@ -2287,6 +2291,20 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
+ fs_info->uuid_root = root;
+ }
+
++ if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
++ location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
++ root = btrfs_read_tree_root(tree_root, &location);
++ if (IS_ERR(root)) {
++ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
++ ret = PTR_ERR(root);
++ goto out;
++ }
++ } else {
++ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
++ fs_info->stripe_root = root;
++ }
++ }
++
+ return 0;
+ out:
+ btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
+diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
+index d24d41f7811a6..b8b9ce8921baf 100644
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -371,6 +371,7 @@ struct btrfs_fs_info {
+ struct btrfs_root *uuid_root;
+ struct btrfs_root *data_reloc_root;
+ struct btrfs_root *block_group_root;
++ struct btrfs_root *stripe_root;
+
+ /* The log root tree is a directory of all the other log roots */
+ struct btrfs_root *log_root_tree;
+diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
+index 6f776faaa791c..7b499b90bb779 100644
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -333,6 +333,7 @@ struct btrfs_ioctl_fs_info_args {
+ #define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
+ #define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
+ #define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
++#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
+
+ struct btrfs_ioctl_feature_flags {
+ __u64 compat_flags;
+--
+2.51.0
+
--- /dev/null
+From 8224321456a6e504b54ed20215402da7052cdc59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Aug 2023 16:19:20 -0400
+Subject: btrfs: remove btrfs_crc32c wrapper
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 03e86348965a5fa13593db8682132033d663f7ee ]
+
+This simply sends the same arguments into crc32c(), and is just used in
+a few places. Remove this wrapper and directly call crc32c() in these
+instances.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 511dc8912ae3 ("btrfs: fix incorrect key offset in error message in check_dev_extent_item()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.h | 5 -----
+ fs/btrfs/extent-tree.c | 6 +++---
+ fs/btrfs/free-space-cache.c | 4 ++--
+ fs/btrfs/send.c | 6 +++---
+ 4 files changed, 8 insertions(+), 13 deletions(-)
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 3108852ff47d7..11691c70ba791 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -472,11 +472,6 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
+ #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
+ ((bytes) >> (fs_info)->sectorsize_bits)
+
+-static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
+-{
+- return crc32c(crc, address, length);
+-}
+-
+ static inline u64 btrfs_name_hash(const char *name, int len)
+ {
+ return crc32c((u32)~1, name, len);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 774bdafc822c1..1528a81b2c307 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -414,11 +414,11 @@ u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
+ __le64 lenum;
+
+ lenum = cpu_to_le64(root_objectid);
+- high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
++ high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
+ lenum = cpu_to_le64(owner);
+- low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
++ low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
+ lenum = cpu_to_le64(offset);
+- low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
++ low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
+
+ return ((u64)high_crc << 31) ^ (u64)low_crc;
+ }
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index edf3612ba3108..c6e3b9a2921ab 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -545,7 +545,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
+ if (index == 0)
+ offset = sizeof(u32) * io_ctl->num_pages;
+
+- crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
++ crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
+ btrfs_crc32c_final(crc, (u8 *)&crc);
+ io_ctl_unmap_page(io_ctl);
+ tmp = page_address(io_ctl->pages[0]);
+@@ -567,7 +567,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
+ val = *tmp;
+
+ io_ctl_map_page(io_ctl, 0);
+- crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
++ crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
+ btrfs_crc32c_final(crc, (u8 *)&crc);
+ if (val != crc) {
+ btrfs_err_rl(io_ctl->fs_info,
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 6768e2231d610..4fa05ee81d434 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -814,7 +814,7 @@ static int send_cmd(struct send_ctx *sctx)
+ put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
+ put_unaligned_le32(0, &hdr->crc);
+
+- crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
++ crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+ put_unaligned_le32(crc, &hdr->crc);
+
+ ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
+@@ -5740,8 +5740,8 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
+ hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+ hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
+ hdr->crc = 0;
+- crc = btrfs_crc32c(0, sctx->send_buf, sctx->send_size);
+- crc = btrfs_crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
++ crc = crc32c(0, sctx->send_buf, sctx->send_size);
++ crc = crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
+ hdr->crc = cpu_to_le32(crc);
+
+ ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
+--
+2.51.0
+
--- /dev/null
+From 1edc909cc59218bd0b9043825339bc4c716562fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 00:21:19 +0800
+Subject: drm/logicvc: Fix device node reference leak in
+ logicvc_drm_config_parse()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fef0e649f8b42bdffe4a916dd46e1b1e9ad2f207 ]
+
+The logicvc_drm_config_parse() function calls of_get_child_by_name() to
+find the "layers" node but fails to release the reference, leading to a
+device node reference leak.
+
+Fix this by using the __free(device_node) cleanup attribute to automatic
+release the reference when the variable goes out of scope.
+
+Fixes: efeeaefe9be5 ("drm: Add support for the LogiCVC display controller")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Reviewed-by: Kory Maincent <kory.maincent@bootlin.com>
+Link: https://patch.msgid.link/20260130-logicvc_drm-v1-1-04366463750c@gmail.com
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/logicvc/logicvc_drm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
+index 749debd3d6a57..df74572e6d2ea 100644
+--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
++++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
+@@ -90,7 +90,6 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_drm_config *config = &logicvc->config;
+- struct device_node *layers_node;
+ int ret;
+
+ logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
+@@ -126,7 +125,8 @@ static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+ if (ret)
+ return ret;
+
+- layers_node = of_get_child_by_name(of_node, "layers");
++ struct device_node *layers_node __free(device_node) =
++ of_get_child_by_name(of_node, "layers");
+ if (!layers_node) {
+ drm_err(drm_dev, "Missing non-optional layers node\n");
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From e32c6350fc535bbd5814ab1c4e3bdf23ffbe04a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jan 2026 12:12:36 -0500
+Subject: drm/vmwgfx: Fix invalid kref_put callback in vmw_bo_dirty_release
+
+From: Brad Spengler <brad.spengler@opensrcsec.com>
+
+[ Upstream commit 211ecfaaef186ee5230a77d054cdec7fbfc6724a ]
+
+The kref_put() call uses (void *)kvfree as the release callback, which
+is incorrect. kref_put() expects a function with signature
+void (*release)(struct kref *), but kvfree has signature
+void (*)(const void *). Calling through an incompatible function pointer
+is undefined behavior.
+
+The code only worked by accident because ref_count is the first member
+of vmw_bo_dirty, making the kref pointer equal to the struct pointer.
+
+Fix this by adding a proper release callback that uses container_of()
+to retrieve the containing structure before freeing.
+
+Fixes: c1962742ffff ("drm/vmwgfx: Use kref in vmw_bo_dirty")
+Signed-off-by: Brad Spengler <brad.spengler@opensrcsec.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Cc: Ian Forbes <ian.forbes@broadcom.com>
+Link: https://patch.msgid.link/20260107171236.3573118-1-zack.rusin@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+index de2498749e276..5bb710824d72f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+@@ -274,6 +274,13 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
+ return ret;
+ }
+
++static void vmw_bo_dirty_free(struct kref *kref)
++{
++ struct vmw_bo_dirty *dirty = container_of(kref, struct vmw_bo_dirty, ref_count);
++
++ kvfree(dirty);
++}
++
+ /**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+@@ -288,7 +295,7 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
+ {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+- if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
++ if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free))
+ vbo->dirty = NULL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From c2a758805d73bd29daba7b21771c6e1737b162bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jan 2026 11:53:57 -0600
+Subject: drm/vmwgfx: Return the correct value in vmw_translate_ptr functions
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit 5023ca80f9589295cb60735016e39fc5cc714243 ]
+
+Before the referenced fixes these functions used a lookup function that
+returned a pointer. This was changed to another lookup function that
+returned an error code with the pointer becoming an out parameter.
+
+The error path when the lookup failed was not changed to reflect this
+change and the code continued to return the PTR_ERR of the now
+uninitialized pointer. This could cause the vmw_translate_ptr functions
+to return success when they actually failed causing further uninitialized
+and OOB accesses.
+
+Reported-by: Kuzey Arda Bulut <kuzeyardabulut@gmail.com>
+Fixes: a309c7194e8a ("drm/vmwgfx: Remove rcu locks from user resources")
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patch.msgid.link/20260113175357.129285-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 92b3e44d022fe..073791d696295 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1160,7 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+@@ -1216,7 +1216,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
+- return PTR_ERR(vmw_bo);
++ return ret;
+ }
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+--
+2.51.0
+
--- /dev/null
+From a8fff1a6bbbaa1f18cc25b69c92219b4519394dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 12:41:25 +0100
+Subject: irqchip/sifive-plic: Fix frozen interrupt due to affinity setting
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit 1072020685f4b81f6efad3b412cdae0bd62bb043 ]
+
+PLIC ignores interrupt completion message for disabled interrupt, explained
+by the specification:
+
+ The PLIC signals it has completed executing an interrupt handler by
+ writing the interrupt ID it received from the claim to the
+ claim/complete register. The PLIC does not check whether the completion
+ ID is the same as the last claim ID for that target. If the completion
+ ID does not match an interrupt source that is currently enabled for
+ the target, the completion is silently ignored.
+
+This caused problems in the past, because an interrupt can be disabled
+while still being handled and plic_irq_eoi() had no effect. That was fixed
+by checking if the interrupt is disabled, and if so enable it, before
+sending the completion message. That check is done with irqd_irq_disabled().
+
+However, that is not sufficient because the enable bit for the handling
+hart can be zero despite irqd_irq_disabled(d) being false. This can happen
+when affinity setting is changed while a hart is still handling the
+interrupt.
+
+This problem is easily reproducible by dumping a large file to uart (which
+generates lots of interrupts) and at the same time keep changing the uart
+interrupt's affinity setting. The uart port becomes frozen almost
+instantaneously.
+
+Fix this by checking PLIC's enable bit instead of irqd_irq_disabled().
+
+Fixes: cc9f04f9a84f ("irqchip/sifive-plic: Implement irq_set_affinity() for SMP host")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260212114125.3148067-1-namcao@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-sifive-plic.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index a8f5cfad16f7d..794bdb6d4d1e3 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -148,8 +148,13 @@ static void plic_irq_disable(struct irq_data *d)
+ static void plic_irq_eoi(struct irq_data *d)
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
++ u32 __iomem *reg;
++ bool enabled;
++
++ reg = handler->enable_base + (d->hwirq / 32) * sizeof(u32);
++ enabled = readl(reg) & BIT(d->hwirq % 32);
+
+- if (unlikely(irqd_irq_disabled(d))) {
++ if (unlikely(!enabled)) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+--
+2.51.0
+
--- /dev/null
+From 54abcf09a5ac85f203cb0b472c04fac25e066dfa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 06:10:08 -0600
+Subject: PCI: Correct PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit 39195990e4c093c9eecf88f29811c6de29265214 ]
+
+fb82437fdd8c ("PCI: Change capability register offsets to hex") incorrectly
+converted the PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 value from decimal 52 to hex
+0x32:
+
+ -#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
+ +#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+
+This broke PCI capabilities in a VMM because subsequent ones weren't
+DWORD-aligned.
+
+Change PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 to the correct value of 0x34.
+
+fb82437fdd8c was from Baruch Siach <baruch@tkos.co.il>, but this was not
+Baruch's fault; it's a mistake I made when applying the patch.
+
+Fixes: fb82437fdd8c ("PCI: Change capability register offsets to hex")
+Reported-by: David Woodhouse <dwmw2@infradead.org>
+Closes: https://lore.kernel.org/all/3ae392a0158e9d9ab09a1d42150429dd8ca42791.camel@infradead.org
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/pci_regs.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index ade8dabf62108..036991bee1a74 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -694,7 +694,7 @@
+ #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
+ #define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+ #define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
+-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
++#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x34 /* end of v2 EPs w/ link */
+ #define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
+ #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
+ #define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+--
+2.51.0
+
--- /dev/null
+From dd854b476e566246aa7d9d30726555b2379e7af4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 13:29:09 +0100
+Subject: perf: Fix __perf_event_overflow() vs perf_remove_from_context() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c9bc1753b3cc41d0e01fbca7f035258b5f4db0ae ]
+
+Make sure that __perf_event_overflow() runs with IRQs disabled for all
+possible callchains. Specifically the software events can end up running
+it with only preemption disabled.
+
+This opens up a race vs perf_event_exit_event() and friends that will go
+and free various things the overflow path expects to be present, like
+the BPF program.
+
+Fixes: 592903cdcbf6 ("perf_counter: add an event_list")
+Reported-by: Simond Hu <cmdhh1767@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Simond Hu <cmdhh1767@gmail.com>
+Link: https://patch.msgid.link/20260224122909.GV1395416@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 42 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 9a6be06176bb4..652baf91c629e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9729,6 +9729,13 @@ int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+ {
++ /*
++ * Entry point from hardware PMI, interrupts should be disabled here.
++ * This serializes us against perf_event_remove_from_context() in
++ * things like perf_event_release_kernel().
++ */
++ lockdep_assert_irqs_disabled();
++
+ return __perf_event_overflow(event, 1, data, regs);
+ }
+
+@@ -9809,6 +9816,19 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ {
+ struct hw_perf_event *hwc = &event->hw;
+
++ /*
++ * This is:
++ * - software preempt
++ * - tracepoint preempt
++ * - tp_target_task irq (ctx->lock)
++ * - uprobes preempt/irq
++ * - kprobes preempt/irq
++ * - hw_breakpoint irq
++ *
++ * Any of these are sufficient to hold off RCU and thus ensure @event
++ * exists.
++ */
++ lockdep_assert_preemption_disabled();
+ local64_add(nr, &event->count);
+
+ if (!regs)
+@@ -9817,6 +9837,16 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
+ if (!is_sampling_event(event))
+ return;
+
++ /*
++ * Serialize against event_function_call() IPIs like normal overflow
++ * event handling. Specifically, must not allow
++ * perf_event_release_kernel() -> perf_remove_from_context() to make
++ * progress and 'release' the event from under us.
++ */
++ guard(irqsave)();
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+@@ -10320,6 +10350,11 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ struct perf_sample_data data;
+ struct perf_event *event;
+
++ /*
++ * Per being a tracepoint, this runs with preemption disabled.
++ */
++ lockdep_assert_preemption_disabled();
++
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = entry_size,
+@@ -10733,6 +10768,11 @@ void perf_bp_event(struct perf_event *bp, void *data)
+ struct perf_sample_data sample;
+ struct pt_regs *regs = data;
+
++ /*
++ * Exception context, will have interrupts disabled.
++ */
++ lockdep_assert_irqs_disabled();
++
+ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+@@ -11185,7 +11225,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
+- if (__perf_event_overflow(event, 1, &data, regs))
++ if (perf_event_overflow(event, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 3c6e9fea5e957512cccf08255e9171093b4ec541 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 15:06:40 -0500
+Subject: rseq: Clarify rseq registration rseq_size bound check comment
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+[ Upstream commit 26d43a90be81fc90e26688a51d3ec83188602731 ]
+
+The rseq registration validates that the rseq_size argument is greater
+or equal to 32 (the original rseq size), but the comment associated with
+this check does not clearly state this.
+
+Clarify the comment to that effect.
+
+Fixes: ee3e3ac05c26 ("rseq: Introduce extensible rseq ABI")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260220200642.1317826-2-mathieu.desnoyers@efficios.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/rseq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 810005f927d7c..e6ee81dd1e457 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -432,8 +432,9 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
+ * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
+ * size, the required alignment is the original struct rseq alignment.
+ *
+- * In order to be valid, rseq_len is either the original rseq size, or
+- * large enough to contain all supported fields, as communicated to
++ * The rseq_len is required to be greater or equal to the original rseq
++ * size. In order to be valid, rseq_len is either the original rseq size,
++ * or large enough to contain all supported fields, as communicated to
+ * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
+ */
+ if (rseq_len < ORIG_RSEQ_SIZE ||
+--
+2.51.0
+
--- /dev/null
+From e8d3526eef9fc46ac41beb53d9d88852ab593517 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 11:23:27 -0800
+Subject: scsi: lpfc: Properly set WC for DPP mapping
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit bffda93a51b40afd67c11bf558dc5aae83ca0943 ]
+
+Using set_memory_wc() to enable write-combining for the DPP portion of
+the MMIO mapping is wrong as set_memory_*() is meant to operate on RAM
+only, not MMIO mappings. In fact, as used currently triggers a BUG_ON()
+with enabled CONFIG_DEBUG_VIRTUAL.
+
+Simply map the DPP region separately and in addition to the already
+existing mappings, avoiding any possible negative side effects for
+these.
+
+Fixes: 1351e69fc6db ("scsi: lpfc: Add push-to-adapter support to sli4")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Reviewed-by: Mathias Krause <minipli@grsecurity.net>
+Link: https://patch.msgid.link/20260212192327.141104-1-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 2 ++
+ drivers/scsi/lpfc/lpfc_sli.c | 36 +++++++++++++++++++++++++++++------
+ drivers/scsi/lpfc/lpfc_sli4.h | 3 +++
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index b0eac09de5ad5..dc18d84c54c3c 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -12049,6 +12049,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
++ if (phba->sli4_hba.dpp_regs_memmap_wc_p)
++ iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ break;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 4cf935b7223af..c88e224feed8a 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -15938,6 +15938,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+ return NULL;
+ }
+
++static __maybe_unused void __iomem *
++lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
++{
++
++ /* DPP region is supposed to cover 64-bit BAR2 */
++ if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
++ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
++ "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
++ dpp_barset);
++ return NULL;
++ }
++
++ if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
++ void __iomem *dpp_map;
++
++ dpp_map = ioremap_wc(phba->pci_bar2_map,
++ pci_resource_len(phba->pcidev,
++ PCI_64BIT_BAR4));
++
++ if (dpp_map)
++ phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
++ }
++
++ return phba->sli4_hba.dpp_regs_memmap_wc_p;
++}
++
+ /**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+@@ -16901,9 +16927,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ uint8_t dpp_barset;
+ uint32_t dpp_offset;
+ uint8_t wq_create_version;
+-#ifdef CONFIG_X86
+- unsigned long pg_addr;
+-#endif
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+@@ -17089,14 +17112,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+
+ #ifdef CONFIG_X86
+ /* Enable combined writes for DPP aperture */
+- pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
+- rc = set_memory_wc(pg_addr, 1);
+- if (rc) {
++ bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
++ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3272 Cannot setup Combined "
+ "Write on WQ[%d] - disable DPP\n",
+ wq->queue_id);
+ phba->cfg_enable_dpp = 0;
++ } else {
++ wq->dpp_regaddr = bar_memmap_p + dpp_offset;
+ }
+ #else
+ phba->cfg_enable_dpp = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index 2541a8fba093f..323d3ed3272b5 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -783,6 +783,9 @@ struct lpfc_sli4_hba {
+ void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
+ * dpp registers
+ */
++ void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
++ * dpp registers with write combining
++ */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+--
+2.51.0
+
--- /dev/null
+From d3d3c96852f8bcada2cd4fb6c40fe97237ea2197 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Feb 2026 19:28:06 +0000
+Subject: scsi: pm8001: Fix use-after-free in pm8001_queue_command()
+
+From: Salomon Dushimirimana <salomondush@google.com>
+
+[ Upstream commit 38353c26db28efd984f51d426eac2396d299cca7 ]
+
+Commit e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()") refactors
+pm8001_queue_command(), however it introduces a potential cause of a double
+free scenario when it changes the function to return -ENODEV in case of phy
+down/device gone state.
+
+In this path, pm8001_queue_command() updates task status and calls
+task_done to indicate to upper layer that the task has been handled.
+However, this also frees the underlying SAS task. A -ENODEV is then
+returned to the caller. When libsas sas_ata_qc_issue() receives this error
+value, it assumes the task wasn't handled/queued by LLDD and proceeds to
+clean up and free the task again, resulting in a double free.
+
+Since pm8001_queue_command() handles the SAS task in this case, it should
+return 0 to the caller indicating that the task has been handled.
+
+Fixes: e29c47fe8946 ("scsi: pm8001: Simplify pm8001_task_exec()")
+Signed-off-by: Salomon Dushimirimana <salomondush@google.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://patch.msgid.link/20260213192806.439432-1-salomondush@google.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/pm8001/pm8001_sas.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 4daab8b6d6752..0f911228cb2f1 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -476,8 +476,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
+ } else {
+ task->task_done(task);
+ }
+- rc = -ENODEV;
+- goto err_out;
++ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n");
++ return 0;
+ }
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+--
+2.51.0
+
--- /dev/null
+From 8dc377300f755683c804dc360c43df3ec5a4baba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 18:37:57 +0800
+Subject: scsi: ufs: core: Move link recovery for hibern8 exit failure to
+ wl_resume
+
+From: Peter Wang <peter.wang@mediatek.com>
+
+[ Upstream commit 62c015373e1cdb1cdca824bd2dbce2dac0819467 ]
+
+Move the link recovery trigger from ufshcd_uic_pwr_ctrl() to
+__ufshcd_wl_resume(). Ensure link recovery is only attempted when hibern8
+exit fails during resume, not during hibern8 enter in suspend. Improve
+error handling and prevent unnecessary link recovery attempts.
+
+Fixes: 35dabf4503b9 ("scsi: ufs: core: Use link recovery when h8 exit fails during runtime resume")
+Signed-off-by: Peter Wang <peter.wang@mediatek.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223103906.2533654-1-peter.wang@mediatek.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 808b648e1f388..0b74ef63e6721 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4289,14 +4289,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+- /*
+- * If the h8 exit fails during the runtime resume process, it becomes
+- * stuck and cannot be recovered through the error handler. To fix
+- * this, use link recovery instead of the error handler.
+- */
+- if (ret && hba->pm_op_in_progress)
+- ret = ufshcd_link_recovery(hba);
+-
+ return ret;
+ }
+
+@@ -10016,7 +10008,15 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+- goto vendor_suspend;
++ /*
++ * If the h8 exit fails during the runtime resume
++ * process, it becomes stuck and cannot be recovered
++ * through the error handler. To fix this, use link
++ * recovery instead of the error handler.
++ */
++ ret = ufshcd_link_recovery(hba);
++ if (ret)
++ goto vendor_suspend;
+ }
+ } else if (ufshcd_is_link_off(hba)) {
+ /*
+--
+2.51.0
+
--- /dev/null
+drm-vmwgfx-fix-invalid-kref_put-callback-in-vmw_bo_d.patch
+drm-vmwgfx-return-the-correct-value-in-vmw_translate.patch
+drm-logicvc-fix-device-node-reference-leak-in-logicv.patch
+irqchip-sifive-plic-fix-frozen-interrupt-due-to-affi.patch
+scsi-lpfc-properly-set-wc-for-dpp-mapping.patch
+scsi-pm8001-fix-use-after-free-in-pm8001_queue_comma.patch
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch
+rseq-clarify-rseq-registration-rseq_size-bound-check.patch
+scsi-ufs-core-move-link-recovery-for-hibern8-exit-fa.patch
+alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
+alsa-usb-audio-use-inclusive-terms.patch
+perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
+alsa-pci-hda-use-snd_kcontrol_chip.patch
+alsa-hda-cs35l56-fix-signedness-error-in-cs35l56_hda.patch
+btrfs-move-btrfs_crc32c_final-into-free-space-cache..patch
+btrfs-remove-btrfs_crc32c-wrapper.patch
+btrfs-move-btrfs_extref_hash-into-inode-item.h.patch
+btrfs-add-raid-stripe-tree-definitions.patch
+btrfs-read-raid-stripe-tree-from-disk.patch
+btrfs-add-support-for-inserting-raid-stripe-extents.patch
+btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
+btrfs-fix-objectid-value-in-error-message-in-check_e.patch
+btrfs-fix-warning-in-scrub_verify_one_metadata.patch
+btrfs-fix-compat-mask-in-error-messages-in-btrfs_che.patch
+bpf-fix-stack-out-of-bounds-write-in-devmap.patch
+pci-correct-pci_cap_exp_endpoint_sizeof_v2-value.patch