--- /dev/null
+From stable+bounces-190036-greg=kroah.com@vger.kernel.org Mon Oct 27 17:29:12 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 12:28:55 -0400
+Subject: mptcp: pm: in-kernel: C-flag: handle late ADD_ADDR
+To: stable@vger.kernel.org
+Cc: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Geliang Tang <geliang@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027162855.577684-1-sashal@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+[ Upstream commit e84cb860ac3ce67ec6ecc364433fd5b412c448bc ]
+
+The special C-flag case expects the ADD_ADDR to be received when
+switching to 'fully-established'. But for various reasons, the ADD_ADDR
+could be sent after the "4th ACK", and the special case doesn't work.
+
+On NIPA, the new test validating this special case for the C-flag failed
+a few times, e.g.
+
+ 102 default limits, server deny join id 0
+ syn rx [FAIL] got 0 JOIN[s] syn rx expected 2
+
+ Server ns stats
+ (...)
+ MPTcpExtAddAddrTx 1
+ MPTcpExtEchoAdd 1
+
+ Client ns stats
+ (...)
+ MPTcpExtAddAddr 1
+ MPTcpExtEchoAddTx 1
+
+ synack rx [FAIL] got 0 JOIN[s] synack rx expected 2
+ ack rx [FAIL] got 0 JOIN[s] ack rx expected 2
+ join Rx [FAIL] see above
+ syn tx [FAIL] got 0 JOIN[s] syn tx expected 2
+ join Tx [FAIL] see above
+
+I had a suspicion about what the issue could be: the ADD_ADDR might have
+been received after the switch to the 'fully-established' state. The
+issue was not easy to reproduce. The packet capture showed that the
+ADD_ADDR can indeed be sent with a delay, and the client would not try
+to establish subflows to it as expected.
+
+A simple fix is not to mark the endpoints as 'used' in the C-flag case,
+when looking at creating subflows to the remote initial IP address and
+port. In this case, there is no need to try.
+
+Note: newly added fullmesh endpoints will still continue to be used as
+expected, thanks to the conditions behind mptcp_pm_add_addr_c_flag_case.
+
+Fixes: 4b1ff850e0c1 ("mptcp: pm: in-kernel: usable client side with C-flag")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251020-net-mptcp-c-flag-late-add-addr-v1-1-8207030cb0e8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ applied to pm_netlink.c instead of pm_kernel.c ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -619,6 +619,10 @@ static void mptcp_pm_create_subflow_or_s
+ }
+
+ subflow:
++ /* No need to try establishing subflows to remote id0 if not allowed */
++ if (mptcp_pm_add_addr_c_flag_case(msk))
++ goto exit;
++
+ /* check if should create a new subflow */
+ while (msk->pm.local_addr_used < local_addr_max &&
+ msk->pm.subflows < subflows_max) {
+@@ -650,6 +654,8 @@ subflow:
+ __mptcp_subflow_connect(sk, &local.addr, &addrs[i]);
+ spin_lock_bh(&msk->pm.lock);
+ }
++
++exit:
+ mptcp_pm_nl_check_work_pending(msk);
+ }
+
--- /dev/null
+From stable+bounces-190001-greg=kroah.com@vger.kernel.org Mon Oct 27 15:43:10 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 10:37:05 -0400
+Subject: selftests: mptcp: disable add_addr retrans in endpoint_tests
+To: stable@vger.kernel.org
+Cc: Geliang Tang <tanggeliang@kylinos.cn>, Matthieu Baerts <matttbe@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027143706.523131-1-sashal@kernel.org>
+
+From: Geliang Tang <tanggeliang@kylinos.cn>
+
+[ Upstream commit f92199f551e617fae028c5c5905ddd63e3616e18 ]
+
+To prevent test instability in the "delete re-add signal" test caused by
+ADD_ADDR retransmissions, disable retransmissions for this test by setting
+net.mptcp.add_addr_timeout to 0.
+
+Suggested-by: Matthieu Baerts <matttbe@kernel.org>
+Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250815-net-mptcp-misc-fixes-6-17-rc2-v1-6-521fe9957892@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: c3496c052ac3 ("selftests: mptcp: join: mark 'delete re-add signal' as skipped if not supported")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3838,6 +3838,7 @@ endpoint_tests()
+ # remove and re-add
+ if reset_with_events "delete re-add signal" &&
+ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0
+ pm_nl_set_limits $ns1 0 3
+ pm_nl_set_limits $ns2 3 3
+ pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
--- /dev/null
+From stable+bounces-190002-greg=kroah.com@vger.kernel.org Mon Oct 27 15:53:23 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 10:37:06 -0400
+Subject: selftests: mptcp: join: mark 'delete re-add signal' as skipped if not supported
+To: stable@vger.kernel.org
+Cc: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Geliang Tang <geliang@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027143706.523131-2-sashal@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+[ Upstream commit c3496c052ac36ea98ec4f8e95ae6285a425a2457 ]
+
+The call to 'continue_if' was missing: it properly marks a subtest as
+'skipped' if the attached condition is not valid.
+
+Without that, the test is wrongly marked as passed on older kernels.
+
+Fixes: b5e2fb832f48 ("selftests: mptcp: add explicit test case for remove/readd")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251020-net-mptcp-c-flag-late-add-addr-v1-4-8207030cb0e8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3837,7 +3837,7 @@ endpoint_tests()
+
+ # remove and re-add
+ if reset_with_events "delete re-add signal" &&
+- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0
+ pm_nl_set_limits $ns1 0 3
+ pm_nl_set_limits $ns2 3 3
--- /dev/null
+From stable+bounces-190155-greg=kroah.com@vger.kernel.org Mon Oct 27 19:43:59 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 14:42:45 -0400
+Subject: serial: sc16is7xx: refactor EFR lock
+To: stable@vger.kernel.org
+Cc: Hugo Villeneuve <hvilleneuve@dimonoff.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027184246.638748-3-sashal@kernel.org>
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+[ Upstream commit 0c84bea0cabc4e2b98a3de88eeb4ff798931f056 ]
+
+Move common code for EFR lock/unlock of mutex into functions for code reuse
+and clarity.
+
+With the addition of old_lcr, move irda_mode within struct sc16is7xx_one to
+reduce memory usage:
+ Before: /* size: 752, cachelines: 12, members: 10 */
+ After: /* size: 744, cachelines: 12, members: 10 */
+
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://lore.kernel.org/r/20231221231823.2327894-17-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 1c05bf6c0262 ("serial: sc16is7xx: remove useless enable of enhanced features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 106 ++++++++++++++++++++++-------------------
+ 1 file changed, 57 insertions(+), 49 deletions(-)
+
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -329,8 +329,9 @@ struct sc16is7xx_one {
+ struct kthread_work reg_work;
+ struct kthread_delayed_work ms_work;
+ struct sc16is7xx_one_config config;
+- bool irda_mode;
+ unsigned int old_mctrl;
++ u8 old_lcr; /* Value before EFR access. */
++ bool irda_mode;
+ };
+
+ struct sc16is7xx_port {
+@@ -412,6 +413,49 @@ static void sc16is7xx_power(struct uart_
+ on ? 0 : SC16IS7XX_IER_SLEEP_BIT);
+ }
+
++/*
++ * In an amazing feat of design, the Enhanced Features Register (EFR)
++ * shares the address of the Interrupt Identification Register (IIR).
++ * Access to EFR is switched on by writing a magic value (0xbf) to the
++ * Line Control Register (LCR). Any interrupt firing during this time will
++ * see the EFR where it expects the IIR to be, leading to
++ * "Unexpected interrupt" messages.
++ *
++ * Prevent this possibility by claiming a mutex while accessing the EFR,
++ * and claiming the same mutex from within the interrupt handler. This is
++ * similar to disabling the interrupt, but that doesn't work because the
++ * bulk of the interrupt processing is run as a workqueue job in thread
++ * context.
++ */
++static void sc16is7xx_efr_lock(struct uart_port *port)
++{
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
++
++ mutex_lock(&one->efr_lock);
++
++ /* Backup content of LCR. */
++ one->old_lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
++
++ /* Enable access to Enhanced register set */
++ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_CONF_MODE_B);
++
++ /* Disable cache updates when writing to EFR registers */
++ regcache_cache_bypass(one->regmap, true);
++}
++
++static void sc16is7xx_efr_unlock(struct uart_port *port)
++{
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
++
++ /* Re-enable cache updates when writing to normal registers */
++ regcache_cache_bypass(one->regmap, false);
++
++ /* Restore original content of LCR */
++ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, one->old_lcr);
++
++ mutex_unlock(&one->efr_lock);
++}
++
+ static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
+ {
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+@@ -538,39 +582,12 @@ static int sc16is7xx_set_baud(struct uar
+ div /= prescaler;
+ }
+
+- /* In an amazing feat of design, the Enhanced Features Register shares
+- * the address of the Interrupt Identification Register, and is
+- * switched in by writing a magic value (0xbf) to the Line Control
+- * Register. Any interrupt firing during this time will see the EFR
+- * where it expects the IIR to be, leading to "Unexpected interrupt"
+- * messages.
+- *
+- * Prevent this possibility by claiming a mutex while accessing the
+- * EFR, and claiming the same mutex from within the interrupt handler.
+- * This is similar to disabling the interrupt, but that doesn't work
+- * because the bulk of the interrupt processing is run as a workqueue
+- * job in thread context.
+- */
+- mutex_lock(&one->efr_lock);
+-
+- lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+-
+- /* Open the LCR divisors for configuration */
+- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+- SC16IS7XX_LCR_CONF_MODE_B);
+-
+ /* Enable enhanced features */
+- regcache_cache_bypass(one->regmap, true);
++ sc16is7xx_efr_lock(port);
+ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_ENABLE_BIT,
+ SC16IS7XX_EFR_ENABLE_BIT);
+-
+- regcache_cache_bypass(one->regmap, false);
+-
+- /* Put LCR back to the normal mode */
+- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+-
+- mutex_unlock(&one->efr_lock);
++ sc16is7xx_efr_unlock(port);
+
+ /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
+ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+@@ -579,7 +596,8 @@ static int sc16is7xx_set_baud(struct uar
+
+ mutex_lock(&one->efr_lock);
+
+- /* Open the LCR divisors for configuration */
++ /* Backup LCR and access special register set (DLL/DLH) */
++ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_A);
+
+@@ -589,7 +607,7 @@ static int sc16is7xx_set_baud(struct uar
+ sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256);
+ regcache_cache_bypass(one->regmap, false);
+
+- /* Put LCR back to the normal mode */
++ /* Restore LCR and access to general register set */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
+ mutex_unlock(&one->efr_lock);
+@@ -1070,17 +1088,7 @@ static void sc16is7xx_set_termios(struct
+ if (!(termios->c_cflag & CREAD))
+ port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
+
+- /* As above, claim the mutex while accessing the EFR. */
+- mutex_lock(&one->efr_lock);
+-
+- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+- SC16IS7XX_LCR_CONF_MODE_B);
+-
+ /* Configure flow control */
+- regcache_cache_bypass(one->regmap, true);
+- sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
+- sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
+-
+ port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
+ if (termios->c_cflag & CRTSCTS) {
+ flow |= SC16IS7XX_EFR_AUTOCTS_BIT |
+@@ -1092,16 +1100,16 @@ static void sc16is7xx_set_termios(struct
+ if (termios->c_iflag & IXOFF)
+ flow |= SC16IS7XX_EFR_SWFLOW1_BIT;
+
+- sc16is7xx_port_update(port,
+- SC16IS7XX_EFR_REG,
+- SC16IS7XX_EFR_FLOWCTRL_BITS,
+- flow);
+- regcache_cache_bypass(one->regmap, false);
+-
+ /* Update LCR register */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
+- mutex_unlock(&one->efr_lock);
++ /* Update EFR registers */
++ sc16is7xx_efr_lock(port);
++ sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
++ sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
++ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
++ SC16IS7XX_EFR_FLOWCTRL_BITS, flow);
++ sc16is7xx_efr_unlock(port);
+
+ /* Get baud rate generator configuration */
+ baud = uart_get_baud_rate(port, termios, old,
--- /dev/null
+From stable+bounces-190149-greg=kroah.com@vger.kernel.org Mon Oct 27 19:45:28 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 14:42:43 -0400
+Subject: serial: sc16is7xx: remove unused to_sc16is7xx_port macro
+To: stable@vger.kernel.org
+Cc: "Hugo Villeneuve" <hvilleneuve@dimonoff.com>, "Ilpo Järvinen" <ilpo.jarvinen@linux.intel.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251027184246.638748-1-sashal@kernel.org>
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+[ Upstream commit 22a048b0749346b6e3291892d06b95278d5ba84a ]
+
+This macro is not used anywhere.
+
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Link: https://lore.kernel.org/r/20230905181649.134720-1-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 1c05bf6c0262 ("serial: sc16is7xx: remove useless enable of enhanced features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -358,7 +358,6 @@ static struct uart_driver sc16is7xx_uart
+ static void sc16is7xx_ier_set(struct uart_port *port, u8 bit);
+ static void sc16is7xx_stop_tx(struct uart_port *port);
+
+-#define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e)))
+ #define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
+
+ static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
--- /dev/null
+From stable+bounces-190154-greg=kroah.com@vger.kernel.org Mon Oct 27 19:44:19 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 14:42:46 -0400
+Subject: serial: sc16is7xx: remove useless enable of enhanced features
+To: stable@vger.kernel.org
+Cc: Hugo Villeneuve <hvilleneuve@dimonoff.com>, stable <stable@kernel.org>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027184246.638748-4-sashal@kernel.org>
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+[ Upstream commit 1c05bf6c0262f946571a37678250193e46b1ff0f ]
+
+Commit 43c51bb573aa ("sc16is7xx: make sure device is in suspend once
+probed") permanently enabled access to the enhanced features in
+sc16is7xx_probe(), and it is never disabled after that.
+
+Therefore, remove re-enable of enhanced features in
+sc16is7xx_set_baud(). This eliminates a potential useless read + write
+cycle each time the baud rate is reconfigured.
+
+Fixes: 43c51bb573aa ("sc16is7xx: make sure device is in suspend once probed")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://patch.msgid.link/20251006142002.177475-1-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -582,13 +582,6 @@ static int sc16is7xx_set_baud(struct uar
+ div /= prescaler;
+ }
+
+- /* Enable enhanced features */
+- sc16is7xx_efr_lock(port);
+- sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+- SC16IS7XX_EFR_ENABLE_BIT,
+- SC16IS7XX_EFR_ENABLE_BIT);
+- sc16is7xx_efr_unlock(port);
+-
+ /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
+ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_CLKSEL_BIT,
--- /dev/null
+From stable+bounces-190151-greg=kroah.com@vger.kernel.org Mon Oct 27 19:45:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 14:42:44 -0400
+Subject: serial: sc16is7xx: reorder code to remove prototype declarations
+To: stable@vger.kernel.org
+Cc: Hugo Villeneuve <hvilleneuve@dimonoff.com>, Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027184246.638748-2-sashal@kernel.org>
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+[ Upstream commit 2de8a1b46756b5a79d8447f99afdfe49e914225a ]
+
+Move/reorder some functions to remove sc16is7xx_ier_set() and
+sc16is7xx_stop_tx() prototypes declarations.
+
+No functional change.
+
+sc16is7xx_ier_set() was introduced in
+commit cc4c1d05eb10 ("sc16is7xx: Properly resume TX after stop").
+
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://lore.kernel.org/r/20231221231823.2327894-16-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 1c05bf6c0262 ("serial: sc16is7xx: remove useless enable of enhanced features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 75 +++++++++++++++++++----------------------
+ 1 file changed, 36 insertions(+), 39 deletions(-)
+
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -355,9 +355,6 @@ static struct uart_driver sc16is7xx_uart
+ .nr = SC16IS7XX_MAX_DEVS,
+ };
+
+-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit);
+-static void sc16is7xx_stop_tx(struct uart_port *port);
+-
+ #define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
+
+ static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
+@@ -415,6 +412,42 @@ static void sc16is7xx_power(struct uart_
+ on ? 0 : SC16IS7XX_IER_SLEEP_BIT);
+ }
+
++static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
++{
++ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
++
++ lockdep_assert_held_once(&port->lock);
++
++ one->config.flags |= SC16IS7XX_RECONF_IER;
++ one->config.ier_mask |= bit;
++ one->config.ier_val &= ~bit;
++ kthread_queue_work(&s->kworker, &one->reg_work);
++}
++
++static void sc16is7xx_ier_set(struct uart_port *port, u8 bit)
++{
++ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
++
++ lockdep_assert_held_once(&port->lock);
++
++ one->config.flags |= SC16IS7XX_RECONF_IER;
++ one->config.ier_mask |= bit;
++ one->config.ier_val |= bit;
++ kthread_queue_work(&s->kworker, &one->reg_work);
++}
++
++static void sc16is7xx_stop_tx(struct uart_port *port)
++{
++ sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
++}
++
++static void sc16is7xx_stop_rx(struct uart_port *port)
++{
++ sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
++}
++
+ static const struct sc16is7xx_devtype sc16is74x_devtype = {
+ .name = "SC16IS74X",
+ .nr_gpio = 0,
+@@ -888,42 +921,6 @@ static void sc16is7xx_reg_proc(struct kt
+ sc16is7xx_reconf_rs485(&one->port);
+ }
+
+-static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
+-{
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+- lockdep_assert_held_once(&port->lock);
+-
+- one->config.flags |= SC16IS7XX_RECONF_IER;
+- one->config.ier_mask |= bit;
+- one->config.ier_val &= ~bit;
+- kthread_queue_work(&s->kworker, &one->reg_work);
+-}
+-
+-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit)
+-{
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+- lockdep_assert_held_once(&port->lock);
+-
+- one->config.flags |= SC16IS7XX_RECONF_IER;
+- one->config.ier_mask |= bit;
+- one->config.ier_val |= bit;
+- kthread_queue_work(&s->kworker, &one->reg_work);
+-}
+-
+-static void sc16is7xx_stop_tx(struct uart_port *port)
+-{
+- sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
+-}
+-
+-static void sc16is7xx_stop_rx(struct uart_port *port)
+-{
+- sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
+-}
+-
+ static void sc16is7xx_ms_proc(struct kthread_work *ws)
+ {
+ struct sc16is7xx_one *one = to_sc16is7xx_one(ws, ms_work.work);
btrfs-use-level-argument-in-log-tree-walk-callback-r.patch
btrfs-use-smp_mb__after_atomic-when-forcing-cow-in-c.patch
arch-add-the-macro-compile_offsets-to-all-the-asm-of.patch
+mptcp-pm-in-kernel-c-flag-handle-late-add_addr.patch
+selftests-mptcp-disable-add_addr-retrans-in-endpoint_tests.patch
+selftests-mptcp-join-mark-delete-re-add-signal-as-skipped-if-not-supported.patch
+serial-sc16is7xx-remove-unused-to_sc16is7xx_port-macro.patch
+serial-sc16is7xx-reorder-code-to-remove-prototype-declarations.patch
+serial-sc16is7xx-refactor-efr-lock.patch
+serial-sc16is7xx-remove-useless-enable-of-enhanced-features.patch
+xhci-dbc-poll-at-different-rate-depending-on-data-transfer-activity.patch
+xhci-dbc-allow-users-to-modify-dbc-poll-interval-via-sysfs.patch
+xhci-dbc-improve-performance-by-removing-delay-in-transfer-event-polling.patch
+xhci-dbc-avoid-event-polling-busyloop-if-pending-rx-transfers-are-inactive.patch
+xhci-dbc-fix-bogus-1024-byte-prefix-if-ttydbc-read-races-with-stall-event.patch
--- /dev/null
+From stable+bounces-190039-greg=kroah.com@vger.kernel.org Mon Oct 27 17:35:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 12:29:16 -0400
+Subject: xhci: dbc: Allow users to modify DbC poll interval via sysfs
+To: stable@vger.kernel.org
+Cc: Uday M Bhat <uday.m.bhat@intel.com>, Samuel Jacob <samjaco@google.com>, Mathias Nyman <mathias.nyman@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027162919.577996-2-sashal@kernel.org>
+
+From: Uday M Bhat <uday.m.bhat@intel.com>
+
+[ Upstream commit de3edd47a18fe05a560847cc3165871474e08196 ]
+
+xhci DbC driver polls the host controller for DbC events at a reduced
+rate when DbC is enabled but there are no active data transfers.
+
+Allow users to modify this reduced poll interval via dbc_poll_interval_ms
+sysfs entry. Unit is milliseconds and accepted range is 0 to 5000.
+Max interval of 5000 ms is selected as it matches the common 5 second
+timeout used in usb stack.
+Default value is 64 milliseconds.
+
+A long interval is useful when users know there won't be any activity
+on systems connected via DbC for long periods, and want to avoid
+battery drainage due to unnecessary CPU usage.
+
+Example being Android Debugger (ADB) usage over DbC on ChromeOS systems
+running Android Runtime.
+
+[minor changes and rewording -Mathias]
+
+Co-developed-by: Samuel Jacob <samjaco@google.com>
+Signed-off-by: Samuel Jacob <samjaco@google.com>
+Signed-off-by: Uday M Bhat <uday.m.bhat@intel.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20240626124835.1023046-5-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: f3d12ec847b9 ("xhci: dbc: fix bogus 1024 byte prefix if ttyDBC read races with stall event")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd | 10 +++
+ drivers/usb/host/xhci-dbgcap.c | 38 +++++++++++++++
+ drivers/usb/host/xhci-dbgcap.h | 2
+ 3 files changed, 49 insertions(+), 1 deletion(-)
+
+--- a/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd
++++ b/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd
+@@ -75,3 +75,13 @@ Description:
+ The default value is 1 (GNU Remote Debug command).
+ Other permissible value is 0 which is for vendor defined debug
+ target.
++
++What: /sys/bus/pci/drivers/xhci_hcd/.../dbc_poll_interval_ms
++Date: February 2024
++Contact: Mathias Nyman <mathias.nyman@linux.intel.com>
++Description:
++ This attribute adjust the polling interval used to check for
++ DbC events. Unit is milliseconds. Accepted values range from 0
++ up to 5000. The default value is 64 ms.
++ This polling interval is used while DbC is enabled but has no
++ active data transfers.
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -1214,11 +1214,48 @@ static ssize_t dbc_bInterfaceProtocol_st
+ return size;
+ }
+
++static ssize_t dbc_poll_interval_ms_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct xhci_dbc *dbc;
++ struct xhci_hcd *xhci;
++
++ xhci = hcd_to_xhci(dev_get_drvdata(dev));
++ dbc = xhci->dbc;
++
++ return sysfs_emit(buf, "%u\n", dbc->poll_interval);
++}
++
++static ssize_t dbc_poll_interval_ms_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t size)
++{
++ struct xhci_dbc *dbc;
++ struct xhci_hcd *xhci;
++ u32 value;
++ int ret;
++
++ ret = kstrtou32(buf, 0, &value);
++ if (ret || value > DBC_POLL_INTERVAL_MAX)
++ return -EINVAL;
++
++ xhci = hcd_to_xhci(dev_get_drvdata(dev));
++ dbc = xhci->dbc;
++
++ dbc->poll_interval = value;
++
++ mod_delayed_work(system_wq, &dbc->event_work, 0);
++
++ return size;
++}
++
+ static DEVICE_ATTR_RW(dbc);
+ static DEVICE_ATTR_RW(dbc_idVendor);
+ static DEVICE_ATTR_RW(dbc_idProduct);
+ static DEVICE_ATTR_RW(dbc_bcdDevice);
+ static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
++static DEVICE_ATTR_RW(dbc_poll_interval_ms);
+
+ static struct attribute *dbc_dev_attributes[] = {
+ &dev_attr_dbc.attr,
+@@ -1226,6 +1263,7 @@ static struct attribute *dbc_dev_attribu
+ &dev_attr_dbc_idProduct.attr,
+ &dev_attr_dbc_bcdDevice.attr,
+ &dev_attr_dbc_bInterfaceProtocol.attr,
++ &dev_attr_dbc_poll_interval_ms.attr,
+ NULL
+ };
+
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -94,7 +94,7 @@ struct dbc_ep {
+ #define DBC_QUEUE_SIZE 16
+ #define DBC_WRITE_BUF_SIZE 8192
+ #define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
+-
++#define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */
+ /*
+ * Private structure for DbC hardware state:
+ */
--- /dev/null
+From stable+bounces-190041-greg=kroah.com@vger.kernel.org Mon Oct 27 17:29:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 12:29:18 -0400
+Subject: xhci: dbc: Avoid event polling busyloop if pending rx transfers are inactive.
+To: stable@vger.kernel.org
+Cc: "Mathias Nyman" <mathias.nyman@linux.intel.com>, "Łukasz Bartosik" <ukaszb@chromium.org>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251027162919.577996-4-sashal@kernel.org>
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+[ Upstream commit cab63934c33b12c0d1e9f4da7450928057f2c142 ]
+
+Event polling delay is set to 0 if there are any pending requests in
+either rx or tx requests lists. Checking for pending requests does
+not work well for "IN" transfers as the tty driver always queues
+requests to the list and TRBs to the ring, preparing to receive data
+from the host.
+
+This causes unnecessary busylooping and cpu hogging.
+
+Only set the event polling delay to 0 if there are pending tx "write"
+transfers, or if it was less than 10ms since last active data transfer
+in any direction.
+
+Cc: Łukasz Bartosik <ukaszb@chromium.org>
+Fixes: fb18e5bb9660 ("xhci: dbc: poll at different rate depending on data transfer activity")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20250505125630.561699-3-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: f3d12ec847b9 ("xhci: dbc: fix bogus 1024 byte prefix if ttyDBC read races with stall event")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 19 ++++++++++++++++---
+ drivers/usb/host/xhci-dbgcap.h | 3 +++
+ 2 files changed, 19 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -855,6 +855,7 @@ static enum evtreturn xhci_dbc_do_handle
+ {
+ dma_addr_t deq;
+ union xhci_trb *evt;
++ enum evtreturn ret = EVT_DONE;
+ u32 ctrl, portsc;
+ bool update_erdp = false;
+
+@@ -939,6 +940,7 @@ static enum evtreturn xhci_dbc_do_handle
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ dbc_handle_xfer_event(dbc, evt);
++ ret = EVT_XFER_DONE;
+ break;
+ default:
+ break;
+@@ -957,7 +959,7 @@ static enum evtreturn xhci_dbc_do_handle
+ lo_hi_writeq(deq, &dbc->regs->erdp);
+ }
+
+- return EVT_DONE;
++ return ret;
+ }
+
+ static void xhci_dbc_handle_events(struct work_struct *work)
+@@ -966,6 +968,7 @@ static void xhci_dbc_handle_events(struc
+ struct xhci_dbc *dbc;
+ unsigned long flags;
+ unsigned int poll_interval;
++ unsigned long busypoll_timelimit;
+
+ dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+ poll_interval = dbc->poll_interval;
+@@ -984,11 +987,21 @@ static void xhci_dbc_handle_events(struc
+ dbc->driver->disconnect(dbc);
+ break;
+ case EVT_DONE:
+- /* set fast poll rate if there are pending data transfers */
++ /*
++ * Set fast poll rate if there are pending out transfers, or
++ * a transfer was recently processed
++ */
++ busypoll_timelimit = dbc->xfer_timestamp +
++ msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);
++
+ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
+- !list_empty(&dbc->eps[BULK_IN].list_pending))
++ time_is_after_jiffies(busypoll_timelimit))
+ poll_interval = 0;
+ break;
++ case EVT_XFER_DONE:
++ dbc->xfer_timestamp = jiffies;
++ poll_interval = 0;
++ break;
+ default:
+ dev_info(dbc->dev, "stop handling dbc events\n");
+ return;
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -95,6 +95,7 @@ struct dbc_ep {
+ #define DBC_WRITE_BUF_SIZE 8192
+ #define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
+ #define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */
++#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */
+ /*
+ * Private structure for DbC hardware state:
+ */
+@@ -141,6 +142,7 @@ struct xhci_dbc {
+ enum dbc_state state;
+ struct delayed_work event_work;
+ unsigned int poll_interval; /* ms */
++ unsigned long xfer_timestamp;
+ unsigned resume_required:1;
+ struct dbc_ep eps[2];
+
+@@ -186,6 +188,7 @@ struct dbc_request {
+ enum evtreturn {
+ EVT_ERR = -1,
+ EVT_DONE,
++ EVT_XFER_DONE,
+ EVT_GSER,
+ EVT_DISC,
+ };
--- /dev/null
+From stable+bounces-190042-greg=kroah.com@vger.kernel.org Mon Oct 27 17:29:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 12:29:19 -0400
+Subject: xhci: dbc: fix bogus 1024 byte prefix if ttyDBC read races with stall event
+To: stable@vger.kernel.org
+Cc: "Mathias Nyman" <mathias.nyman@linux.intel.com>, stable <stable@kernel.org>, "Łukasz Bartosik" <ukaszb@chromium.org>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251027162919.577996-5-sashal@kernel.org>
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+[ Upstream commit f3d12ec847b945d5d65846c85f062d07d5e73164 ]
+
+DbC may add 1024 bogus bytes to the beginning of the receiving endpoint
+if DbC hw triggers a STALL event before any Transfer Blocks (TRBs) for
+incoming data are queued, but driver handles the event after it queued
+the TRBs.
+
+This is possible as xHCI DbC hardware may trigger spurious STALL transfer
+events even if endpoint is empty. The STALL event contains a pointer
+to the stalled TRB, and "remaining" untransferred data length.
+
+As there are no TRBs queued yet the STALL event will just point to first
+TRB position of the empty ring, with '0' bytes remaining untransferred.
+
+DbC driver is polling for events, and may not handle the STALL event
+before /dev/ttyDBC0 is opened and incoming data TRBs are queued.
+
+The DbC event handler will now assume the first queued TRB (length 1024)
+has stalled with '0' bytes remaining untransferred, and copies the data.
+
+This race situation can be practically mitigated by making sure the event
+handler handles all pending transfer events when DbC reaches configured
+state, and only then create dev/ttyDbC0, and start queueing transfers.
+The event handler can this way detect the STALL events on empty rings
+and discard them before any transfers are queued.
+
+This does in practice solve the issue, but still leaves a small possible
+gap for the race to trigger.
+We still need a way to distinguish spurious STALLs on empty rings with '0'
+bytes remaining, from actual STALL events with all bytes transmitted.
+
+Cc: stable <stable@kernel.org>
+Fixes: dfba2174dc42 ("usb: xhci: Add DbC support in xHCI driver")
+Tested-by: Łukasz Bartosik <ukaszb@chromium.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -880,7 +880,8 @@ static enum evtreturn xhci_dbc_do_handle
+ dev_info(dbc->dev, "DbC configured\n");
+ portsc = readl(&dbc->regs->portsc);
+ writel(portsc, &dbc->regs->portsc);
+- return EVT_GSER;
++ ret = EVT_GSER;
++ break;
+ }
+
+ return EVT_DONE;
+@@ -940,7 +941,8 @@ static enum evtreturn xhci_dbc_do_handle
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ dbc_handle_xfer_event(dbc, evt);
+- ret = EVT_XFER_DONE;
++ if (ret != EVT_GSER)
++ ret = EVT_XFER_DONE;
+ break;
+ default:
+ break;
--- /dev/null
+From stable+bounces-190040-greg=kroah.com@vger.kernel.org Mon Oct 27 17:29:27 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 12:29:17 -0400
+Subject: xhci: dbc: Improve performance by removing delay in transfer event polling.
+To: stable@vger.kernel.org
+Cc: "Mathias Nyman" <mathias.nyman@linux.intel.com>, "Łukasz Bartosik" <ukaszb@chromium.org>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251027162919.577996-3-sashal@kernel.org>
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+[ Upstream commit 03e3d9c2bd85cda941b3cf78e895c1498ac05c5f ]
+
+Queue event polling work with 0 delay in case there are pending transfers
+queued up. This is part 2 of a 3 part series that roughly triples dbc
+performance when using adb push and pull over dbc.
+
+Max/min push rate after patches is 210/118 MB/s, pull rate 171/133 MB/s,
+tested with large files (300MB-9GB) by Łukasz Bartosik
+
+First performance improvement patch was commit 31128e7492dc
+("xhci: dbc: add dbgtty request to end of list once it completes")
+
+Cc: Łukasz Bartosik <ukaszb@chromium.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20241227120142.1035206-2-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: f3d12ec847b9 ("xhci: dbc: fix bogus 1024 byte prefix if ttyDBC read races with stall event")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -987,7 +987,7 @@ static void xhci_dbc_handle_events(struc
+ /* set fast poll rate if there are pending data transfers */
+ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
+ !list_empty(&dbc->eps[BULK_IN].list_pending))
+- poll_interval = 1;
++ poll_interval = 0;
+ break;
+ default:
+ dev_info(dbc->dev, "stop handling dbc events\n");
--- /dev/null
+From stable+bounces-190038-greg=kroah.com@vger.kernel.org Mon Oct 27 17:29:25 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 12:29:15 -0400
+Subject: xhci: dbc: poll at different rate depending on data transfer activity
+To: stable@vger.kernel.org
+Cc: Mathias Nyman <mathias.nyman@linux.intel.com>, Uday M Bhat <uday.m.bhat@intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027162919.577996-1-sashal@kernel.org>
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+[ Upstream commit fb18e5bb96603cc79d97f03e4c05f3992cf28624 ]
+
+DbC driver starts polling for events immediately when DbC is enabled.
+The current polling interval is 1ms, which keeps the CPU busy, impacting
+power management even when there are no active data transfers.
+
+Solve this by polling at a slower rate, with a 64ms interval as default
+until a transfer request is queued, or if there are still are pending
+unhandled transfers at event completion.
+
+Tested-by: Uday M Bhat <uday.m.bhat@intel.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20240229141438.619372-9-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: f3d12ec847b9 ("xhci: dbc: fix bogus 1024 byte prefix if ttyDBC read races with stall event")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 13 +++++++++++--
+ drivers/usb/host/xhci-dbgcap.h | 2 ++
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -665,7 +665,8 @@ static int xhci_dbc_start(struct xhci_db
+ return ret;
+ }
+
+- return mod_delayed_work(system_wq, &dbc->event_work, 1);
++ return mod_delayed_work(system_wq, &dbc->event_work,
++ msecs_to_jiffies(dbc->poll_interval));
+ }
+
+ static void xhci_dbc_stop(struct xhci_dbc *dbc)
+@@ -964,8 +965,10 @@ static void xhci_dbc_handle_events(struc
+ enum evtreturn evtr;
+ struct xhci_dbc *dbc;
+ unsigned long flags;
++ unsigned int poll_interval;
+
+ dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
++ poll_interval = dbc->poll_interval;
+
+ spin_lock_irqsave(&dbc->lock, flags);
+ evtr = xhci_dbc_do_handle_events(dbc);
+@@ -981,13 +984,18 @@ static void xhci_dbc_handle_events(struc
+ dbc->driver->disconnect(dbc);
+ break;
+ case EVT_DONE:
++ /* set fast poll rate if there are pending data transfers */
++ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
++ !list_empty(&dbc->eps[BULK_IN].list_pending))
++ poll_interval = 1;
+ break;
+ default:
+ dev_info(dbc->dev, "stop handling dbc events\n");
+ return;
+ }
+
+- mod_delayed_work(system_wq, &dbc->event_work, 1);
++ mod_delayed_work(system_wq, &dbc->event_work,
++ msecs_to_jiffies(poll_interval));
+ }
+
+ static ssize_t dbc_show(struct device *dev,
+@@ -1242,6 +1250,7 @@ xhci_alloc_dbc(struct device *dev, void
+ dbc->idVendor = DBC_VENDOR_ID;
+ dbc->bcdDevice = DBC_DEVICE_REV;
+ dbc->bInterfaceProtocol = DBC_PROTOCOL;
++ dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
+
+ if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
+ goto err;
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -93,6 +93,7 @@ struct dbc_ep {
+
+ #define DBC_QUEUE_SIZE 16
+ #define DBC_WRITE_BUF_SIZE 8192
++#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
+
+ /*
+ * Private structure for DbC hardware state:
+@@ -139,6 +140,7 @@ struct xhci_dbc {
+
+ enum dbc_state state;
+ struct delayed_work event_work;
++ unsigned int poll_interval; /* ms */
+ unsigned resume_required:1;
+ struct dbc_ep eps[2];
+