--- /dev/null
+From 1b36d448ae05405c0098999319b915c49e7f3b9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 14:57:46 +0200
+Subject: batman-adv: hold claim backbone gateways by reference
+
+From: Haoze Xie <royenheart@gmail.com>
+
+commit 82d8701b2c930d0e96b0dbc9115a218d791cb0d2 upstream.
+
+batadv_bla_add_claim() can replace claim->backbone_gw and drop the old
+gateway's last reference while readers still follow the pointer.
+
+The netlink claim dump path dereferences claim->backbone_gw->orig and
+takes claim->backbone_gw->crc_lock without pinning the underlying
+backbone gateway. batadv_bla_check_claim() still has the same naked
+pointer access pattern.
+
+Reuse batadv_bla_claim_get_backbone_gw() in both readers so they operate
+on a stable gateway reference until the read-side work is complete.
+This keeps the dump and claim-check paths aligned with the lifetime
+rules introduced for the other BLA claim readers.
+
+Fixes: 23721387c409 ("batman-adv: add basic bridge loop avoidance code")
+Fixes: 04f3f5bf1883 ("batman-adv: add B.A.T.M.A.N. Dump BLA claims via netlink")
+Cc: stable@vger.kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Haoze Xie <royenheart@gmail.com>
+Signed-off-by: Ao Zhou <n05ec@lzu.edu.cn>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/batman-adv/bridge_loop_avoidance.c | 27 +++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index 986f707e7d973..ffbd8fa7acce1 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -2206,6 +2206,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+ struct batadv_bla_claim *claim)
+ {
+ u8 *primary_addr = primary_if->net_dev->dev_addr;
++ struct batadv_bla_backbone_gw *backbone_gw;
+ u16 backbone_crc;
+ bool is_own;
+ void *hdr;
+@@ -2221,32 +2222,35 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+
+ genl_dump_check_consistent(cb, hdr);
+
+- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+- primary_addr);
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
+
+- spin_lock_bh(&claim->backbone_gw->crc_lock);
+- backbone_crc = claim->backbone_gw->crc;
+- spin_unlock_bh(&claim->backbone_gw->crc_lock);
++ spin_lock_bh(&backbone_gw->crc_lock);
++ backbone_crc = backbone_gw->crc;
++ spin_unlock_bh(&backbone_gw->crc_lock);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
+ nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+- claim->backbone_gw->orig) ||
++ backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
++put_backbone_gw:
++ batadv_backbone_gw_put(backbone_gw);
+ out:
+ return ret;
+ }
+@@ -2612,6 +2616,7 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
+ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid)
+ {
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_bla_claim search_claim;
+ struct batadv_bla_claim *claim = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
+@@ -2634,9 +2639,13 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ * return false.
+ */
+ if (claim) {
+- if (!batadv_compare_eth(claim->backbone_gw->orig,
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ ret = false;
++
++ batadv_backbone_gw_put(backbone_gw);
+ batadv_claim_put(claim);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 1b36d448ae05405c0098999319b915c49e7f3b9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 14:57:46 +0200
+Subject: batman-adv: hold claim backbone gateways by reference
+
+From: Haoze Xie <royenheart@gmail.com>
+
+commit 82d8701b2c930d0e96b0dbc9115a218d791cb0d2 upstream.
+
+batadv_bla_add_claim() can replace claim->backbone_gw and drop the old
+gateway's last reference while readers still follow the pointer.
+
+The netlink claim dump path dereferences claim->backbone_gw->orig and
+takes claim->backbone_gw->crc_lock without pinning the underlying
+backbone gateway. batadv_bla_check_claim() still has the same naked
+pointer access pattern.
+
+Reuse batadv_bla_claim_get_backbone_gw() in both readers so they operate
+on a stable gateway reference until the read-side work is complete.
+This keeps the dump and claim-check paths aligned with the lifetime
+rules introduced for the other BLA claim readers.
+
+Fixes: 23721387c409 ("batman-adv: add basic bridge loop avoidance code")
+Fixes: 04f3f5bf1883 ("batman-adv: add B.A.T.M.A.N. Dump BLA claims via netlink")
+Cc: stable@vger.kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Haoze Xie <royenheart@gmail.com>
+Signed-off-by: Ao Zhou <n05ec@lzu.edu.cn>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/batman-adv/bridge_loop_avoidance.c | 27 +++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index 986f707e7d973..ffbd8fa7acce1 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -2206,6 +2206,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+ struct batadv_bla_claim *claim)
+ {
+ u8 *primary_addr = primary_if->net_dev->dev_addr;
++ struct batadv_bla_backbone_gw *backbone_gw;
+ u16 backbone_crc;
+ bool is_own;
+ void *hdr;
+@@ -2221,32 +2222,35 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+
+ genl_dump_check_consistent(cb, hdr);
+
+- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+- primary_addr);
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
+
+- spin_lock_bh(&claim->backbone_gw->crc_lock);
+- backbone_crc = claim->backbone_gw->crc;
+- spin_unlock_bh(&claim->backbone_gw->crc_lock);
++ spin_lock_bh(&backbone_gw->crc_lock);
++ backbone_crc = backbone_gw->crc;
++ spin_unlock_bh(&backbone_gw->crc_lock);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
+ nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+- claim->backbone_gw->orig) ||
++ backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
++put_backbone_gw:
++ batadv_backbone_gw_put(backbone_gw);
+ out:
+ return ret;
+ }
+@@ -2612,6 +2616,7 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
+ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid)
+ {
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_bla_claim search_claim;
+ struct batadv_bla_claim *claim = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
+@@ -2634,9 +2639,13 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ * return false.
+ */
+ if (claim) {
+- if (!batadv_compare_eth(claim->backbone_gw->orig,
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ ret = false;
++
++ batadv_backbone_gw_put(backbone_gw);
+ batadv_claim_put(claim);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From bffb8ceb31801f89411c3a189005a90a60e494b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 16:23:03 +0200
+Subject: drm/i915/gt: fix refcount underflow in intel_engine_park_heartbeat
+
+From: Sebastian Brzezinka <sebastian.brzezinka@intel.com>
+
+[ Upstream commit 4c71fd099513bfa8acab529b626e1f0097b76061 ]
+
+A use-after-free / refcount underflow is possible when the heartbeat
+worker and intel_engine_park_heartbeat() race to release the same
+engine->heartbeat.systole request.
+
+The heartbeat worker reads engine->heartbeat.systole and calls
+i915_request_put() on it when the request is complete, but clears
+the pointer in a separate, non-atomic step. Concurrently, a request
+retirement on another CPU can drop the engine wakeref to zero, triggering
+__engine_park() -> intel_engine_park_heartbeat(). If the heartbeat
+timer is pending at that point, cancel_delayed_work() returns true and
+intel_engine_park_heartbeat() reads the stale non-NULL systole pointer
+and calls i915_request_put() on it again, causing a refcount underflow:
+
+```
+<4> [487.221889] Workqueue: i915-unordered engine_retire [i915]
+<4> [487.222640] RIP: 0010:refcount_warn_saturate+0x68/0xb0
+...
+<4> [487.222707] Call Trace:
+<4> [487.222711] <TASK>
+<4> [487.222716] intel_engine_park_heartbeat.part.0+0x6f/0x80 [i915]
+<4> [487.223115] intel_engine_park_heartbeat+0x25/0x40 [i915]
+<4> [487.223566] __engine_park+0xb9/0x650 [i915]
+<4> [487.223973] ____intel_wakeref_put_last+0x2e/0xb0 [i915]
+<4> [487.224408] __intel_wakeref_put_last+0x72/0x90 [i915]
+<4> [487.224797] intel_context_exit_engine+0x7c/0x80 [i915]
+<4> [487.225238] intel_context_exit+0xf1/0x1b0 [i915]
+<4> [487.225695] i915_request_retire.part.0+0x1b9/0x530 [i915]
+<4> [487.226178] i915_request_retire+0x1c/0x40 [i915]
+<4> [487.226625] engine_retire+0x122/0x180 [i915]
+<4> [487.227037] process_one_work+0x239/0x760
+<4> [487.227060] worker_thread+0x200/0x3f0
+<4> [487.227068] ? __pfx_worker_thread+0x10/0x10
+<4> [487.227075] kthread+0x10d/0x150
+<4> [487.227083] ? __pfx_kthread+0x10/0x10
+<4> [487.227092] ret_from_fork+0x3d4/0x480
+<4> [487.227099] ? __pfx_kthread+0x10/0x10
+<4> [487.227107] ret_from_fork_asm+0x1a/0x30
+<4> [487.227141] </TASK>
+```
+
+Fix this by replacing the non-atomic pointer read + separate clear with
+xchg() in both racing paths. xchg() is a single indivisible hardware
+instruction that atomically reads the old pointer and writes NULL. This
+guarantees only one of the two concurrent callers obtains the non-NULL
+pointer and performs the put, the other gets NULL and skips it.
+
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/work_items/15880
+Fixes: 058179e72e09 ("drm/i915/gt: Replace hangcheck by heartbeats")
+Cc: <stable@vger.kernel.org> # v5.5+
+Signed-off-by: Sebastian Brzezinka <sebastian.brzezinka@intel.com>
+Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://lore.kernel.org/r/d4c1c14255688dd07cc8044973c4f032a8d1559e.1775038106.git.sebastian.brzezinka@intel.com
+(cherry picked from commit 13238dc0ee4f9ab8dafa2cca7295736191ae2f42)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/i915/gt/intel_engine_heartbeat.c | 26 +++++++++++++------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+index 5067d0524d4b5..780e29fa4aeeb 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+@@ -70,10 +70,12 @@ static void heartbeat(struct work_struct *wrk)
+ /* Just in case everything has gone horribly wrong, give it a kick */
+ intel_engine_flush_submission(engine);
+
+- rq = engine->heartbeat.systole;
+- if (rq && i915_request_completed(rq)) {
+- i915_request_put(rq);
+- engine->heartbeat.systole = NULL;
++ rq = xchg(&engine->heartbeat.systole, NULL);
++ if (rq) {
++ if (i915_request_completed(rq))
++ i915_request_put(rq);
++ else
++ engine->heartbeat.systole = rq;
+ }
+
+ if (!intel_engine_pm_get_if_awake(engine))
+@@ -153,8 +155,11 @@ static void heartbeat(struct work_struct *wrk)
+ unlock:
+ mutex_unlock(&ce->timeline->mutex);
+ out:
+- if (!next_heartbeat(engine))
+- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
++ if (!next_heartbeat(engine)) {
++ rq = xchg(&engine->heartbeat.systole, NULL);
++ if (rq)
++ i915_request_put(rq);
++ }
+ intel_engine_pm_put(engine);
+ }
+
+@@ -168,8 +173,13 @@ void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+
+ void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+ {
+- if (cancel_delayed_work(&engine->heartbeat.work))
+- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
++ if (cancel_delayed_work(&engine->heartbeat.work)) {
++ struct i915_request *rq;
++
++ rq = xchg(&engine->heartbeat.systole, NULL);
++ if (rq)
++ i915_request_put(rq);
++ }
+ }
+
+ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+--
+2.53.0
+
--- /dev/null
+From bffb8ceb31801f89411c3a189005a90a60e494b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 16:23:03 +0200
+Subject: drm/i915/gt: fix refcount underflow in intel_engine_park_heartbeat
+
+From: Sebastian Brzezinka <sebastian.brzezinka@intel.com>
+
+[ Upstream commit 4c71fd099513bfa8acab529b626e1f0097b76061 ]
+
+A use-after-free / refcount underflow is possible when the heartbeat
+worker and intel_engine_park_heartbeat() race to release the same
+engine->heartbeat.systole request.
+
+The heartbeat worker reads engine->heartbeat.systole and calls
+i915_request_put() on it when the request is complete, but clears
+the pointer in a separate, non-atomic step. Concurrently, a request
+retirement on another CPU can drop the engine wakeref to zero, triggering
+__engine_park() -> intel_engine_park_heartbeat(). If the heartbeat
+timer is pending at that point, cancel_delayed_work() returns true and
+intel_engine_park_heartbeat() reads the stale non-NULL systole pointer
+and calls i915_request_put() on it again, causing a refcount underflow:
+
+```
+<4> [487.221889] Workqueue: i915-unordered engine_retire [i915]
+<4> [487.222640] RIP: 0010:refcount_warn_saturate+0x68/0xb0
+...
+<4> [487.222707] Call Trace:
+<4> [487.222711] <TASK>
+<4> [487.222716] intel_engine_park_heartbeat.part.0+0x6f/0x80 [i915]
+<4> [487.223115] intel_engine_park_heartbeat+0x25/0x40 [i915]
+<4> [487.223566] __engine_park+0xb9/0x650 [i915]
+<4> [487.223973] ____intel_wakeref_put_last+0x2e/0xb0 [i915]
+<4> [487.224408] __intel_wakeref_put_last+0x72/0x90 [i915]
+<4> [487.224797] intel_context_exit_engine+0x7c/0x80 [i915]
+<4> [487.225238] intel_context_exit+0xf1/0x1b0 [i915]
+<4> [487.225695] i915_request_retire.part.0+0x1b9/0x530 [i915]
+<4> [487.226178] i915_request_retire+0x1c/0x40 [i915]
+<4> [487.226625] engine_retire+0x122/0x180 [i915]
+<4> [487.227037] process_one_work+0x239/0x760
+<4> [487.227060] worker_thread+0x200/0x3f0
+<4> [487.227068] ? __pfx_worker_thread+0x10/0x10
+<4> [487.227075] kthread+0x10d/0x150
+<4> [487.227083] ? __pfx_kthread+0x10/0x10
+<4> [487.227092] ret_from_fork+0x3d4/0x480
+<4> [487.227099] ? __pfx_kthread+0x10/0x10
+<4> [487.227107] ret_from_fork_asm+0x1a/0x30
+<4> [487.227141] </TASK>
+```
+
+Fix this by replacing the non-atomic pointer read + separate clear with
+xchg() in both racing paths. xchg() is a single indivisible hardware
+instruction that atomically reads the old pointer and writes NULL. This
+guarantees only one of the two concurrent callers obtains the non-NULL
+pointer and performs the put, the other gets NULL and skips it.
+
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/work_items/15880
+Fixes: 058179e72e09 ("drm/i915/gt: Replace hangcheck by heartbeats")
+Cc: <stable@vger.kernel.org> # v5.5+
+Signed-off-by: Sebastian Brzezinka <sebastian.brzezinka@intel.com>
+Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://lore.kernel.org/r/d4c1c14255688dd07cc8044973c4f032a8d1559e.1775038106.git.sebastian.brzezinka@intel.com
+(cherry picked from commit 13238dc0ee4f9ab8dafa2cca7295736191ae2f42)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/i915/gt/intel_engine_heartbeat.c | 26 +++++++++++++------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+index 5067d0524d4b5..780e29fa4aeeb 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+@@ -70,10 +70,12 @@ static void heartbeat(struct work_struct *wrk)
+ /* Just in case everything has gone horribly wrong, give it a kick */
+ intel_engine_flush_submission(engine);
+
+- rq = engine->heartbeat.systole;
+- if (rq && i915_request_completed(rq)) {
+- i915_request_put(rq);
+- engine->heartbeat.systole = NULL;
++ rq = xchg(&engine->heartbeat.systole, NULL);
++ if (rq) {
++ if (i915_request_completed(rq))
++ i915_request_put(rq);
++ else
++ engine->heartbeat.systole = rq;
+ }
+
+ if (!intel_engine_pm_get_if_awake(engine))
+@@ -153,8 +155,11 @@ static void heartbeat(struct work_struct *wrk)
+ unlock:
+ mutex_unlock(&ce->timeline->mutex);
+ out:
+- if (!next_heartbeat(engine))
+- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
++ if (!next_heartbeat(engine)) {
++ rq = xchg(&engine->heartbeat.systole, NULL);
++ if (rq)
++ i915_request_put(rq);
++ }
+ intel_engine_pm_put(engine);
+ }
+
+@@ -168,8 +173,13 @@ void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+
+ void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+ {
+- if (cancel_delayed_work(&engine->heartbeat.work))
+- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
++ if (cancel_delayed_work(&engine->heartbeat.work)) {
++ struct i915_request *rq;
++
++ rq = xchg(&engine->heartbeat.systole, NULL);
++ if (rq)
++ i915_request_put(rq);
++ }
+ }
+
+ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+--
+2.53.0
+
--- /dev/null
+From 50a09e2d662cbddaa0bd3f7a7212bcae706df442 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:21 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware, resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index dd03bc905841f..0d61a89fe99df 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -486,7 +486,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a600670d00e97..1aee44124f118 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 7a7467d3f7f05..c0e8237c779f3 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1715,6 +1715,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 24d2ab277d78e..9cf3644dfd276 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -207,11 +207,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index abdbbe8c5a43a..216271c7b60f1 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -158,6 +158,8 @@ void cpu_probe(void)
+ cpu_set_fpu_opts(c);
+ else
+ cpu_set_nofpu_opts(c);
++
++ c->vmbits = 31;
+ }
+
+ void cpu_report(void)
+--
+2.53.0
+
--- /dev/null
+From 50a09e2d662cbddaa0bd3f7a7212bcae706df442 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:21 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware, resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index dd03bc905841f..0d61a89fe99df 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -486,7 +486,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a600670d00e97..1aee44124f118 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 7a7467d3f7f05..c0e8237c779f3 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1715,6 +1715,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 24d2ab277d78e..9cf3644dfd276 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -207,11 +207,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index abdbbe8c5a43a..216271c7b60f1 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -158,6 +158,8 @@ void cpu_probe(void)
+ cpu_set_fpu_opts(c);
+ else
+ cpu_set_nofpu_opts(c);
++
++ c->vmbits = 31;
+ }
+
+ void cpu_report(void)
+--
+2.53.0
+
--- /dev/null
+From 97bf36cd80262bb41153869469b61bf22687a207 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:20 +0100
+Subject: mips: mm: Allocate tlb_vpn array atomically
+
+From: Stefan Wiehler <stefan.wiehler@nokia.com>
+
+commit 01cc50ea5167bb14117257ec084637abe9e5f691 upstream.
+
+Found by DEBUG_ATOMIC_SLEEP:
+
+ BUG: sleeping function called from invalid context at /include/linux/sched/mm.h:306
+ in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 0, name: swapper/1
+ preempt_count: 1, expected: 0
+ RCU nest depth: 0, expected: 0
+ no locks held by swapper/1/0.
+ irq event stamp: 0
+ hardirqs last enabled at (0): [<0000000000000000>] 0x0
+ hardirqs last disabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last enabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last disabled at (0): [<0000000000000000>] 0x0
+ CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.6.119-d79e757675ec-fct #1
+ Stack : 800000000290bad8 0000000000000000 0000000000000008 800000000290bae8
+ 800000000290bae8 800000000290bc78 0000000000000000 0000000000000000
+ ffffffff80c80000 0000000000000001 ffffffff80d8dee8 ffffffff810d09c0
+ 784bb2a7ec10647d 0000000000000010 ffffffff80a6fd60 8000000001d8a9c0
+ 0000000000000000 0000000000000000 ffffffff80d90000 0000000000000000
+ ffffffff80c9e0e8 0000000007ffffff 0000000000000cc0 0000000000000400
+ ffffffffffffffff 0000000000000001 0000000000000002 ffffffffc0149ed8
+ fffffffffffffffe 8000000002908000 800000000290bae0 ffffffff80a81b74
+ ffffffff80129fb0 0000000000000000 0000000000000000 0000000000000000
+ 0000000000000000 0000000000000000 ffffffff80129fd0 0000000000000000
+ ...
+ Call Trace:
+ [<ffffffff80129fd0>] show_stack+0x60/0x158
+ [<ffffffff80a7f894>] dump_stack_lvl+0x88/0xbc
+ [<ffffffff8018d3c8>] __might_resched+0x268/0x288
+ [<ffffffff803648b0>] __kmem_cache_alloc_node+0x2e0/0x330
+ [<ffffffff80302788>] __kmalloc+0x58/0xd0
+ [<ffffffff80a81b74>] r4k_tlb_uniquify+0x7c/0x428
+ [<ffffffff80143e8c>] tlb_init+0x7c/0x110
+ [<ffffffff8012bdb4>] per_cpu_trap_init+0x16c/0x1d0
+ [<ffffffff80133258>] start_secondary+0x28/0x128
+
+Fixes: 231ac951faba ("MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow")
+Signed-off-by: Stefan Wiehler <stefan.wiehler@nokia.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 8bc98c311ca62..3aef6acd57bdd 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -528,7 +528,7 @@ static void __ref r4k_tlb_uniquify(void)
+
+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+ tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+ if (WARN_ON(!tlb_vpns))
+ return; /* Pray local_flush_tlb_all() is good enough. */
+--
+2.53.0
+
--- /dev/null
+From 97bf36cd80262bb41153869469b61bf22687a207 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:20 +0100
+Subject: mips: mm: Allocate tlb_vpn array atomically
+
+From: Stefan Wiehler <stefan.wiehler@nokia.com>
+
+commit 01cc50ea5167bb14117257ec084637abe9e5f691 upstream.
+
+Found by DEBUG_ATOMIC_SLEEP:
+
+ BUG: sleeping function called from invalid context at /include/linux/sched/mm.h:306
+ in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 0, name: swapper/1
+ preempt_count: 1, expected: 0
+ RCU nest depth: 0, expected: 0
+ no locks held by swapper/1/0.
+ irq event stamp: 0
+ hardirqs last enabled at (0): [<0000000000000000>] 0x0
+ hardirqs last disabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last enabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last disabled at (0): [<0000000000000000>] 0x0
+ CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.6.119-d79e757675ec-fct #1
+ Stack : 800000000290bad8 0000000000000000 0000000000000008 800000000290bae8
+ 800000000290bae8 800000000290bc78 0000000000000000 0000000000000000
+ ffffffff80c80000 0000000000000001 ffffffff80d8dee8 ffffffff810d09c0
+ 784bb2a7ec10647d 0000000000000010 ffffffff80a6fd60 8000000001d8a9c0
+ 0000000000000000 0000000000000000 ffffffff80d90000 0000000000000000
+ ffffffff80c9e0e8 0000000007ffffff 0000000000000cc0 0000000000000400
+ ffffffffffffffff 0000000000000001 0000000000000002 ffffffffc0149ed8
+ fffffffffffffffe 8000000002908000 800000000290bae0 ffffffff80a81b74
+ ffffffff80129fb0 0000000000000000 0000000000000000 0000000000000000
+ 0000000000000000 0000000000000000 ffffffff80129fd0 0000000000000000
+ ...
+ Call Trace:
+ [<ffffffff80129fd0>] show_stack+0x60/0x158
+ [<ffffffff80a7f894>] dump_stack_lvl+0x88/0xbc
+ [<ffffffff8018d3c8>] __might_resched+0x268/0x288
+ [<ffffffff803648b0>] __kmem_cache_alloc_node+0x2e0/0x330
+ [<ffffffff80302788>] __kmalloc+0x58/0xd0
+ [<ffffffff80a81b74>] r4k_tlb_uniquify+0x7c/0x428
+ [<ffffffff80143e8c>] tlb_init+0x7c/0x110
+ [<ffffffff8012bdb4>] per_cpu_trap_init+0x16c/0x1d0
+ [<ffffffff80133258>] start_secondary+0x28/0x128
+
+Fixes: 231ac951faba ("MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow")
+Signed-off-by: Stefan Wiehler <stefan.wiehler@nokia.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 8bc98c311ca62..3aef6acd57bdd 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -528,7 +528,7 @@ static void __ref r4k_tlb_uniquify(void)
+
+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+ tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+ if (WARN_ON(!tlb_vpns))
+ return; /* Pray local_flush_tlb_all() is good enough. */
+--
+2.53.0
+
--- /dev/null
+From 0305a1bb0abade1fc42b49a0e92b9b610092a1d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:19 +0100
+Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream.
+
+Owing to Config4.MMUSizeExt and VTLB/FTLB MMU features later MIPSr2+
+cores can have more than 64 TLB entries. Therefore allocate an array
+for uniquification instead of placing too small an array on the stack.
+
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Co-developed-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+Cc: stable@vger.kernel.org # v6.17+
+Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+[ Use memblock_free(__pa(...), ...) for 5.10.y. ]
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index d9a5ede8869bd..8bc98c311ca62 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -12,6 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/smp.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -512,17 +513,26 @@ static int r4k_vpn_cmp(const void *a, const void *b)
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+-static void r4k_tlb_uniquify(void)
++static void __ref r4k_tlb_uniquify(void)
+ {
+- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
+ int start = num_wired_entries();
++ phys_addr_t tlb_vpn_size;
++ unsigned long *tlb_vpns;
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
+ htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+@@ -575,6 +585,10 @@ static void r4k_tlb_uniquify(void)
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
++ if (use_slab)
++ kfree(tlb_vpns);
++ else
++ memblock_free(__pa(tlb_vpns), tlb_vpn_size);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 0305a1bb0abade1fc42b49a0e92b9b610092a1d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:19 +0100
+Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream.
+
+Owing to Config4.MMUSizeExt and VTLB/FTLB MMU features later MIPSr2+
+cores can have more than 64 TLB entries. Therefore allocate an array
+for uniquification instead of placing too small an array on the stack.
+
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Co-developed-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+Cc: stable@vger.kernel.org # v6.17+
+Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+[ Use memblock_free(__pa(...), ...) for 5.10.y. ]
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index d9a5ede8869bd..8bc98c311ca62 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -12,6 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/smp.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -512,17 +513,26 @@ static int r4k_vpn_cmp(const void *a, const void *b)
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+-static void r4k_tlb_uniquify(void)
++static void __ref r4k_tlb_uniquify(void)
+ {
+- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
+ int start = num_wired_entries();
++ phys_addr_t tlb_vpn_size;
++ unsigned long *tlb_vpns;
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
+ htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+@@ -575,6 +585,10 @@ static void r4k_tlb_uniquify(void)
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
++ if (use_slab)
++ kfree(tlb_vpns);
++ else
++ memblock_free(__pa(tlb_vpns), tlb_vpn_size);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 6dbf3fb5e05adae3e74e9a48972eac41af492fa8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:23 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index af0f1d4693099..2e4b4668afd89 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbmisc.h>
+
+ extern void build_tlb_refill_handler(void);
+@@ -501,87 +503,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From 6dbf3fb5e05adae3e74e9a48972eac41af492fa8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:23 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index af0f1d4693099..2e4b4668afd89 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbmisc.h>
+
+ extern void build_tlb_refill_handler(void);
+@@ -501,87 +503,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From 793719611352407ef5081ec188caaee0a8b0d210 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:22 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 3aef6acd57bdd..af0f1d4693099 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -630,7 +630,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
--- /dev/null
+From 793719611352407ef5081ec188caaee0a8b0d210 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:21:22 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 3aef6acd57bdd..af0f1d4693099 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -630,7 +630,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
--- /dev/null
+From df6518b0ba40c8cef2820f7826c7eced97bd789a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index befc9d2bc0b52..46bdc38081164 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3474,7 +3474,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 6b2a215b27862..2a1d00e702d1b 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -595,7 +595,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From df6518b0ba40c8cef2820f7826c7eced97bd789a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index befc9d2bc0b52..46bdc38081164 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3474,7 +3474,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 6b2a215b27862..2a1d00e702d1b 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -595,7 +595,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From c6a47e9ffbeedbba9859f3f032182e79b5b1a566 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 04:33:04 +0000
+Subject: netfilter: nft_set_pipapo: do not rely on ZERO_SIZE_PTR
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 07ace0bbe03b3d8e85869af1dec5e4087b1d57b8 upstream
+
+pipapo relies on kmalloc(0) returning ZERO_SIZE_PTR (i.e., not NULL
+but pointer is invalid).
+
+Rework this to not call slab allocator when we'd request a 0-byte
+allocation.
+
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Mukul Sikka <mukul.sikka@broadcom.com>
+Signed-off-by: Brennan Lamoreaux <brennan.lamoreaux@broadcom.com>
+[Keerthana: In older stable branches (v6.6 and earlier), the allocation logic in
+pipapo_clone() still relies on `src->rules` rather than `src->rules_alloc`
+(introduced in v6.9 via 9f439bd6ef4f). Consequently, the previously
+backported INT_MAX clamping check uses `src->rules`. This patch correctly
+moves that `src->rules > (INT_MAX / ...)` check inside the new
+`if (src->rules > 0)` block]
+Signed-off-by: Keerthana K <keerthana.kalyanasundaram@broadcom.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index baabbfe62a27f..39623bb726a5e 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -524,6 +524,9 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ struct nft_pipapo_field *f;
+ int i;
+
++ if (m->bsize_max == 0)
++ return ret;
++
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ if (!res_map) {
+ ret = ERR_PTR(-ENOMEM);
+@@ -1363,14 +1366,20 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ src->bsize * sizeof(*dst->lt) *
+ src->groups * NFT_PIPAPO_BUCKETS(src->bb));
+
+- if (src->rules > (INT_MAX / sizeof(*src->mt)))
+- goto out_mt;
++ if (src->rules > 0) {
++ if (src->rules > (INT_MAX / sizeof(*src->mt)))
++ goto out_mt;
++
++ dst->mt = kvmalloc_array(src->rules, sizeof(*src->mt),
++ GFP_KERNEL);
++ if (!dst->mt)
++ goto out_mt;
+
+- dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
+- if (!dst->mt)
+- goto out_mt;
++ memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
++ } else {
++ dst->mt = NULL;
++ }
+
+- memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
+ src++;
+ dst++;
+ }
+--
+2.53.0
+
--- /dev/null
+From c6a47e9ffbeedbba9859f3f032182e79b5b1a566 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 04:33:04 +0000
+Subject: netfilter: nft_set_pipapo: do not rely on ZERO_SIZE_PTR
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 07ace0bbe03b3d8e85869af1dec5e4087b1d57b8 upstream
+
+pipapo relies on kmalloc(0) returning ZERO_SIZE_PTR (i.e., not NULL
+but pointer is invalid).
+
+Rework this to not call slab allocator when we'd request a 0-byte
+allocation.
+
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Mukul Sikka <mukul.sikka@broadcom.com>
+Signed-off-by: Brennan Lamoreaux <brennan.lamoreaux@broadcom.com>
+[Keerthana: In older stable branches (v6.6 and earlier), the allocation logic in
+pipapo_clone() still relies on `src->rules` rather than `src->rules_alloc`
+(introduced in v6.9 via 9f439bd6ef4f). Consequently, the previously
+backported INT_MAX clamping check uses `src->rules`. This patch correctly
+moves that `src->rules > (INT_MAX / ...)` check inside the new
+`if (src->rules > 0)` block]
+Signed-off-by: Keerthana K <keerthana.kalyanasundaram@broadcom.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index baabbfe62a27f..39623bb726a5e 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -524,6 +524,9 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ struct nft_pipapo_field *f;
+ int i;
+
++ if (m->bsize_max == 0)
++ return ret;
++
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ if (!res_map) {
+ ret = ERR_PTR(-ENOMEM);
+@@ -1363,14 +1366,20 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ src->bsize * sizeof(*dst->lt) *
+ src->groups * NFT_PIPAPO_BUCKETS(src->bb));
+
+- if (src->rules > (INT_MAX / sizeof(*src->mt)))
+- goto out_mt;
++ if (src->rules > 0) {
++ if (src->rules > (INT_MAX / sizeof(*src->mt)))
++ goto out_mt;
++
++ dst->mt = kvmalloc_array(src->rules, sizeof(*src->mt),
++ GFP_KERNEL);
++ if (!dst->mt)
++ goto out_mt;
+
+- dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
+- if (!dst->mt)
+- goto out_mt;
++ memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
++ } else {
++ dst->mt = NULL;
++ }
+
+- memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
+ src++;
+ dst++;
+ }
+--
+2.53.0
+
af_unix-read-unix_diag_vfs-data-under-unix_state_loc.patch
l2tp-drop-large-packets-with-udp-encap.patch
crypto-algif_aead-fix-minimum-rx-size-check-for-decr.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch
+drm-i915-gt-fix-refcount-underflow-in-intel_engine_p.patch
+mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflo.patch
+mips-mm-allocate-tlb_vpn-array-atomically.patch
+mips-always-record-segbits-in-cpu_data.vmbits.patch
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch
+netfilter-nft_set_pipapo-do-not-rely-on-zero_size_pt.patch
+batman-adv-hold-claim-backbone-gateways-by-reference.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch-4341
+drm-i915-gt-fix-refcount-underflow-in-intel_engine_p.patch-18701
+mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflo.patch-7929
+mips-mm-allocate-tlb_vpn-array-atomically.patch-11202
+mips-always-record-segbits-in-cpu_data.vmbits.patch-28732
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch-18877
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch-25661
+netfilter-nft_set_pipapo-do-not-rely-on-zero_size_pt.patch-17183
+batman-adv-hold-claim-backbone-gateways-by-reference.patch-4239
--- /dev/null
+From 33ea23496035b1fdbd2c75f2cd5cc6a77cacf97f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index f33d25a4e4cc7..682adbdf7ee79 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -304,9 +304,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -314,8 +342,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -327,26 +357,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 33ea23496035b1fdbd2c75f2cd5cc6a77cacf97f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index f33d25a4e4cc7..682adbdf7ee79 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -304,9 +304,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -314,8 +342,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -327,26 +357,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 9247a4d23e700a19a1168245c0b138c87a0cbfca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) the Air and
+Safe modes get disabled.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index acfad87636277..e1c6a38303dfd 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1982,7 +1982,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.53.0
+
--- /dev/null
+From 9247a4d23e700a19a1168245c0b138c87a0cbfca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:34:48 +1030
+Subject: ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit a8cc55bf81a45772cad44c83ea7bb0e98431094a ]
+
+Remove QUIRK_FLAG_VALIDATE_RATES for Focusrite. With the previous
+commit, focusrite_valid_sample_rate() produces correct rate tables
+without USB probing.
+
+QUIRK_FLAG_VALIDATE_RATES sends SET_CUR requests for each rate (~25ms
+each) and leaves the device at 192kHz. This is a problem because that
+rate: 1) disables the internal mixer, so outputs are silent until an
+application opens the PCM and sets a lower rate, and 2) the Air and
+Safe modes get disabled.
+
+Fixes: 5963e5262180 ("ALSA: usb-audio: Enable rate validation for Scarlett devices")
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/09b9c012024c998c4ca14bd876ef0dce0d0b6101.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index acfad87636277..e1c6a38303dfd 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1982,7 +1982,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ VENDOR_FLG(0x07fd, /* MOTU */
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+- QUIRK_FLAG_VALIDATE_RATES),
++ 0),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+--
+2.53.0
+
--- /dev/null
+From fbc12451ba055f84b311e28e013bc44d9e0758c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jul 2023 19:56:45 +0300
+Subject: ALSA: usb-audio: Update for native DSD support quirks
+
+From: Jussi Laako <jussi@sonarnerd.net>
+
+[ Upstream commit f7fea075edfa085c25eb34c44ceacf3602537f98 ]
+
+Maintenance patch for native DSD support.
+
+Remove incorrect T+A device quirks. Move set of device quirks to vendor
+quirks. Add set of missing device and vendor quirks.
+
+Signed-off-by: Jussi Laako <jussi@sonarnerd.net>
+Link: https://lore.kernel.org/r/20230726165645.404311-1-jussi@sonarnerd.net
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Stable-dep-of: a8cc55bf81a4 ("ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 34 ++++++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 4cf2f48b401ee..acfad87636277 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1631,8 +1631,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+
+ /* XMOS based USB DACs */
+ switch (chip->usb_id) {
+- case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
+- case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */
++ case USB_ID(0x139f, 0x5504): /* Nagra DAC */
++ case USB_ID(0x20b1, 0x3089): /* Mola-Mola DAC */
++ case USB_ID(0x2522, 0x0007): /* LH Labs Geek Out 1V5 */
++ case USB_ID(0x2522, 0x0009): /* LH Labs Geek Pulse X Inifinity 2V0 */
+ case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */
+ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
+ if (fp->altsetting == 2)
+@@ -1642,14 +1644,18 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
+ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
+ case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
+- case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
++ case USB_ID(0x16d0, 0x06b4): /* NuPrime Audio HD-AVP/AVA */
+ case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
++ case USB_ID(0x16d0, 0x09d8): /* NuPrime IDA-8 */
+ case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
++ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
+ case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
++ case USB_ID(0x20a0, 0x4143): /* WaveIO USB Audio 2.0 */
+ case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
+ case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
+ case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
+ case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
++ case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
+ case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
+ case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
+ case USB_ID(0x6b42, 0x0042): /* MSB Technology */
+@@ -1659,9 +1665,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+
+ /* Amanero Combo384 USB based DACs with native DSD support */
+ case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
+- case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
+- case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
+- case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
+ if (fp->altsetting == 2) {
+ switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
+ case 0x199:
+@@ -1817,6 +1820,9 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_IFACE_DELAY),
+ DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
+ QUIRK_FLAG_FORCE_IFACE_RESET),
++ DEVICE_FLG(0x0644, 0x806b, /* TEAC UD-701 */
++ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
++ QUIRK_FLAG_IFACE_DELAY),
+ DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+@@ -1873,6 +1879,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x154e, 0x3006, /* Marantz SA-14S1 */
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
++ DEVICE_FLG(0x154e, 0x300b, /* Marantz SA-KI RUBY / SA-12 */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x154e, 0x500e, /* Denon DN-X1600 */
+ QUIRK_FLAG_IGNORE_CLOCK_SOURCE),
+ DEVICE_FLG(0x1686, 0x00dd, /* Zoom R16/24 */
+@@ -1929,6 +1937,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x21b4, 0x0230, /* Ayre QB-9 Twenty */
++ QUIRK_FLAG_DSD_RAW),
++ DEVICE_FLG(0x21b4, 0x0232, /* Ayre QX-5 Twenty */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ QUIRK_FLAG_SET_IFACE_FIRST),
+ DEVICE_FLG(0x262a, 0x9302, /* ddHiFi TC44C */
+@@ -1971,12 +1983,18 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+ QUIRK_FLAG_VALIDATE_RATES),
++ VENDOR_FLG(0x1511, /* AURALiC */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x18d1, /* iBasso devices */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x1de7, /* Phoenix Audio */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ VENDOR_FLG(0x20b1, /* XMOS based devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x21ed, /* Accuphase Laboratory */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x22d9, /* Oppo */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x23ba, /* Playback Design */
+@@ -1992,10 +2010,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2ab6, /* T+A devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x2d87, /* Cayin device */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3336, /* HEM devices */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3353, /* Khadas devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x35f4, /* MSB Technology */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3842, /* EVGA */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0xc502, /* HiBy devices */
+--
+2.53.0
+
--- /dev/null
+From fbc12451ba055f84b311e28e013bc44d9e0758c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jul 2023 19:56:45 +0300
+Subject: ALSA: usb-audio: Update for native DSD support quirks
+
+From: Jussi Laako <jussi@sonarnerd.net>
+
+[ Upstream commit f7fea075edfa085c25eb34c44ceacf3602537f98 ]
+
+Maintenance patch for native DSD support.
+
+Remove incorrect T+A device quirks. Move set of device quirks to vendor
+quirks. Add set of missing device and vendor quirks.
+
+Signed-off-by: Jussi Laako <jussi@sonarnerd.net>
+Link: https://lore.kernel.org/r/20230726165645.404311-1-jussi@sonarnerd.net
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Stable-dep-of: a8cc55bf81a4 ("ALSA: usb-audio: Remove VALIDATE_RATES quirk for Focusrite devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 34 ++++++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 4cf2f48b401ee..acfad87636277 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1631,8 +1631,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+
+ /* XMOS based USB DACs */
+ switch (chip->usb_id) {
+- case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
+- case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */
++ case USB_ID(0x139f, 0x5504): /* Nagra DAC */
++ case USB_ID(0x20b1, 0x3089): /* Mola-Mola DAC */
++ case USB_ID(0x2522, 0x0007): /* LH Labs Geek Out 1V5 */
++ case USB_ID(0x2522, 0x0009): /* LH Labs Geek Pulse X Inifinity 2V0 */
+ case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */
+ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
+ if (fp->altsetting == 2)
+@@ -1642,14 +1644,18 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
+ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
+ case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
+- case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
++ case USB_ID(0x16d0, 0x06b4): /* NuPrime Audio HD-AVP/AVA */
+ case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
++ case USB_ID(0x16d0, 0x09d8): /* NuPrime IDA-8 */
+ case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
++ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
+ case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
++ case USB_ID(0x20a0, 0x4143): /* WaveIO USB Audio 2.0 */
+ case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
+ case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
+ case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
+ case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
++ case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
+ case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
+ case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
+ case USB_ID(0x6b42, 0x0042): /* MSB Technology */
+@@ -1659,9 +1665,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+
+ /* Amanero Combo384 USB based DACs with native DSD support */
+ case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
+- case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
+- case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
+- case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
+ if (fp->altsetting == 2) {
+ switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
+ case 0x199:
+@@ -1817,6 +1820,9 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_IFACE_DELAY),
+ DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
+ QUIRK_FLAG_FORCE_IFACE_RESET),
++ DEVICE_FLG(0x0644, 0x806b, /* TEAC UD-701 */
++ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
++ QUIRK_FLAG_IFACE_DELAY),
+ DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+@@ -1873,6 +1879,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x154e, 0x3006, /* Marantz SA-14S1 */
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
++ DEVICE_FLG(0x154e, 0x300b, /* Marantz SA-KI RUBY / SA-12 */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x154e, 0x500e, /* Denon DN-X1600 */
+ QUIRK_FLAG_IGNORE_CLOCK_SOURCE),
+ DEVICE_FLG(0x1686, 0x00dd, /* Zoom R16/24 */
+@@ -1929,6 +1937,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x21b4, 0x0230, /* Ayre QB-9 Twenty */
++ QUIRK_FLAG_DSD_RAW),
++ DEVICE_FLG(0x21b4, 0x0232, /* Ayre QX-5 Twenty */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ QUIRK_FLAG_SET_IFACE_FIRST),
+ DEVICE_FLG(0x262a, 0x9302, /* ddHiFi TC44C */
+@@ -1971,12 +1983,18 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+ QUIRK_FLAG_VALIDATE_RATES),
++ VENDOR_FLG(0x1511, /* AURALiC */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x152a, /* Thesycon devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x18d1, /* iBasso devices */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x1de7, /* Phoenix Audio */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ VENDOR_FLG(0x20b1, /* XMOS based devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x21ed, /* Accuphase Laboratory */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x22d9, /* Oppo */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x23ba, /* Playback Design */
+@@ -1992,10 +2010,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2ab6, /* T+A devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x2d87, /* Cayin device */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3336, /* HEM devices */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3353, /* Khadas devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x35f4, /* MSB Technology */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3842, /* EVGA */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0xc502, /* HiBy devices */
+--
+2.53.0
+
--- /dev/null
+From dee28f7611a3cbfccbb161d1baae69081c1d07bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 14:54:07 +0200
+Subject: batman-adv: hold claim backbone gateways by reference
+
+From: Haoze Xie <royenheart@gmail.com>
+
+commit 82d8701b2c930d0e96b0dbc9115a218d791cb0d2 upstream.
+
+batadv_bla_add_claim() can replace claim->backbone_gw and drop the old
+gateway's last reference while readers still follow the pointer.
+
+The netlink claim dump path dereferences claim->backbone_gw->orig and
+takes claim->backbone_gw->crc_lock without pinning the underlying
+backbone gateway. batadv_bla_check_claim() still has the same naked
+pointer access pattern.
+
+Reuse batadv_bla_claim_get_backbone_gw() in both readers so they operate
+on a stable gateway reference until the read-side work is complete.
+This keeps the dump and claim-check paths aligned with the lifetime
+rules introduced for the other BLA claim readers.
+
+Fixes: 23721387c409 ("batman-adv: add basic bridge loop avoidance code")
+Fixes: 04f3f5bf1883 ("batman-adv: add B.A.T.M.A.N. Dump BLA claims via netlink")
+Cc: stable@vger.kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Haoze Xie <royenheart@gmail.com>
+Signed-off-by: Ao Zhou <n05ec@lzu.edu.cn>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/batman-adv/bridge_loop_avoidance.c | 27 +++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index 17687848daec5..fb9aaf82f7136 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -2131,6 +2131,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+ struct batadv_bla_claim *claim)
+ {
+ u8 *primary_addr = primary_if->net_dev->dev_addr;
++ struct batadv_bla_backbone_gw *backbone_gw;
+ u16 backbone_crc;
+ bool is_own;
+ void *hdr;
+@@ -2146,32 +2147,35 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+
+ genl_dump_check_consistent(cb, hdr);
+
+- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+- primary_addr);
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
+
+- spin_lock_bh(&claim->backbone_gw->crc_lock);
+- backbone_crc = claim->backbone_gw->crc;
+- spin_unlock_bh(&claim->backbone_gw->crc_lock);
++ spin_lock_bh(&backbone_gw->crc_lock);
++ backbone_crc = backbone_gw->crc;
++ spin_unlock_bh(&backbone_gw->crc_lock);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
+ nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+- claim->backbone_gw->orig) ||
++ backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
++put_backbone_gw:
++ batadv_backbone_gw_put(backbone_gw);
+ out:
+ return ret;
+ }
+@@ -2467,6 +2471,7 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
+ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid)
+ {
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_bla_claim search_claim;
+ struct batadv_bla_claim *claim = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
+@@ -2489,9 +2494,13 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ * return false.
+ */
+ if (claim) {
+- if (!batadv_compare_eth(claim->backbone_gw->orig,
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ ret = false;
++
++ batadv_backbone_gw_put(backbone_gw);
+ batadv_claim_put(claim);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From dee28f7611a3cbfccbb161d1baae69081c1d07bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 14:54:07 +0200
+Subject: batman-adv: hold claim backbone gateways by reference
+
+From: Haoze Xie <royenheart@gmail.com>
+
+commit 82d8701b2c930d0e96b0dbc9115a218d791cb0d2 upstream.
+
+batadv_bla_add_claim() can replace claim->backbone_gw and drop the old
+gateway's last reference while readers still follow the pointer.
+
+The netlink claim dump path dereferences claim->backbone_gw->orig and
+takes claim->backbone_gw->crc_lock without pinning the underlying
+backbone gateway. batadv_bla_check_claim() still has the same naked
+pointer access pattern.
+
+Reuse batadv_bla_claim_get_backbone_gw() in both readers so they operate
+on a stable gateway reference until the read-side work is complete.
+This keeps the dump and claim-check paths aligned with the lifetime
+rules introduced for the other BLA claim readers.
+
+Fixes: 23721387c409 ("batman-adv: add basic bridge loop avoidance code")
+Fixes: 04f3f5bf1883 ("batman-adv: add B.A.T.M.A.N. Dump BLA claims via netlink")
+Cc: stable@vger.kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Haoze Xie <royenheart@gmail.com>
+Signed-off-by: Ao Zhou <n05ec@lzu.edu.cn>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/batman-adv/bridge_loop_avoidance.c | 27 +++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index 17687848daec5..fb9aaf82f7136 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -2131,6 +2131,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+ struct batadv_bla_claim *claim)
+ {
+ u8 *primary_addr = primary_if->net_dev->dev_addr;
++ struct batadv_bla_backbone_gw *backbone_gw;
+ u16 backbone_crc;
+ bool is_own;
+ void *hdr;
+@@ -2146,32 +2147,35 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+
+ genl_dump_check_consistent(cb, hdr);
+
+- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+- primary_addr);
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
+
+- spin_lock_bh(&claim->backbone_gw->crc_lock);
+- backbone_crc = claim->backbone_gw->crc;
+- spin_unlock_bh(&claim->backbone_gw->crc_lock);
++ spin_lock_bh(&backbone_gw->crc_lock);
++ backbone_crc = backbone_gw->crc;
++ spin_unlock_bh(&backbone_gw->crc_lock);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
+ nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+- claim->backbone_gw->orig) ||
++ backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc)) {
+ genlmsg_cancel(msg, hdr);
+- goto out;
++ goto put_backbone_gw;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
++put_backbone_gw:
++ batadv_backbone_gw_put(backbone_gw);
+ out:
+ return ret;
+ }
+@@ -2467,6 +2471,7 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
+ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid)
+ {
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_bla_claim search_claim;
+ struct batadv_bla_claim *claim = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
+@@ -2489,9 +2494,13 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ * return false.
+ */
+ if (claim) {
+- if (!batadv_compare_eth(claim->backbone_gw->orig,
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ ret = false;
++
++ batadv_backbone_gw_put(backbone_gw);
+ batadv_claim_put(claim);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From a85dac3d9c2aa8c62ecb1e81100ecf2ad8157229 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Mar 2022 13:42:26 +0000
+Subject: i3c: fix uninitialized variable use in i2c setup
+
+From: Jamie Iles <quic_jiles@quicinc.com>
+
+[ Upstream commit 6cbf8b38dfe3aabe330f2c356949bc4d6a1f034f ]
+
+Commit 31b9887c7258 ("i3c: remove i2c board info from i2c_dev_desc")
+removed the boardinfo from i2c_dev_desc to decouple device enumeration from
+setup but did not correctly lookup the i2c_dev_desc to store the new
+device, instead dereferencing an uninitialized variable.
+
+Lookup the device that has already been registered by address to store
+the i2c client device.
+
+Fixes: 31b9887c7258 ("i3c: remove i2c board info from i2c_dev_desc")
+Reported-by: kernel test robot <lkp@intel.com>
+Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Jamie Iles <quic_jiles@quicinc.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Link: https://lore.kernel.org/r/20220308134226.1042367-1-quic_jiles@quicinc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i3c/master.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index dee694024f280..5df943d25cf0a 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -2199,8 +2199,13 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+ * We silently ignore failures here. The bus should keep working
+ * correctly even if one or more i2c devices are not registered.
+ */
+- list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node)
++ list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
++ i2cdev = i3c_master_find_i2c_dev_by_addr(master,
++ i2cboardinfo->base.addr);
++ if (WARN_ON(!i2cdev))
++ continue;
+ i2cdev->dev = i2c_new_client_device(adap, &i2cboardinfo->base);
++ }
+
+ return 0;
+ }
+--
+2.53.0
+
--- /dev/null
+From a85dac3d9c2aa8c62ecb1e81100ecf2ad8157229 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Mar 2022 13:42:26 +0000
+Subject: i3c: fix uninitialized variable use in i2c setup
+
+From: Jamie Iles <quic_jiles@quicinc.com>
+
+[ Upstream commit 6cbf8b38dfe3aabe330f2c356949bc4d6a1f034f ]
+
+Commit 31b9887c7258 ("i3c: remove i2c board info from i2c_dev_desc")
+removed the boardinfo from i2c_dev_desc to decouple device enumeration from
+setup but did not correctly lookup the i2c_dev_desc to store the new
+device, instead dereferencing an uninitialized variable.
+
+Lookup the device that has already been registered by address to store
+the i2c client device.
+
+Fixes: 31b9887c7258 ("i3c: remove i2c board info from i2c_dev_desc")
+Reported-by: kernel test robot <lkp@intel.com>
+Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Jamie Iles <quic_jiles@quicinc.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Link: https://lore.kernel.org/r/20220308134226.1042367-1-quic_jiles@quicinc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i3c/master.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index dee694024f280..5df943d25cf0a 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -2199,8 +2199,13 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+ * We silently ignore failures here. The bus should keep working
+ * correctly even if one or more i2c devices are not registered.
+ */
+- list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node)
++ list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
++ i2cdev = i3c_master_find_i2c_dev_by_addr(master,
++ i2cboardinfo->base.addr);
++ if (WARN_ON(!i2cdev))
++ continue;
+ i2cdev->dev = i2c_new_client_device(adap, &i2cboardinfo->base);
++ }
+
+ return 0;
+ }
+--
+2.53.0
+
--- /dev/null
+From 592c9e22fa34a0d8f4d81726630eb3281e7c6d11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:02 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware and resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index e69833213e792..c1baf1b06ccec 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -484,7 +484,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a600670d00e97..1aee44124f118 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index acdf8c69220b0..a1bb5f16d4497 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1719,6 +1719,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index f258c5f15f903..464258c6ab464 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -208,11 +208,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index af654771918cd..3c9d5a2fd7928 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -160,6 +160,8 @@ void cpu_probe(void)
+ else
+ cpu_set_nofpu_opts(c);
+
++ c->vmbits = 31;
++
+ reserve_exception_space(0, 0x400);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 592c9e22fa34a0d8f4d81726630eb3281e7c6d11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:02 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware and resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index e69833213e792..c1baf1b06ccec 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -484,7 +484,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a600670d00e97..1aee44124f118 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index acdf8c69220b0..a1bb5f16d4497 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1719,6 +1719,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index f258c5f15f903..464258c6ab464 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -208,11 +208,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index af654771918cd..3c9d5a2fd7928 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -160,6 +160,8 @@ void cpu_probe(void)
+ else
+ cpu_set_nofpu_opts(c);
+
++ c->vmbits = 31;
++
+ reserve_exception_space(0, 0x400);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 3282fa8eac829a3ad1350fb30d866e98cb0e5be8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:01 +0100
+Subject: mips: mm: Allocate tlb_vpn array atomically
+
+From: Stefan Wiehler <stefan.wiehler@nokia.com>
+
+commit 01cc50ea5167bb14117257ec084637abe9e5f691 upstream.
+
+Found by DEBUG_ATOMIC_SLEEP:
+
+ BUG: sleeping function called from invalid context at /include/linux/sched/mm.h:306
+ in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 0, name: swapper/1
+ preempt_count: 1, expected: 0
+ RCU nest depth: 0, expected: 0
+ no locks held by swapper/1/0.
+ irq event stamp: 0
+ hardirqs last enabled at (0): [<0000000000000000>] 0x0
+ hardirqs last disabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last enabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last disabled at (0): [<0000000000000000>] 0x0
+ CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.6.119-d79e757675ec-fct #1
+ Stack : 800000000290bad8 0000000000000000 0000000000000008 800000000290bae8
+ 800000000290bae8 800000000290bc78 0000000000000000 0000000000000000
+ ffffffff80c80000 0000000000000001 ffffffff80d8dee8 ffffffff810d09c0
+ 784bb2a7ec10647d 0000000000000010 ffffffff80a6fd60 8000000001d8a9c0
+ 0000000000000000 0000000000000000 ffffffff80d90000 0000000000000000
+ ffffffff80c9e0e8 0000000007ffffff 0000000000000cc0 0000000000000400
+ ffffffffffffffff 0000000000000001 0000000000000002 ffffffffc0149ed8
+ fffffffffffffffe 8000000002908000 800000000290bae0 ffffffff80a81b74
+ ffffffff80129fb0 0000000000000000 0000000000000000 0000000000000000
+ 0000000000000000 0000000000000000 ffffffff80129fd0 0000000000000000
+ ...
+ Call Trace:
+ [<ffffffff80129fd0>] show_stack+0x60/0x158
+ [<ffffffff80a7f894>] dump_stack_lvl+0x88/0xbc
+ [<ffffffff8018d3c8>] __might_resched+0x268/0x288
+ [<ffffffff803648b0>] __kmem_cache_alloc_node+0x2e0/0x330
+ [<ffffffff80302788>] __kmalloc+0x58/0xd0
+ [<ffffffff80a81b74>] r4k_tlb_uniquify+0x7c/0x428
+ [<ffffffff80143e8c>] tlb_init+0x7c/0x110
+ [<ffffffff8012bdb4>] per_cpu_trap_init+0x16c/0x1d0
+ [<ffffffff80133258>] start_secondary+0x28/0x128
+
+Fixes: 231ac951faba ("MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow")
+Signed-off-by: Stefan Wiehler <stefan.wiehler@nokia.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 78e1420471b4e..f782214d23d3b 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -528,7 +528,7 @@ static void __ref r4k_tlb_uniquify(void)
+
+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+ tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+ if (WARN_ON(!tlb_vpns))
+ return; /* Pray local_flush_tlb_all() is good enough. */
+--
+2.53.0
+
--- /dev/null
+From 3282fa8eac829a3ad1350fb30d866e98cb0e5be8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:01 +0100
+Subject: mips: mm: Allocate tlb_vpn array atomically
+
+From: Stefan Wiehler <stefan.wiehler@nokia.com>
+
+commit 01cc50ea5167bb14117257ec084637abe9e5f691 upstream.
+
+Found by DEBUG_ATOMIC_SLEEP:
+
+ BUG: sleeping function called from invalid context at /include/linux/sched/mm.h:306
+ in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 0, name: swapper/1
+ preempt_count: 1, expected: 0
+ RCU nest depth: 0, expected: 0
+ no locks held by swapper/1/0.
+ irq event stamp: 0
+ hardirqs last enabled at (0): [<0000000000000000>] 0x0
+ hardirqs last disabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last enabled at (0): [<ffffffff801477fc>] copy_process+0x75c/0x1b68
+ softirqs last disabled at (0): [<0000000000000000>] 0x0
+ CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.6.119-d79e757675ec-fct #1
+ Stack : 800000000290bad8 0000000000000000 0000000000000008 800000000290bae8
+ 800000000290bae8 800000000290bc78 0000000000000000 0000000000000000
+ ffffffff80c80000 0000000000000001 ffffffff80d8dee8 ffffffff810d09c0
+ 784bb2a7ec10647d 0000000000000010 ffffffff80a6fd60 8000000001d8a9c0
+ 0000000000000000 0000000000000000 ffffffff80d90000 0000000000000000
+ ffffffff80c9e0e8 0000000007ffffff 0000000000000cc0 0000000000000400
+ ffffffffffffffff 0000000000000001 0000000000000002 ffffffffc0149ed8
+ fffffffffffffffe 8000000002908000 800000000290bae0 ffffffff80a81b74
+ ffffffff80129fb0 0000000000000000 0000000000000000 0000000000000000
+ 0000000000000000 0000000000000000 ffffffff80129fd0 0000000000000000
+ ...
+ Call Trace:
+ [<ffffffff80129fd0>] show_stack+0x60/0x158
+ [<ffffffff80a7f894>] dump_stack_lvl+0x88/0xbc
+ [<ffffffff8018d3c8>] __might_resched+0x268/0x288
+ [<ffffffff803648b0>] __kmem_cache_alloc_node+0x2e0/0x330
+ [<ffffffff80302788>] __kmalloc+0x58/0xd0
+ [<ffffffff80a81b74>] r4k_tlb_uniquify+0x7c/0x428
+ [<ffffffff80143e8c>] tlb_init+0x7c/0x110
+ [<ffffffff8012bdb4>] per_cpu_trap_init+0x16c/0x1d0
+ [<ffffffff80133258>] start_secondary+0x28/0x128
+
+Fixes: 231ac951faba ("MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow")
+Signed-off-by: Stefan Wiehler <stefan.wiehler@nokia.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 78e1420471b4e..f782214d23d3b 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -528,7 +528,7 @@ static void __ref r4k_tlb_uniquify(void)
+
+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+ tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+ if (WARN_ON(!tlb_vpns))
+ return; /* Pray local_flush_tlb_all() is good enough. */
+--
+2.53.0
+
--- /dev/null
+From 2c48f1c099fed7cca9db8605c1353ba0daa3cff9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:00 +0100
+Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream.
+
+Owing to Config4.MMUSizeExt and VTLB/FTLB MMU features later MIPSr2+
+cores can have more than 64 TLB entries. Therefore allocate an array
+for uniquification instead of placing too an small array on the stack.
+
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Co-developed-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+Cc: stable@vger.kernel.org # v6.17+
+Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+[ Use memblock_free_ptr() for 5.15.y. ]
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index d9a5ede8869bd..78e1420471b4e 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -12,6 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/smp.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -512,17 +513,26 @@ static int r4k_vpn_cmp(const void *a, const void *b)
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+-static void r4k_tlb_uniquify(void)
++static void __ref r4k_tlb_uniquify(void)
+ {
+- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
+ int start = num_wired_entries();
++ phys_addr_t tlb_vpn_size;
++ unsigned long *tlb_vpns;
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
+ htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+@@ -575,6 +585,10 @@ static void r4k_tlb_uniquify(void)
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
++ if (use_slab)
++ kfree(tlb_vpns);
++ else
++ memblock_free_ptr(tlb_vpns, tlb_vpn_size);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 2c48f1c099fed7cca9db8605c1353ba0daa3cff9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:00 +0100
+Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream.
+
+Owing to Config4.MMUSizeExt and VTLB/FTLB MMU features later MIPSr2+
+cores can have more than 64 TLB entries. Therefore allocate an array
+for uniquification instead of placing too an small array on the stack.
+
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Co-developed-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+Cc: stable@vger.kernel.org # v6.17+
+Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+[ Use memblock_free_ptr() for 5.15.y. ]
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index d9a5ede8869bd..78e1420471b4e 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -12,6 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/smp.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -512,17 +513,26 @@ static int r4k_vpn_cmp(const void *a, const void *b)
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+-static void r4k_tlb_uniquify(void)
++static void __ref r4k_tlb_uniquify(void)
+ {
+- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
+ int start = num_wired_entries();
++ phys_addr_t tlb_vpn_size;
++ unsigned long *tlb_vpns;
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
+ htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+@@ -575,6 +585,10 @@ static void r4k_tlb_uniquify(void)
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
++ if (use_slab)
++ kfree(tlb_vpns);
++ else
++ memblock_free_ptr(tlb_vpns, tlb_vpn_size);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 9d076fdca2358182d6b859818cc19761b0279a2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:04 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index ae7436b860b58..da5a9b699b683 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbmisc.h>
+
+ extern void build_tlb_refill_handler(void);
+@@ -501,87 +503,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From 9d076fdca2358182d6b859818cc19761b0279a2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:04 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index ae7436b860b58..da5a9b699b683 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbmisc.h>
+
+ extern void build_tlb_refill_handler(void);
+@@ -501,87 +503,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From 47cbcf01f2b3af9757688401ec11053cc591faf7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:03 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index f782214d23d3b..ae7436b860b58 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -630,7 +630,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
--- /dev/null
+From 47cbcf01f2b3af9757688401ec11053cc591faf7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 18:20:03 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index f782214d23d3b..ae7436b860b58 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -630,7 +630,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
--- /dev/null
+From fb799fb90a5fca6b5e6d8c3b62d897a66a9cd1b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index def356f828cd8..da00a770ca6d6 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3486,7 +3486,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7ffd698497f2a..90458799324ec 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -600,7 +600,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From fb799fb90a5fca6b5e6d8c3b62d897a66a9cd1b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index def356f828cd8..da00a770ca6d6 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3486,7 +3486,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7ffd698497f2a..90458799324ec 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -600,7 +600,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
perf-x86-intel-uncore-skip-discovery-table-for-offli.patch
clockevents-prevent-timer-interrupt-starvation.patch
crypto-algif_aead-fix-minimum-rx-size-check-for-decr.patch
+i3c-fix-uninitialized-variable-use-in-i2c-setup.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch
+mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflo.patch
+mips-mm-allocate-tlb_vpn-array-atomically.patch
+mips-always-record-segbits-in-cpu_data.vmbits.patch
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch
+alsa-usb-audio-update-for-native-dsd-support-quirks.patch
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch
+batman-adv-hold-claim-backbone-gateways-by-reference.patch
+i3c-fix-uninitialized-variable-use-in-i2c-setup.patch-13675
+netfilter-conntrack-add-missing-netlink-policy-valid.patch-15093
+mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflo.patch-32193
+mips-mm-allocate-tlb_vpn-array-atomically.patch-25779
+mips-always-record-segbits-in-cpu_data.vmbits.patch-29682
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch-19646
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch-29791
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch-5842
+alsa-usb-audio-update-for-native-dsd-support-quirks.patch-14145
+alsa-usb-audio-remove-validate_rates-quirk-for-focus.patch-10820
+batman-adv-hold-claim-backbone-gateways-by-reference.patch-32502
--- /dev/null
+From fc64cb98f74170b3f76a0b78faab0ef2ac69ab11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index f33d25a4e4cc7..682adbdf7ee79 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -304,9 +304,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -314,8 +342,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -327,26 +357,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From fc64cb98f74170b3f76a0b78faab0ef2ac69ab11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index f33d25a4e4cc7..682adbdf7ee79 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -304,9 +304,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -314,8 +342,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -327,26 +357,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 84ed5fb14698b3e6c641c1f625b3b5310dc4c95c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 14:59:00 -0400
+Subject: drm/i915/psr: Do not use pipe_src as borders for SU area
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+[ Upstream commit 75519f5df2a9b23f7bf305e12dc9a6e3e65c24b7 ]
+
+This far using crtc_state->pipe_src as borders for Selective Update area
+haven't caused visible problems as drm_rect_width(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+drm_rect_height(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_vdisplay when pipe scaling is not
+used. On the other hand using pipe scaling is forcing full frame updates and all the
+Selective Update area calculations are skipped. Now this improper usage of
+crtc_state->pipe_src is causing following warnings:
+
+<4> [7771.978166] xe 0000:00:02.0: [drm] drm_WARN_ON_ONCE(su_lines % vdsc_cfg->slice_height)
+
+after WARN_ON_ONCE was added by commit:
+
+"drm/i915/dsc: Add helper for writing DSC Selective Update ET parameters"
+
+These warnings are seen when DSC and pipe scaling are enabled
+simultaneously. This is because on full frame update SU area is improperly
+set as pipe_src which is not aligned with DSC slice height.
+
+Fix these by creating local rectangle using
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+crtc_state->hw.adjusted_mode.crtc_vdisplay. Use this local rectangle as
+borders for SU area.
+
+Fixes: d6774b8c3c58 ("drm/i915: Ensure damage clip area is within pipe area")
+Cc: <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Mika Kahola <mika.kahola@intel.com>
+Link: https://patch.msgid.link/20260327114553.195285-1-jouni.hogander@intel.com
+(cherry picked from commit da0cdc1c329dd2ff09c41fbbe9fbd9c92c5d2c6e)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+[ omitted hunks for DSC selective update ET alignment infrastructure ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index e2d7c0a6802aa..a465b19293108 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1686,9 +1686,9 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
+
+ static void clip_area_update(struct drm_rect *overlap_damage_area,
+ struct drm_rect *damage_area,
+- struct drm_rect *pipe_src)
++ struct drm_rect *display_area)
+ {
+- if (!drm_rect_intersect(damage_area, pipe_src))
++ if (!drm_rect_intersect(damage_area, display_area))
+ return;
+
+ if (overlap_damage_area->y1 == -1) {
+@@ -1761,6 +1761,12 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
+ struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane *plane;
++ struct drm_rect display_area = {
++ .x1 = 0,
++ .y1 = 0,
++ .x2 = crtc_state->hw.adjusted_mode.crtc_hdisplay,
++ .y2 = crtc_state->hw.adjusted_mode.crtc_vdisplay,
++ };
+ bool full_update = false;
+ int i, ret;
+
+@@ -1807,14 +1813,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
+@@ -1822,7 +1828,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ continue;
+ }
+
+@@ -1838,7 +1844,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
+- clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
++ clip_area_update(&pipe_clip, &damaged_area, &display_area);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 84ed5fb14698b3e6c641c1f625b3b5310dc4c95c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 14:59:00 -0400
+Subject: drm/i915/psr: Do not use pipe_src as borders for SU area
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+[ Upstream commit 75519f5df2a9b23f7bf305e12dc9a6e3e65c24b7 ]
+
+This far using crtc_state->pipe_src as borders for Selective Update area
+haven't caused visible problems as drm_rect_width(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+drm_rect_height(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_vdisplay when pipe scaling is not
+used. On the other hand using pipe scaling is forcing full frame updates and all the
+Selective Update area calculations are skipped. Now this improper usage of
+crtc_state->pipe_src is causing following warnings:
+
+<4> [7771.978166] xe 0000:00:02.0: [drm] drm_WARN_ON_ONCE(su_lines % vdsc_cfg->slice_height)
+
+after WARN_ON_ONCE was added by commit:
+
+"drm/i915/dsc: Add helper for writing DSC Selective Update ET parameters"
+
+These warnings are seen when DSC and pipe scaling are enabled
+simultaneously. This is because on full frame update SU area is improperly
+set as pipe_src which is not aligned with DSC slice height.
+
+Fix these by creating local rectangle using
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+crtc_state->hw.adjusted_mode.crtc_vdisplay. Use this local rectangle as
+borders for SU area.
+
+Fixes: d6774b8c3c58 ("drm/i915: Ensure damage clip area is within pipe area")
+Cc: <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Mika Kahola <mika.kahola@intel.com>
+Link: https://patch.msgid.link/20260327114553.195285-1-jouni.hogander@intel.com
+(cherry picked from commit da0cdc1c329dd2ff09c41fbbe9fbd9c92c5d2c6e)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+[ omitted hunks for DSC selective update ET alignment infrastructure ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index e2d7c0a6802aa..a465b19293108 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1686,9 +1686,9 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
+
+ static void clip_area_update(struct drm_rect *overlap_damage_area,
+ struct drm_rect *damage_area,
+- struct drm_rect *pipe_src)
++ struct drm_rect *display_area)
+ {
+- if (!drm_rect_intersect(damage_area, pipe_src))
++ if (!drm_rect_intersect(damage_area, display_area))
+ return;
+
+ if (overlap_damage_area->y1 == -1) {
+@@ -1761,6 +1761,12 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
+ struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane *plane;
++ struct drm_rect display_area = {
++ .x1 = 0,
++ .y1 = 0,
++ .x2 = crtc_state->hw.adjusted_mode.crtc_hdisplay,
++ .y2 = crtc_state->hw.adjusted_mode.crtc_vdisplay,
++ };
+ bool full_update = false;
+ int i, ret;
+
+@@ -1807,14 +1813,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
+@@ -1822,7 +1828,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ continue;
+ }
+
+@@ -1838,7 +1844,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
+- clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
++ clip_area_update(&pipe_clip, &damaged_area, &display_area);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 69373f47f58c6dd22ad4ce7e75b0c99e8d2cb591 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 89cec02de68ba..bcbd77608365a 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3458,7 +3458,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7ffd698497f2a..90458799324ec 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -600,7 +600,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From 69373f47f58c6dd22ad4ce7e75b0c99e8d2cb591 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 89cec02de68ba..bcbd77608365a 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3458,7 +3458,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7ffd698497f2a..90458799324ec 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -600,7 +600,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From c6cc3d769b300572ad203a24e5278cfa6f00c4c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 10:05:47 +0200
+Subject: Revert "drm: Fix use-after-free on framebuffers and property blobs
+ when calling drm_dev_unplug"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maarten Lankhorst <dev@lankhorst.se>
+
+commit 45ebe43ea00d6b9f5b3e0db9c35b8ca2a96b7e70 upstream.
+
+This reverts commit 6bee098b91417654703e17eb5c1822c6dfd0c01d.
+
+Den 2026-03-25 kl. 22:11, skrev Simona Vetter:
+> On Wed, Mar 25, 2026 at 10:26:40AM -0700, Guenter Roeck wrote:
+>> Hi,
+>>
+>> On Fri, Mar 13, 2026 at 04:17:27PM +0100, Maarten Lankhorst wrote:
+>>> When trying to do a rather aggressive test of igt's "xe_module_load
+>>> --r reload" with a full desktop environment and game running I noticed
+>>> a few OOPSes when dereferencing freed pointers, related to
+>>> framebuffers and property blobs after the compositor exits.
+>>>
+>>> Solve this by guarding the freeing in drm_file with drm_dev_enter/exit,
+>>> and immediately put the references from struct drm_file objects during
+>>> drm_dev_unplug().
+>>>
+>>
+>> With this patch in v6.18.20, I get the warning backtraces below.
+>> The backtraces are gone with the patch reverted.
+>
+> Yeah, this needs to be reverted, reasoning below. Maarten, can you please
+> take care of that and feed the revert through the usual channels? I don't
+> think it's critical enough that we need to fast-track this into drm.git
+> directly.
+>
+> Quoting the patch here again:
+>
+>> drivers/gpu/drm/drm_file.c| 5 ++++-
+>> drivers/gpu/drm/drm_mode_config.c | 9 ++++++---
+>> 2 files changed, 10 insertions(+), 4 deletions(-)
+>>
+>> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+>> index ec820686b3021..f52141f842a1f 100644
+>> --- a/drivers/gpu/drm/drm_file.c
+>> +++ b/drivers/gpu/drm/drm_file.c
+>> @@ -233,6 +233,7 @@ static void drm_events_release(struct drm_file *file_priv)
+>> void drm_file_free(struct drm_file *file)
+>> {
+>> struct drm_device *dev;
+>> +int idx;
+>>
+>> if (!file)
+>> return;
+>> @@ -249,9 +250,11 @@ void drm_file_free(struct drm_file *file)
+>>
+>> drm_events_release(file);
+>>
+>> -if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+>> +if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+>> +drm_dev_enter(dev, &idx)) {
+>
+> This is misplaced for two reasons:
+>
+> - Even if we'd want to guarantee that we hold a drm_dev_enter/exit
+> reference during framebuffer teardown, we'd need to do this
+> _consistently over all callsites. Not ad-hoc in just one place that a
+> testcase hits. This also means kerneldoc updates of the relevant hooks
+> and at least a bunch of acks from other driver people to document the
+> consensus.
+>
+> - More importantly, this is driver responsibilities in general unless we
+> have extremely good reasons to the contrary. Which means this must be
+> placed in xe.
+>
+>> drm_fb_release(file);
+>> drm_property_destroy_user_blobs(dev, file);
+>> +drm_dev_exit(idx);
+>> }
+>>
+>> if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+>> diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
+>> index 84ae8a23a3678..e349418978f79 100644
+>> --- a/drivers/gpu/drm/drm_mode_config.c
+>> +++ b/drivers/gpu/drm/drm_mode_config.c
+>> @@ -583,10 +583,13 @@ void drm_mode_config_cleanup(struct drm_device *dev)
+>> */
+>> WARN_ON(!list_empty(&dev->mode_config.fb_list));
+>> list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+>> -struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
+>> +if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) {
+>> +struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
+>
+> This is also wrong:
+>
+> - Firstly, it's a completely independent bug, we do not smash two bugfixes
+> into one patch.
+>
+> - Secondly, it's again a driver bug: drm_mode_cleanup must be called when
+> the last drm_device reference disappears (hence the existence of
+> drmm_mode_config_init), not when the driver gets unbound. The fact that
+> this shows up in a callchain from a devres cleanup means the intel
+> driver gets this wrong (like almost everyone else because historically
+> we didn't know better).
+>
+> If we don't follow this rule, then we get races with this code here
+> running concurrently with drm_file fb cleanups, which just does not
+> work. Review pointed that out, but then shrugged it off with a confused
+> explanation:
+>
+> https://lore.kernel.org/all/e61e64c796ccfb17ae673331a3df4b877bf42d82.camel@linux.intel.com/
+>
+> Yes this also means a lot of the other drm_device teardown that drivers
+> do happens way too early. There is a massive can of worms here of a
+> magnitude that most likely is much, much bigger than what you can
+> backport to stable kernels. Hotunplug is _hard_.
+
+Back to the drawing board, and fixing it in the intel display driver
+instead.
+
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Fixes: 6bee098b9141 ("drm: Fix use-after-free on framebuffers and property blobs when calling drm_dev_unplug")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Acked-by: Simona Vetter <simona.vetter@ffwll.ch>
+Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
+Link: https://patch.msgid.link/20260326082217.39941-2-dev@lankhorst.se
+[ Thorsten: adjust to the v6.6.y/v6.6.y backports of 6bee098b9141 ]
+Signed-off-by: Thorsten Leemhuis <linux@leemhuis.info>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_file.c | 5 +----
+ drivers/gpu/drm/drm_mode_config.c | 9 +++------
+ 2 files changed, 4 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index 3722c796e632f..d6a0572984b54 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -239,7 +239,6 @@ static void drm_events_release(struct drm_file *file_priv)
+ void drm_file_free(struct drm_file *file)
+ {
+ struct drm_device *dev;
+- int idx;
+
+ if (!file)
+ return;
+@@ -265,11 +264,9 @@ void drm_file_free(struct drm_file *file)
+
+ drm_events_release(file);
+
+- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+- drm_dev_enter(dev, &idx)) {
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_fb_release(file);
+ drm_property_destroy_user_blobs(dev, file);
+- drm_dev_exit(idx);
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
+index 8c844bce4f28a..8525ef8515406 100644
+--- a/drivers/gpu/drm/drm_mode_config.c
++++ b/drivers/gpu/drm/drm_mode_config.c
+@@ -544,13 +544,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+- if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) {
+- struct drm_printer p = drm_debug_printer("[leaked fb]");
++ struct drm_printer p = drm_debug_printer("[leaked fb]");
+
+- drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
+- drm_framebuffer_print_info(&p, 1, fb);
+- }
+- list_del_init(&fb->filp_head);
++ drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
++ drm_framebuffer_print_info(&p, 1, fb);
+ drm_framebuffer_free(&fb->base.refcount);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From c6cc3d769b300572ad203a24e5278cfa6f00c4c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 10:05:47 +0200
+Subject: Revert "drm: Fix use-after-free on framebuffers and property blobs
+ when calling drm_dev_unplug"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maarten Lankhorst <dev@lankhorst.se>
+
+commit 45ebe43ea00d6b9f5b3e0db9c35b8ca2a96b7e70 upstream.
+
+This reverts commit 6bee098b91417654703e17eb5c1822c6dfd0c01d.
+
+Den 2026-03-25 kl. 22:11, skrev Simona Vetter:
+> On Wed, Mar 25, 2026 at 10:26:40AM -0700, Guenter Roeck wrote:
+>> Hi,
+>>
+>> On Fri, Mar 13, 2026 at 04:17:27PM +0100, Maarten Lankhorst wrote:
+>>> When trying to do a rather aggressive test of igt's "xe_module_load
+>>> --r reload" with a full desktop environment and game running I noticed
+>>> a few OOPSes when dereferencing freed pointers, related to
+>>> framebuffers and property blobs after the compositor exits.
+>>>
+>>> Solve this by guarding the freeing in drm_file with drm_dev_enter/exit,
+>>> and immediately put the references from struct drm_file objects during
+>>> drm_dev_unplug().
+>>>
+>>
+>> With this patch in v6.18.20, I get the warning backtraces below.
+>> The backtraces are gone with the patch reverted.
+>
+> Yeah, this needs to be reverted, reasoning below. Maarten, can you please
+> take care of that and feed the revert through the usual channels? I don't
+> think it's critical enough that we need to fast-track this into drm.git
+> directly.
+>
+> Quoting the patch here again:
+>
+>> drivers/gpu/drm/drm_file.c| 5 ++++-
+>> drivers/gpu/drm/drm_mode_config.c | 9 ++++++---
+>> 2 files changed, 10 insertions(+), 4 deletions(-)
+>>
+>> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+>> index ec820686b3021..f52141f842a1f 100644
+>> --- a/drivers/gpu/drm/drm_file.c
+>> +++ b/drivers/gpu/drm/drm_file.c
+>> @@ -233,6 +233,7 @@ static void drm_events_release(struct drm_file *file_priv)
+>> void drm_file_free(struct drm_file *file)
+>> {
+>> struct drm_device *dev;
+>> +int idx;
+>>
+>> if (!file)
+>> return;
+>> @@ -249,9 +250,11 @@ void drm_file_free(struct drm_file *file)
+>>
+>> drm_events_release(file);
+>>
+>> -if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+>> +if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+>> +drm_dev_enter(dev, &idx)) {
+>
+> This is misplaced for two reasons:
+>
+> - Even if we'd want to guarantee that we hold a drm_dev_enter/exit
+> reference during framebuffer teardown, we'd need to do this
+> _consistently over all callsites. Not ad-hoc in just one place that a
+> testcase hits. This also means kerneldoc updates of the relevant hooks
+> and at least a bunch of acks from other driver people to document the
+> consensus.
+>
+> - More importantly, this is driver responsibilities in general unless we
+> have extremely good reasons to the contrary. Which means this must be
+> placed in xe.
+>
+>> drm_fb_release(file);
+>> drm_property_destroy_user_blobs(dev, file);
+>> +drm_dev_exit(idx);
+>> }
+>>
+>> if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+>> diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
+>> index 84ae8a23a3678..e349418978f79 100644
+>> --- a/drivers/gpu/drm/drm_mode_config.c
+>> +++ b/drivers/gpu/drm/drm_mode_config.c
+>> @@ -583,10 +583,13 @@ void drm_mode_config_cleanup(struct drm_device *dev)
+>> */
+>> WARN_ON(!list_empty(&dev->mode_config.fb_list));
+>> list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+>> -struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
+>> +if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) {
+>> +struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
+>
+> This is also wrong:
+>
+> - Firstly, it's a completely independent bug, we do not smash two bugfixes
+> into one patch.
+>
+> - Secondly, it's again a driver bug: drm_mode_cleanup must be called when
+> the last drm_device reference disappears (hence the existence of
+> drmm_mode_config_init), not when the driver gets unbound. The fact that
+> this shows up in a callchain from a devres cleanup means the intel
+> driver gets this wrong (like almost everyone else because historically
+> we didn't know better).
+>
+> If we don't follow this rule, then we get races with this code here
+> running concurrently with drm_file fb cleanups, which just does not
+> work. Review pointed that out, but then shrugged it off with a confused
+> explanation:
+>
+> https://lore.kernel.org/all/e61e64c796ccfb17ae673331a3df4b877bf42d82.camel@linux.intel.com/
+>
+> Yes this also means a lot of the other drm_device teardown that drivers
+> do happens way too early. There is a massive can of worms here of a
+> magnitude that most likely is much, much bigger than what you can
+> backport to stable kernels. Hotunplug is _hard_.
+
+Back to the drawing board, and fixing it in the intel display driver
+instead.
+
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Fixes: 6bee098b9141 ("drm: Fix use-after-free on framebuffers and property blobs when calling drm_dev_unplug")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Acked-by: Simona Vetter <simona.vetter@ffwll.ch>
+Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
+Link: https://patch.msgid.link/20260326082217.39941-2-dev@lankhorst.se
+[ Thorsten: adjust to the v6.6.y/v6.6.y backports of 6bee098b9141 ]
+Signed-off-by: Thorsten Leemhuis <linux@leemhuis.info>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_file.c | 5 +----
+ drivers/gpu/drm/drm_mode_config.c | 9 +++------
+ 2 files changed, 4 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index 3722c796e632f..d6a0572984b54 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -239,7 +239,6 @@ static void drm_events_release(struct drm_file *file_priv)
+ void drm_file_free(struct drm_file *file)
+ {
+ struct drm_device *dev;
+- int idx;
+
+ if (!file)
+ return;
+@@ -265,11 +264,9 @@ void drm_file_free(struct drm_file *file)
+
+ drm_events_release(file);
+
+- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+- drm_dev_enter(dev, &idx)) {
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_fb_release(file);
+ drm_property_destroy_user_blobs(dev, file);
+- drm_dev_exit(idx);
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
+index 8c844bce4f28a..8525ef8515406 100644
+--- a/drivers/gpu/drm/drm_mode_config.c
++++ b/drivers/gpu/drm/drm_mode_config.c
+@@ -544,13 +544,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+- if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) {
+- struct drm_printer p = drm_debug_printer("[leaked fb]");
++ struct drm_printer p = drm_debug_printer("[leaked fb]");
+
+- drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
+- drm_framebuffer_print_info(&p, 1, fb);
+- }
+- list_del_init(&fb->filp_head);
++ drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
++ drm_framebuffer_print_info(&p, 1, fb);
+ drm_framebuffer_free(&fb->base.refcount);
+ }
+
+--
+2.53.0
+
perf-x86-intel-uncore-skip-discovery-table-for-offli.patch
clockevents-prevent-timer-interrupt-starvation.patch
crypto-algif_aead-fix-minimum-rx-size-check-for-decr.patch
+revert-drm-fix-use-after-free-on-framebuffers-and-pr.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch
+drm-i915-psr-do-not-use-pipe_src-as-borders-for-su-a.patch
+revert-drm-fix-use-after-free-on-framebuffers-and-pr.patch-14736
+netfilter-conntrack-add-missing-netlink-policy-valid.patch-11295
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch-3427
+drm-i915-psr-do-not-use-pipe_src-as-borders-for-su-a.patch-15118
--- /dev/null
+From ff76bc32d4ef2dfadd9f2f3e00c6f4d3797c2520 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index a9283b2bd2f4e..7041633b02947 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -301,9 +301,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -311,8 +339,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -324,26 +354,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From ff76bc32d4ef2dfadd9f2f3e00c6f4d3797c2520 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index a9283b2bd2f4e..7041633b02947 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -301,9 +301,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -311,8 +339,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -324,26 +354,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 68b3a49bc26716400394db2088dd503ebe01019b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 10:07:50 -0700
+Subject: idpf: fix PREEMPT_RT raw/bh spinlock nesting for async VC handling
+
+From: Emil Tantilov <emil.s.tantilov@intel.com>
+
+[ Upstream commit 591478118293c1bd628de330a99eb1eb2ef8d76b ]
+
+Switch from using the completion's raw spinlock to a local lock in the
+idpf_vc_xn struct. The conversion is safe because complete/_all() are
+called outside the lock and there is no reason to share the completion
+lock in the current logic. This avoids invalid wait context reported by
+the kernel due to the async handler taking BH spinlock:
+
+[ 805.726977] =============================
+[ 805.726991] [ BUG: Invalid wait context ]
+[ 805.727006] 7.0.0-rc2-net-devq-031026+ #28 Tainted: G S OE
+[ 805.727026] -----------------------------
+[ 805.727038] kworker/u261:0/572 is trying to lock:
+[ 805.727051] ff190da6a8dbb6a0 (&vport_config->mac_filter_list_lock){+...}-{3:3}, at: idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727099] other info that might help us debug this:
+[ 805.727111] context-{5:5}
+[ 805.727119] 3 locks held by kworker/u261:0/572:
+[ 805.727132] #0: ff190da6db3e6148 ((wq_completion)idpf-0000:83:00.0-mbx){+.+.}-{0:0}, at: process_one_work+0x4b5/0x730
+[ 805.727163] #1: ff3c6f0a6131fe50 ((work_completion)(&(&adapter->mbx_task)->work)){+.+.}-{0:0}, at: process_one_work+0x1e5/0x730
+[ 805.727191] #2: ff190da765190020 (&x->wait#34){+.+.}-{2:2}, at: idpf_recv_mb_msg+0xc8/0x710 [idpf]
+[ 805.727218] stack backtrace:
+...
+[ 805.727238] Workqueue: idpf-0000:83:00.0-mbx idpf_mbx_task [idpf]
+[ 805.727247] Call Trace:
+[ 805.727249] <TASK>
+[ 805.727251] dump_stack_lvl+0x77/0xb0
+[ 805.727259] __lock_acquire+0xb3b/0x2290
+[ 805.727268] ? __irq_work_queue_local+0x59/0x130
+[ 805.727275] lock_acquire+0xc6/0x2f0
+[ 805.727277] ? idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727284] ? _printk+0x5b/0x80
+[ 805.727290] _raw_spin_lock_bh+0x38/0x50
+[ 805.727298] ? idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727303] idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727310] idpf_recv_mb_msg+0x1c8/0x710 [idpf]
+[ 805.727317] process_one_work+0x226/0x730
+[ 805.727322] worker_thread+0x19e/0x340
+[ 805.727325] ? __pfx_worker_thread+0x10/0x10
+[ 805.727328] kthread+0xf4/0x130
+[ 805.727333] ? __pfx_kthread+0x10/0x10
+[ 805.727336] ret_from_fork+0x32c/0x410
+[ 805.727345] ? __pfx_kthread+0x10/0x10
+[ 805.727347] ret_from_fork_asm+0x1a/0x30
+[ 805.727354] </TASK>
+
+Fixes: 34c21fa894a1 ("idpf: implement virtchnl transaction manager")
+Cc: stable@vger.kernel.org
+Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Ray Zhang <sgzhang@google.com>
+Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+index a3d4a0185c542..c4ae7d62aaf8d 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -59,6 +59,7 @@ typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ */
+ struct idpf_vc_xn {
+ struct completion completed;
++ spinlock_t lock;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+@@ -312,26 +313,21 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ return err;
+ }
+
+-/* API for virtchnl "transaction" support ("xn" for short).
+- *
+- * We are reusing the completion lock to serialize the accesses to the
+- * transaction state for simplicity, but it could be its own separate synchro
+- * as well. For now, this API is only used from within a workqueue context;
+- * raw_spin_lock() is enough.
+- */
++/* API for virtchnl "transaction" support ("xn" for short). */
++
+ /**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+ #define idpf_vc_xn_lock(xn) \
+- raw_spin_lock(&(xn)->completed.wait.lock)
++ spin_lock(&(xn)->lock)
+
+ /**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+ #define idpf_vc_xn_unlock(xn) \
+- raw_spin_unlock(&(xn)->completed.wait.lock)
++ spin_unlock(&(xn)->lock)
+
+ /**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+@@ -363,6 +359,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
++ spin_lock_init(&xn->lock);
+ init_completion(&xn->completed);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 68b3a49bc26716400394db2088dd503ebe01019b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 10:07:50 -0700
+Subject: idpf: fix PREEMPT_RT raw/bh spinlock nesting for async VC handling
+
+From: Emil Tantilov <emil.s.tantilov@intel.com>
+
+[ Upstream commit 591478118293c1bd628de330a99eb1eb2ef8d76b ]
+
+Switch from using the completion's raw spinlock to a local lock in the
+idpf_vc_xn struct. The conversion is safe because complete/_all() are
+called outside the lock and there is no reason to share the completion
+lock in the current logic. This avoids invalid wait context reported by
+the kernel due to the async handler taking BH spinlock:
+
+[ 805.726977] =============================
+[ 805.726991] [ BUG: Invalid wait context ]
+[ 805.727006] 7.0.0-rc2-net-devq-031026+ #28 Tainted: G S OE
+[ 805.727026] -----------------------------
+[ 805.727038] kworker/u261:0/572 is trying to lock:
+[ 805.727051] ff190da6a8dbb6a0 (&vport_config->mac_filter_list_lock){+...}-{3:3}, at: idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727099] other info that might help us debug this:
+[ 805.727111] context-{5:5}
+[ 805.727119] 3 locks held by kworker/u261:0/572:
+[ 805.727132] #0: ff190da6db3e6148 ((wq_completion)idpf-0000:83:00.0-mbx){+.+.}-{0:0}, at: process_one_work+0x4b5/0x730
+[ 805.727163] #1: ff3c6f0a6131fe50 ((work_completion)(&(&adapter->mbx_task)->work)){+.+.}-{0:0}, at: process_one_work+0x1e5/0x730
+[ 805.727191] #2: ff190da765190020 (&x->wait#34){+.+.}-{2:2}, at: idpf_recv_mb_msg+0xc8/0x710 [idpf]
+[ 805.727218] stack backtrace:
+...
+[ 805.727238] Workqueue: idpf-0000:83:00.0-mbx idpf_mbx_task [idpf]
+[ 805.727247] Call Trace:
+[ 805.727249] <TASK>
+[ 805.727251] dump_stack_lvl+0x77/0xb0
+[ 805.727259] __lock_acquire+0xb3b/0x2290
+[ 805.727268] ? __irq_work_queue_local+0x59/0x130
+[ 805.727275] lock_acquire+0xc6/0x2f0
+[ 805.727277] ? idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727284] ? _printk+0x5b/0x80
+[ 805.727290] _raw_spin_lock_bh+0x38/0x50
+[ 805.727298] ? idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727303] idpf_mac_filter_async_handler+0xe9/0x260 [idpf]
+[ 805.727310] idpf_recv_mb_msg+0x1c8/0x710 [idpf]
+[ 805.727317] process_one_work+0x226/0x730
+[ 805.727322] worker_thread+0x19e/0x340
+[ 805.727325] ? __pfx_worker_thread+0x10/0x10
+[ 805.727328] kthread+0xf4/0x130
+[ 805.727333] ? __pfx_kthread+0x10/0x10
+[ 805.727336] ret_from_fork+0x32c/0x410
+[ 805.727345] ? __pfx_kthread+0x10/0x10
+[ 805.727347] ret_from_fork_asm+0x1a/0x30
+[ 805.727354] </TASK>
+
+Fixes: 34c21fa894a1 ("idpf: implement virtchnl transaction manager")
+Cc: stable@vger.kernel.org
+Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Ray Zhang <sgzhang@google.com>
+Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+index a3d4a0185c542..c4ae7d62aaf8d 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -59,6 +59,7 @@ typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ */
+ struct idpf_vc_xn {
+ struct completion completed;
++ spinlock_t lock;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+@@ -312,26 +313,21 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ return err;
+ }
+
+-/* API for virtchnl "transaction" support ("xn" for short).
+- *
+- * We are reusing the completion lock to serialize the accesses to the
+- * transaction state for simplicity, but it could be its own separate synchro
+- * as well. For now, this API is only used from within a workqueue context;
+- * raw_spin_lock() is enough.
+- */
++/* API for virtchnl "transaction" support ("xn" for short). */
++
+ /**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+ #define idpf_vc_xn_lock(xn) \
+- raw_spin_lock(&(xn)->completed.wait.lock)
++ spin_lock(&(xn)->lock)
+
+ /**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+ #define idpf_vc_xn_unlock(xn) \
+- raw_spin_unlock(&(xn)->completed.wait.lock)
++ spin_unlock(&(xn)->lock)
+
+ /**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+@@ -363,6 +359,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
++ spin_lock_init(&xn->lock);
+ init_completion(&xn->completed);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From fdb1ef358c9b4b48fa24937e35f5c8113c4df8e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 323e147fe282b..f51cdfba68fbd 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3460,7 +3460,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 4cc97f971264e..fabb2c1ca00ab 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -587,7 +587,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From fdb1ef358c9b4b48fa24937e35f5c8113c4df8e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 323e147fe282b..f51cdfba68fbd 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3460,7 +3460,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 4cc97f971264e..fabb2c1ca00ab 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -587,7 +587,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From 5989ee54e687264d86eaf6ce561ed9338f2f8f65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Apr 2026 12:01:46 -0400
+Subject: Revert "drm/xe/mmio: Avoid double-adjust in 64-bit reads"
+
+This reverts commit 8f6848b2f6eadd903d29572ba0a684eda1e2f4ef.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_mmio.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
+index 449e6c5636712..9ea0973337eda 100644
+--- a/drivers/gpu/drm/xe/xe_mmio.c
++++ b/drivers/gpu/drm/xe/xe_mmio.c
+@@ -316,11 +316,11 @@ u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
+ struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
+ u32 ldw, udw, oldudw, retries;
+
+- /*
+- * The two dwords of a 64-bit register can never straddle the offset
+- * adjustment cutoff.
+- */
+- xe_tile_assert(mmio->tile, !in_range(mmio->adj_limit, reg.addr + 1, 7));
++ reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
++
++ /* we shouldn't adjust just one register address */
++ xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
+
+ oldudw = xe_mmio_read32(mmio, reg_udw);
+ for (retries = 5; retries; --retries) {
+--
+2.53.0
+
--- /dev/null
+From 5989ee54e687264d86eaf6ce561ed9338f2f8f65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Apr 2026 12:01:46 -0400
+Subject: Revert "drm/xe/mmio: Avoid double-adjust in 64-bit reads"
+
+This reverts commit 8f6848b2f6eadd903d29572ba0a684eda1e2f4ef.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_mmio.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
+index 449e6c5636712..9ea0973337eda 100644
+--- a/drivers/gpu/drm/xe/xe_mmio.c
++++ b/drivers/gpu/drm/xe/xe_mmio.c
+@@ -316,11 +316,11 @@ u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
+ struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
+ u32 ldw, udw, oldudw, retries;
+
+- /*
+- * The two dwords of a 64-bit register can never straddle the offset
+- * adjustment cutoff.
+- */
+- xe_tile_assert(mmio->tile, !in_range(mmio->adj_limit, reg.addr + 1, 7));
++ reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
++
++ /* we shouldn't adjust just one register address */
++ xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
+
+ oldudw = xe_mmio_read32(mmio, reg_udw);
+ for (retries = 5; retries; --retries) {
+--
+2.53.0
+
--- /dev/null
+From 798435b5f812079154555d2fa5e5a9682597592d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Apr 2026 12:01:47 -0400
+Subject: Revert "drm/xe: Switch MMIO interface to take xe_mmio instead of
+ xe_gt"
+
+This reverts commit 26a40327c25c005c1653d66e7b1d8de0fbee15a4.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_mmio.c | 131 ++++++++++++++++++----------------
+ drivers/gpu/drm/xe/xe_mmio.h | 76 +++++---------------
+ drivers/gpu/drm/xe/xe_trace.h | 7 +-
+ 3 files changed, 88 insertions(+), 126 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
+index 9ea0973337eda..46924f4042418 100644
+--- a/drivers/gpu/drm/xe/xe_mmio.c
++++ b/drivers/gpu/drm/xe/xe_mmio.c
+@@ -67,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
+
+ /* Possibly override number of tile based on configuration register */
+ if (!xe->info.skip_mtcfg) {
+- struct xe_mmio *mmio = xe_root_tile_mmio(xe);
++ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ u8 tile_count;
+ u32 mtcfg;
+
+ /*
+ * Although the per-tile mmio regs are not yet initialized, this
+- * is fine as it's going to the root tile's mmio, that's
+- * guaranteed to be initialized earlier in xe_mmio_init()
++ * is fine as it's going to the root gt, that's guaranteed to be
++ * initialized earlier in xe_mmio_init()
+ */
+- mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
++ mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
+ tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
+ if (tile_count < xe->info.tile_count) {
+@@ -187,111 +187,116 @@ int xe_mmio_init(struct xe_device *xe)
+ return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
+ }
+
+-static void mmio_flush_pending_writes(struct xe_mmio *mmio)
++static void mmio_flush_pending_writes(struct xe_gt *gt)
+ {
+ #define DUMMY_REG_OFFSET 0x130030
++ struct xe_tile *tile = gt_to_tile(gt);
+ int i;
+
+- if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
++ if (tile->xe->info.platform != XE_LUNARLAKE)
+ return;
+
+ /* 4 dummy writes */
+ for (i = 0; i < 4; i++)
+- writel(0, mmio->regs + DUMMY_REG_OFFSET);
++ writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
+ }
+
+-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
++u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u8 val;
+
+ /* Wa_15015404425 */
+- mmio_flush_pending_writes(mmio);
++ mmio_flush_pending_writes(gt);
+
+- val = readb(mmio->regs + addr);
+- trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
++ val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
++ trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+
+ return val;
+ }
+
+-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
++u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u16 val;
+
+ /* Wa_15015404425 */
+- mmio_flush_pending_writes(mmio);
++ mmio_flush_pending_writes(gt);
+
+- val = readw(mmio->regs + addr);
+- trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
++ val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
++ trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+
+ return val;
+ }
+
+-void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
++void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+
+- trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
++ trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
+
+- if (!reg.vf && mmio->sriov_vf_gt)
+- xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
++ if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
++ xe_gt_sriov_vf_write32(gt, reg, val);
+ else
+- writel(val, mmio->regs + addr);
++ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+ }
+
+-u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
++u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 val;
+
+ /* Wa_15015404425 */
+- mmio_flush_pending_writes(mmio);
++ mmio_flush_pending_writes(gt);
+
+- if (!reg.vf && mmio->sriov_vf_gt)
+- val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
++ if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
++ val = xe_gt_sriov_vf_read32(gt, reg);
+ else
+- val = readl(mmio->regs + addr);
++ val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+
+- trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
++ trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+
+ return val;
+ }
+
+-u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
++u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
+ {
+ u32 old, reg_val;
+
+- old = xe_mmio_read32(mmio, reg);
++ old = xe_mmio_read32(gt, reg);
+ reg_val = (old & ~clr) | set;
+- xe_mmio_write32(mmio, reg, reg_val);
++ xe_mmio_write32(gt, reg, reg_val);
+
+ return old;
+ }
+
+-int __xe_mmio_write32_and_verify(struct xe_mmio *mmio,
+- struct xe_reg reg, u32 val, u32 mask, u32 eval)
++int xe_mmio_write32_and_verify(struct xe_gt *gt,
++ struct xe_reg reg, u32 val, u32 mask, u32 eval)
+ {
+ u32 reg_val;
+
+- xe_mmio_write32(mmio, reg, val);
+- reg_val = xe_mmio_read32(mmio, reg);
++ xe_mmio_write32(gt, reg, val);
++ reg_val = xe_mmio_read32(gt, reg);
+
+ return (reg_val & mask) != eval ? -EINVAL : 0;
+ }
+
+-bool __xe_mmio_in_range(const struct xe_mmio *mmio,
+- const struct xe_mmio_range *range,
+- struct xe_reg reg)
++bool xe_mmio_in_range(const struct xe_gt *gt,
++ const struct xe_mmio_range *range,
++ struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+
+ return range && addr >= range->start && addr <= range->end;
+ }
+
+ /**
+ * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
+- * @mmio: MMIO target
++ * @gt: MMIO target GT
+ * @reg: register to read value from
+ *
+ * Although Intel GPUs have some 64-bit registers, the hardware officially
+@@ -311,21 +316,21 @@ bool __xe_mmio_in_range(const struct xe_mmio *mmio,
+ *
+ * Returns the value of the 64-bit register.
+ */
+-u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
++u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
+ {
+ struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
+ u32 ldw, udw, oldudw, retries;
+
+- reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
+- reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
++ reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
++ reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
+
+ /* we shouldn't adjust just one register address */
+- xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
++ xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
+
+- oldudw = xe_mmio_read32(mmio, reg_udw);
++ oldudw = xe_mmio_read32(gt, reg_udw);
+ for (retries = 5; retries; --retries) {
+- ldw = xe_mmio_read32(mmio, reg);
+- udw = xe_mmio_read32(mmio, reg_udw);
++ ldw = xe_mmio_read32(gt, reg);
++ udw = xe_mmio_read32(gt, reg_udw);
+
+ if (udw == oldudw)
+ break;
+@@ -333,14 +338,14 @@ u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
+ oldudw = udw;
+ }
+
+- drm_WARN(&mmio->tile->xe->drm, retries == 0,
+- "64-bit read of %#x did not stabilize\n", reg.addr);
++ xe_gt_WARN(gt, retries == 0,
++ "64-bit read of %#x did not stabilize\n", reg.addr);
+
+ return (u64)udw << 32 | ldw;
+ }
+
+-static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+- u32 *out_val, bool atomic, bool expect_match)
++static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic, bool expect_match)
+ {
+ ktime_t cur = ktime_get_raw();
+ const ktime_t end = ktime_add_us(cur, timeout_us);
+@@ -350,7 +355,7 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ bool check;
+
+ for (;;) {
+- read = xe_mmio_read32(mmio, reg);
++ read = xe_mmio_read32(gt, reg);
+
+ check = (read & mask) == val;
+ if (!expect_match)
+@@ -376,7 +381,7 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ }
+
+ if (ret != 0) {
+- read = xe_mmio_read32(mmio, reg);
++ read = xe_mmio_read32(gt, reg);
+
+ check = (read & mask) == val;
+ if (!expect_match)
+@@ -394,7 +399,7 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+
+ /**
+ * xe_mmio_wait32() - Wait for a register to match the desired masked value
+- * @mmio: MMIO target
++ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: desired value after applying the mask
+@@ -411,15 +416,15 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
+ * it is possible that this function succeeds even after @timeout_us has passed.
+ */
+-int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+- u32 *out_val, bool atomic)
++int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic)
+ {
+- return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
++ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+ }
+
+ /**
+ * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+- * @mmio: MMIO target
++ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: value not to be matched after applying the mask
+@@ -430,8 +435,8 @@ int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ * This function works exactly like xe_mmio_wait32() with the exception that
+ * @val is expected not to be matched.
+ */
+-int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+- u32 *out_val, bool atomic)
++int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic)
+ {
+- return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
++ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
+index ac6846447c52a..26551410ecc87 100644
+--- a/drivers/gpu/drm/xe/xe_mmio.h
++++ b/drivers/gpu/drm/xe/xe_mmio.h
+@@ -14,67 +14,25 @@ struct xe_reg;
+ int xe_mmio_init(struct xe_device *xe);
+ int xe_mmio_probe_tiles(struct xe_device *xe);
+
+-/*
+- * Temporary transition helper for xe_gt -> xe_mmio conversion. Allows
+- * continued usage of xe_gt as a parameter to MMIO operations which now
+- * take an xe_mmio structure instead. Will be removed once the driver-wide
+- * conversion is complete.
+- */
+-#define __to_xe_mmio(ptr) \
+- _Generic(ptr, \
+- const struct xe_gt *: (&((const struct xe_gt *)(ptr))->mmio), \
+- struct xe_gt *: (&((struct xe_gt *)(ptr))->mmio), \
+- const struct xe_mmio *: (ptr), \
+- struct xe_mmio *: (ptr))
+-
+-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read8(p, reg) __xe_mmio_read8(__to_xe_mmio(p), reg)
+-
+-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read16(p, reg) __xe_mmio_read16(__to_xe_mmio(p), reg)
+-
+-void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
+-#define xe_mmio_write32(p, reg, val) __xe_mmio_write32(__to_xe_mmio(p), reg, val)
+-
+-u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read32(p, reg) __xe_mmio_read32(__to_xe_mmio(p), reg)
+-
+-u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set);
+-#define xe_mmio_rmw32(p, reg, clr, set) __xe_mmio_rmw32(__to_xe_mmio(p), reg, clr, set)
+-
+-int __xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg,
+- u32 val, u32 mask, u32 eval);
+-#define xe_mmio_write32_and_verify(p, reg, val, mask, eval) \
+- __xe_mmio_write32_and_verify(__to_xe_mmio(p), reg, val, mask, eval)
+-
+-bool __xe_mmio_in_range(const struct xe_mmio *mmio,
+- const struct xe_mmio_range *range, struct xe_reg reg);
+-#define xe_mmio_in_range(p, range, reg) __xe_mmio_in_range(__to_xe_mmio(p), range, reg)
+-
+-u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read64_2x32(p, reg) __xe_mmio_read64_2x32(__to_xe_mmio(p), reg)
+-
+-int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+- u32 timeout_us, u32 *out_val, bool atomic);
+-#define xe_mmio_wait32(p, reg, mask, val, timeout_us, out_val, atomic) \
+- __xe_mmio_wait32(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
+-
+-int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+- u32 val, u32 timeout_us, u32 *out_val, bool atomic);
+-#define xe_mmio_wait32_not(p, reg, mask, val, timeout_us, out_val, atomic) \
+- __xe_mmio_wait32_not(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
+-
+-static inline u32 __xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
++u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
++u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
++void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
++u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
++u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
++int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
++bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
++
++u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
++int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic);
++int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic);
++
++static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
+ {
+- if (addr < mmio->adj_limit)
+- addr += mmio->adj_offset;
++ if (addr < gt->mmio.adj_limit)
++ addr += gt->mmio.adj_offset;
+ return addr;
+ }
+-#define xe_mmio_adjusted_addr(p, addr) __xe_mmio_adjusted_addr(__to_xe_mmio(p), addr)
+-
+-static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
+-{
+- return &xe->tiles[0].mmio;
+-}
+
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
+index 91130ad8999cd..8573d7a87d840 100644
+--- a/drivers/gpu/drm/xe/xe_trace.h
++++ b/drivers/gpu/drm/xe/xe_trace.h
+@@ -21,7 +21,6 @@
+ #include "xe_vm.h"
+
+ #define __dev_name_xe(xe) dev_name((xe)->drm.dev)
+-#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
+ #define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
+ #define __dev_name_eq(q) __dev_name_gt((q)->gt)
+
+@@ -343,12 +342,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
+ );
+
+ TRACE_EVENT(xe_reg_rw,
+- TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
++ TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),
+
+- TP_ARGS(mmio, write, reg, val, len),
++ TP_ARGS(gt, write, reg, val, len),
+
+ TP_STRUCT__entry(
+- __string(dev, __dev_name_tile(mmio->tile))
++ __string(dev, __dev_name_gt(gt))
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+--
+2.53.0
+
--- /dev/null
+From 798435b5f812079154555d2fa5e5a9682597592d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Apr 2026 12:01:47 -0400
+Subject: Revert "drm/xe: Switch MMIO interface to take xe_mmio instead of
+ xe_gt"
+
+This reverts commit 26a40327c25c005c1653d66e7b1d8de0fbee15a4.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_mmio.c | 131 ++++++++++++++++++----------------
+ drivers/gpu/drm/xe/xe_mmio.h | 76 +++++---------------
+ drivers/gpu/drm/xe/xe_trace.h | 7 +-
+ 3 files changed, 88 insertions(+), 126 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
+index 9ea0973337eda..46924f4042418 100644
+--- a/drivers/gpu/drm/xe/xe_mmio.c
++++ b/drivers/gpu/drm/xe/xe_mmio.c
+@@ -67,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
+
+ /* Possibly override number of tile based on configuration register */
+ if (!xe->info.skip_mtcfg) {
+- struct xe_mmio *mmio = xe_root_tile_mmio(xe);
++ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ u8 tile_count;
+ u32 mtcfg;
+
+ /*
+ * Although the per-tile mmio regs are not yet initialized, this
+- * is fine as it's going to the root tile's mmio, that's
+- * guaranteed to be initialized earlier in xe_mmio_init()
++ * is fine as it's going to the root gt, that's guaranteed to be
++ * initialized earlier in xe_mmio_init()
+ */
+- mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
++ mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
+ tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
+ if (tile_count < xe->info.tile_count) {
+@@ -187,111 +187,116 @@ int xe_mmio_init(struct xe_device *xe)
+ return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
+ }
+
+-static void mmio_flush_pending_writes(struct xe_mmio *mmio)
++static void mmio_flush_pending_writes(struct xe_gt *gt)
+ {
+ #define DUMMY_REG_OFFSET 0x130030
++ struct xe_tile *tile = gt_to_tile(gt);
+ int i;
+
+- if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
++ if (tile->xe->info.platform != XE_LUNARLAKE)
+ return;
+
+ /* 4 dummy writes */
+ for (i = 0; i < 4; i++)
+- writel(0, mmio->regs + DUMMY_REG_OFFSET);
++ writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
+ }
+
+-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
++u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u8 val;
+
+ /* Wa_15015404425 */
+- mmio_flush_pending_writes(mmio);
++ mmio_flush_pending_writes(gt);
+
+- val = readb(mmio->regs + addr);
+- trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
++ val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
++ trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+
+ return val;
+ }
+
+-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
++u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u16 val;
+
+ /* Wa_15015404425 */
+- mmio_flush_pending_writes(mmio);
++ mmio_flush_pending_writes(gt);
+
+- val = readw(mmio->regs + addr);
+- trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
++ val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
++ trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+
+ return val;
+ }
+
+-void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
++void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+
+- trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
++ trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
+
+- if (!reg.vf && mmio->sriov_vf_gt)
+- xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
++ if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
++ xe_gt_sriov_vf_write32(gt, reg, val);
+ else
+- writel(val, mmio->regs + addr);
++ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+ }
+
+-u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
++u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ struct xe_tile *tile = gt_to_tile(gt);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 val;
+
+ /* Wa_15015404425 */
+- mmio_flush_pending_writes(mmio);
++ mmio_flush_pending_writes(gt);
+
+- if (!reg.vf && mmio->sriov_vf_gt)
+- val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
++ if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
++ val = xe_gt_sriov_vf_read32(gt, reg);
+ else
+- val = readl(mmio->regs + addr);
++ val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+
+- trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
++ trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+
+ return val;
+ }
+
+-u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
++u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
+ {
+ u32 old, reg_val;
+
+- old = xe_mmio_read32(mmio, reg);
++ old = xe_mmio_read32(gt, reg);
+ reg_val = (old & ~clr) | set;
+- xe_mmio_write32(mmio, reg, reg_val);
++ xe_mmio_write32(gt, reg, reg_val);
+
+ return old;
+ }
+
+-int __xe_mmio_write32_and_verify(struct xe_mmio *mmio,
+- struct xe_reg reg, u32 val, u32 mask, u32 eval)
++int xe_mmio_write32_and_verify(struct xe_gt *gt,
++ struct xe_reg reg, u32 val, u32 mask, u32 eval)
+ {
+ u32 reg_val;
+
+- xe_mmio_write32(mmio, reg, val);
+- reg_val = xe_mmio_read32(mmio, reg);
++ xe_mmio_write32(gt, reg, val);
++ reg_val = xe_mmio_read32(gt, reg);
+
+ return (reg_val & mask) != eval ? -EINVAL : 0;
+ }
+
+-bool __xe_mmio_in_range(const struct xe_mmio *mmio,
+- const struct xe_mmio_range *range,
+- struct xe_reg reg)
++bool xe_mmio_in_range(const struct xe_gt *gt,
++ const struct xe_mmio_range *range,
++ struct xe_reg reg)
+ {
+- u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
++ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+
+ return range && addr >= range->start && addr <= range->end;
+ }
+
+ /**
+ * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
+- * @mmio: MMIO target
++ * @gt: MMIO target GT
+ * @reg: register to read value from
+ *
+ * Although Intel GPUs have some 64-bit registers, the hardware officially
+@@ -311,21 +316,21 @@ bool __xe_mmio_in_range(const struct xe_mmio *mmio,
+ *
+ * Returns the value of the 64-bit register.
+ */
+-u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
++u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
+ {
+ struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
+ u32 ldw, udw, oldudw, retries;
+
+- reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
+- reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
++ reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
++ reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
+
+ /* we shouldn't adjust just one register address */
+- xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
++ xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
+
+- oldudw = xe_mmio_read32(mmio, reg_udw);
++ oldudw = xe_mmio_read32(gt, reg_udw);
+ for (retries = 5; retries; --retries) {
+- ldw = xe_mmio_read32(mmio, reg);
+- udw = xe_mmio_read32(mmio, reg_udw);
++ ldw = xe_mmio_read32(gt, reg);
++ udw = xe_mmio_read32(gt, reg_udw);
+
+ if (udw == oldudw)
+ break;
+@@ -333,14 +338,14 @@ u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
+ oldudw = udw;
+ }
+
+- drm_WARN(&mmio->tile->xe->drm, retries == 0,
+- "64-bit read of %#x did not stabilize\n", reg.addr);
++ xe_gt_WARN(gt, retries == 0,
++ "64-bit read of %#x did not stabilize\n", reg.addr);
+
+ return (u64)udw << 32 | ldw;
+ }
+
+-static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+- u32 *out_val, bool atomic, bool expect_match)
++static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic, bool expect_match)
+ {
+ ktime_t cur = ktime_get_raw();
+ const ktime_t end = ktime_add_us(cur, timeout_us);
+@@ -350,7 +355,7 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ bool check;
+
+ for (;;) {
+- read = xe_mmio_read32(mmio, reg);
++ read = xe_mmio_read32(gt, reg);
+
+ check = (read & mask) == val;
+ if (!expect_match)
+@@ -376,7 +381,7 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ }
+
+ if (ret != 0) {
+- read = xe_mmio_read32(mmio, reg);
++ read = xe_mmio_read32(gt, reg);
+
+ check = (read & mask) == val;
+ if (!expect_match)
+@@ -394,7 +399,7 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+
+ /**
+ * xe_mmio_wait32() - Wait for a register to match the desired masked value
+- * @mmio: MMIO target
++ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: desired value after applying the mask
+@@ -411,15 +416,15 @@ static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
+ * it is possible that this function succeeds even after @timeout_us has passed.
+ */
+-int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+- u32 *out_val, bool atomic)
++int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic)
+ {
+- return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
++ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+ }
+
+ /**
+ * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+- * @mmio: MMIO target
++ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: value not to be matched after applying the mask
+@@ -430,8 +435,8 @@ int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ * This function works exactly like xe_mmio_wait32() with the exception that
+ * @val is expected not to be matched.
+ */
+-int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+- u32 *out_val, bool atomic)
++int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic)
+ {
+- return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
++ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
+index ac6846447c52a..26551410ecc87 100644
+--- a/drivers/gpu/drm/xe/xe_mmio.h
++++ b/drivers/gpu/drm/xe/xe_mmio.h
+@@ -14,67 +14,25 @@ struct xe_reg;
+ int xe_mmio_init(struct xe_device *xe);
+ int xe_mmio_probe_tiles(struct xe_device *xe);
+
+-/*
+- * Temporary transition helper for xe_gt -> xe_mmio conversion. Allows
+- * continued usage of xe_gt as a parameter to MMIO operations which now
+- * take an xe_mmio structure instead. Will be removed once the driver-wide
+- * conversion is complete.
+- */
+-#define __to_xe_mmio(ptr) \
+- _Generic(ptr, \
+- const struct xe_gt *: (&((const struct xe_gt *)(ptr))->mmio), \
+- struct xe_gt *: (&((struct xe_gt *)(ptr))->mmio), \
+- const struct xe_mmio *: (ptr), \
+- struct xe_mmio *: (ptr))
+-
+-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read8(p, reg) __xe_mmio_read8(__to_xe_mmio(p), reg)
+-
+-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read16(p, reg) __xe_mmio_read16(__to_xe_mmio(p), reg)
+-
+-void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
+-#define xe_mmio_write32(p, reg, val) __xe_mmio_write32(__to_xe_mmio(p), reg, val)
+-
+-u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read32(p, reg) __xe_mmio_read32(__to_xe_mmio(p), reg)
+-
+-u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set);
+-#define xe_mmio_rmw32(p, reg, clr, set) __xe_mmio_rmw32(__to_xe_mmio(p), reg, clr, set)
+-
+-int __xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg,
+- u32 val, u32 mask, u32 eval);
+-#define xe_mmio_write32_and_verify(p, reg, val, mask, eval) \
+- __xe_mmio_write32_and_verify(__to_xe_mmio(p), reg, val, mask, eval)
+-
+-bool __xe_mmio_in_range(const struct xe_mmio *mmio,
+- const struct xe_mmio_range *range, struct xe_reg reg);
+-#define xe_mmio_in_range(p, range, reg) __xe_mmio_in_range(__to_xe_mmio(p), range, reg)
+-
+-u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg);
+-#define xe_mmio_read64_2x32(p, reg) __xe_mmio_read64_2x32(__to_xe_mmio(p), reg)
+-
+-int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+- u32 timeout_us, u32 *out_val, bool atomic);
+-#define xe_mmio_wait32(p, reg, mask, val, timeout_us, out_val, atomic) \
+- __xe_mmio_wait32(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
+-
+-int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+- u32 val, u32 timeout_us, u32 *out_val, bool atomic);
+-#define xe_mmio_wait32_not(p, reg, mask, val, timeout_us, out_val, atomic) \
+- __xe_mmio_wait32_not(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
+-
+-static inline u32 __xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
++u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
++u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
++void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
++u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
++u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
++int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
++bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
++
++u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
++int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic);
++int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
++ u32 *out_val, bool atomic);
++
++static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
+ {
+- if (addr < mmio->adj_limit)
+- addr += mmio->adj_offset;
++ if (addr < gt->mmio.adj_limit)
++ addr += gt->mmio.adj_offset;
+ return addr;
+ }
+-#define xe_mmio_adjusted_addr(p, addr) __xe_mmio_adjusted_addr(__to_xe_mmio(p), addr)
+-
+-static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
+-{
+- return &xe->tiles[0].mmio;
+-}
+
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
+index 91130ad8999cd..8573d7a87d840 100644
+--- a/drivers/gpu/drm/xe/xe_trace.h
++++ b/drivers/gpu/drm/xe/xe_trace.h
+@@ -21,7 +21,6 @@
+ #include "xe_vm.h"
+
+ #define __dev_name_xe(xe) dev_name((xe)->drm.dev)
+-#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
+ #define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
+ #define __dev_name_eq(q) __dev_name_gt((q)->gt)
+
+@@ -343,12 +342,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
+ );
+
+ TRACE_EVENT(xe_reg_rw,
+- TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
++ TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),
+
+- TP_ARGS(mmio, write, reg, val, len),
++ TP_ARGS(gt, write, reg, val, len),
+
+ TP_STRUCT__entry(
+- __string(dev, __dev_name_tile(mmio->tile))
++ __string(dev, __dev_name_gt(gt))
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+--
+2.53.0
+
sched-deadline-use-revised-wakeup-rule-for-dl_server.patch
clockevents-prevent-timer-interrupt-starvation.patch
crypto-algif_aead-fix-minimum-rx-size-check-for-decr.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch
+revert-drm-xe-mmio-avoid-double-adjust-in-64-bit-rea.patch
+revert-drm-xe-switch-mmio-interface-to-take-xe_mmio-.patch
+thermal-core-mark-thermal-zones-as-exiting-before-un.patch
+thermal-core-address-thermal-zone-removal-races-with.patch
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch
+idpf-fix-preempt_rt-raw-bh-spinlock-nesting-for-asyn.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch-5826
+revert-drm-xe-mmio-avoid-double-adjust-in-64-bit-rea.patch-19262
+revert-drm-xe-switch-mmio-interface-to-take-xe_mmio-.patch-24157
+thermal-core-mark-thermal-zones-as-exiting-before-un.patch-19120
+thermal-core-address-thermal-zone-removal-races-with.patch-8428
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch-16013
+idpf-fix-preempt_rt-raw-bh-spinlock-nesting-for-asyn.patch-17824
--- /dev/null
+From f767faefffdc6a457d02f588c91c06b5620f0dd4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 20:14:51 -0300
+Subject: thermal: core: Address thermal zone removal races with resume
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 45b859b0728267a6199ee5002d62e6c6f3e8c89d ]
+
+Since thermal_zone_pm_complete() and thermal_zone_device_resume()
+re-initialize the poll_queue delayed work for the given thermal zone,
+the cancel_delayed_work_sync() in thermal_zone_device_unregister()
+may miss some already running work items and the thermal zone may
+be freed prematurely [1].
+
+There are two failing scenarios that both start with
+running thermal_pm_notify_complete() right before invoking
+thermal_zone_device_unregister() for one of the thermal zones.
+
+In the first scenario, there is a work item already running for
+the given thermal zone when thermal_pm_notify_complete() calls
+thermal_zone_pm_complete() for that thermal zone and it continues to
+run when thermal_zone_device_unregister() starts. Since the poll_queue
+delayed work has been re-initialized by thermal_pm_notify_complete(), the
+running work item will be missed by the cancel_delayed_work_sync() in
+thermal_zone_device_unregister() and if it continues to run past the
+freeing of the thermal zone object, a use-after-free will occur.
+
+In the second scenario, thermal_zone_device_resume() queued up by
+thermal_pm_notify_complete() runs right after the thermal_zone_exit()
+called by thermal_zone_device_unregister() has returned. The poll_queue
+delayed work is re-initialized by it before cancel_delayed_work_sync() is
+called by thermal_zone_device_unregister(), so it may continue to run
+after the freeing of the thermal zone object, which also leads to a
+use-after-free.
+
+Address the first failing scenario by ensuring that no thermal work
+items will be running when thermal_pm_notify_complete() is called.
+For this purpose, first move the cancel_delayed_work() call from
+thermal_zone_pm_complete() to thermal_zone_pm_prepare() to prevent
+new work from entering the workqueue going forward. Next, switch
+over to using a dedicated workqueue for thermal events and update
+the code in thermal_pm_notify() to flush that workqueue after
+thermal_pm_notify_prepare() has returned which will take care of
+all leftover thermal work already on the workqueue (that leftover
+work would do nothing useful anyway because all of the thermal zones
+have been flagged as suspended).
+
+The second failing scenario is addressed by adding a tz->state check
+to thermal_zone_device_resume() to prevent it from re-initializing
+the poll_queue delayed work if the thermal zone is going away.
+
+Note that the above changes will also facilitate relocating the suspend
+and resume of thermal zones closer to the suspend and resume of devices,
+respectively.
+
+Fixes: 5a5efdaffda5 ("thermal: core: Resume thermal zones asynchronously")
+Reported-by: syzbot+3b3852c6031d0f30dfaf@syzkaller.appspotmail.com
+Closes: https://syzbot.org/bug?extid=3b3852c6031d0f30dfaf
+Reported-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Closes: https://lore.kernel.org/linux-pm/20260324-thermal-core-uaf-init_delayed_work-v1-1-6611ae76a8a1@igalia.com/ [1]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Tested-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/6267615.lOV4Wx5bFT@rafael.j.wysocki
+[ mfo: backport for 6.12.y:
+ - No guard() or thermal_pm_notify_{prepare,complete}() for the lack of
+ commit d1c8aa2a5c5c ("thermal: core: Manage thermal_list_lock using a mutex guard")
+ - thermal_zone_device_resume() calls mutex_unlock() to return;
+ - thermal_pm_notify() has thermal_pm_notify_prepare() in *_PREPARE;
+ - No WQ_PERCPU flag in alloc_workqueue(), introduced in v6.17. ]
+Signed-off-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/thermal_core.c | 33 ++++++++++++++++++++++++++++-----
+ 1 file changed, 28 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 4663ca7a587c5..8ce1134e15e56 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -42,6 +42,8 @@ static struct thermal_governor *def_governor;
+
+ static bool thermal_pm_suspended;
+
++static struct workqueue_struct *thermal_wq __ro_after_init;
++
+ /*
+ * Governor section: set of functions to handle thermal governors
+ *
+@@ -328,7 +330,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
+ if (delay > HZ)
+ delay = round_jiffies_relative(delay);
+
+- mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, delay);
++ mod_delayed_work(thermal_wq, &tz->poll_queue, delay);
+ }
+
+ static void thermal_zone_recheck(struct thermal_zone_device *tz, int error)
+@@ -1691,6 +1693,12 @@ static void thermal_zone_device_resume(struct work_struct *work)
+
+ mutex_lock(&tz->lock);
+
++ /* If the thermal zone is going away, there's nothing to do. */
++ if (tz->state & TZ_STATE_FLAG_EXIT) {
++ mutex_unlock(&tz->lock);
++ return;
++ }
++
+ tz->state &= ~(TZ_STATE_FLAG_SUSPENDED | TZ_STATE_FLAG_RESUMING);
+
+ thermal_debug_tz_resume(tz);
+@@ -1722,6 +1730,9 @@ static void thermal_zone_pm_prepare(struct thermal_zone_device *tz)
+
+ tz->state |= TZ_STATE_FLAG_SUSPENDED;
+
++ /* Prevent new work from getting to the workqueue subsequently. */
++ cancel_delayed_work(&tz->poll_queue);
++
+ mutex_unlock(&tz->lock);
+ }
+
+@@ -1729,8 +1740,6 @@ static void thermal_zone_pm_complete(struct thermal_zone_device *tz)
+ {
+ mutex_lock(&tz->lock);
+
+- cancel_delayed_work(&tz->poll_queue);
+-
+ reinit_completion(&tz->resume);
+ tz->state |= TZ_STATE_FLAG_RESUMING;
+
+@@ -1740,7 +1749,7 @@ static void thermal_zone_pm_complete(struct thermal_zone_device *tz)
+ */
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_resume);
+ /* Queue up the work without a delay. */
+- mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, 0);
++ mod_delayed_work(thermal_wq, &tz->poll_queue, 0);
+
+ mutex_unlock(&tz->lock);
+ }
+@@ -1762,6 +1771,11 @@ static int thermal_pm_notify(struct notifier_block *nb,
+ thermal_zone_pm_prepare(tz);
+
+ mutex_unlock(&thermal_list_lock);
++ /*
++ * Allow any leftover thermal work items already on the
+		 * workqueue to complete so they don't get in the way later.
++ */
++ flush_workqueue(thermal_wq);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+@@ -1801,9 +1815,16 @@ static int __init thermal_init(void)
+ if (result)
+ goto error;
+
++ thermal_wq = alloc_workqueue("thermal_events",
++ WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0);
++ if (!thermal_wq) {
++ result = -ENOMEM;
++ goto unregister_netlink;
++ }
++
+ result = thermal_register_governors();
+ if (result)
+- goto unregister_netlink;
++ goto destroy_workqueue;
+
+ thermal_class = kzalloc(sizeof(*thermal_class), GFP_KERNEL);
+ if (!thermal_class) {
+@@ -1830,6 +1851,8 @@ static int __init thermal_init(void)
+
+ unregister_governors:
+ thermal_unregister_governors();
++destroy_workqueue:
++ destroy_workqueue(thermal_wq);
+ unregister_netlink:
+ thermal_netlink_exit();
+ error:
+--
+2.53.0
+
--- /dev/null
+From f767faefffdc6a457d02f588c91c06b5620f0dd4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 20:14:51 -0300
+Subject: thermal: core: Address thermal zone removal races with resume
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 45b859b0728267a6199ee5002d62e6c6f3e8c89d ]
+
+Since thermal_zone_pm_complete() and thermal_zone_device_resume()
+re-initialize the poll_queue delayed work for the given thermal zone,
+the cancel_delayed_work_sync() in thermal_zone_device_unregister()
+may miss some already running work items and the thermal zone may
+be freed prematurely [1].
+
+There are two failing scenarios that both start with
+running thermal_pm_notify_complete() right before invoking
+thermal_zone_device_unregister() for one of the thermal zones.
+
+In the first scenario, there is a work item already running for
+the given thermal zone when thermal_pm_notify_complete() calls
+thermal_zone_pm_complete() for that thermal zone and it continues to
+run when thermal_zone_device_unregister() starts. Since the poll_queue
+delayed work has been re-initialized by thermal_pm_notify_complete(), the
+running work item will be missed by the cancel_delayed_work_sync() in
+thermal_zone_device_unregister() and if it continues to run past the
+freeing of the thermal zone object, a use-after-free will occur.
+
+In the second scenario, thermal_zone_device_resume() queued up by
+thermal_pm_notify_complete() runs right after the thermal_zone_exit()
+called by thermal_zone_device_unregister() has returned. The poll_queue
+delayed work is re-initialized by it before cancel_delayed_work_sync() is
+called by thermal_zone_device_unregister(), so it may continue to run
+after the freeing of the thermal zone object, which also leads to a
+use-after-free.
+
+Address the first failing scenario by ensuring that no thermal work
+items will be running when thermal_pm_notify_complete() is called.
+For this purpose, first move the cancel_delayed_work() call from
+thermal_zone_pm_complete() to thermal_zone_pm_prepare() to prevent
+new work from entering the workqueue going forward. Next, switch
+over to using a dedicated workqueue for thermal events and update
+the code in thermal_pm_notify() to flush that workqueue after
+thermal_pm_notify_prepare() has returned which will take care of
+all leftover thermal work already on the workqueue (that leftover
+work would do nothing useful anyway because all of the thermal zones
+have been flagged as suspended).
+
+The second failing scenario is addressed by adding a tz->state check
+to thermal_zone_device_resume() to prevent it from re-initializing
+the poll_queue delayed work if the thermal zone is going away.
+
+Note that the above changes will also facilitate relocating the suspend
+and resume of thermal zones closer to the suspend and resume of devices,
+respectively.
+
+Fixes: 5a5efdaffda5 ("thermal: core: Resume thermal zones asynchronously")
+Reported-by: syzbot+3b3852c6031d0f30dfaf@syzkaller.appspotmail.com
+Closes: https://syzbot.org/bug?extid=3b3852c6031d0f30dfaf
+Reported-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Closes: https://lore.kernel.org/linux-pm/20260324-thermal-core-uaf-init_delayed_work-v1-1-6611ae76a8a1@igalia.com/ [1]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Tested-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/6267615.lOV4Wx5bFT@rafael.j.wysocki
+[ mfo: backport for 6.12.y:
+ - No guard() or thermal_pm_notify_{prepare,complete}() for the lack of
+ commit d1c8aa2a5c5c ("thermal: core: Manage thermal_list_lock using a mutex guard")
+ - thermal_zone_device_resume() calls mutex_unlock() to return;
+ - thermal_pm_notify() has thermal_pm_notify_prepare() in *_PREPARE;
+ - No WQ_PERCPU flag in alloc_workqueue(), introduced in v6.17. ]
+Signed-off-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/thermal_core.c | 33 ++++++++++++++++++++++++++++-----
+ 1 file changed, 28 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 4663ca7a587c5..8ce1134e15e56 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -42,6 +42,8 @@ static struct thermal_governor *def_governor;
+
+ static bool thermal_pm_suspended;
+
++static struct workqueue_struct *thermal_wq __ro_after_init;
++
+ /*
+ * Governor section: set of functions to handle thermal governors
+ *
+@@ -328,7 +330,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
+ if (delay > HZ)
+ delay = round_jiffies_relative(delay);
+
+- mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, delay);
++ mod_delayed_work(thermal_wq, &tz->poll_queue, delay);
+ }
+
+ static void thermal_zone_recheck(struct thermal_zone_device *tz, int error)
+@@ -1691,6 +1693,12 @@ static void thermal_zone_device_resume(struct work_struct *work)
+
+ mutex_lock(&tz->lock);
+
++ /* If the thermal zone is going away, there's nothing to do. */
++ if (tz->state & TZ_STATE_FLAG_EXIT) {
++ mutex_unlock(&tz->lock);
++ return;
++ }
++
+ tz->state &= ~(TZ_STATE_FLAG_SUSPENDED | TZ_STATE_FLAG_RESUMING);
+
+ thermal_debug_tz_resume(tz);
+@@ -1722,6 +1730,9 @@ static void thermal_zone_pm_prepare(struct thermal_zone_device *tz)
+
+ tz->state |= TZ_STATE_FLAG_SUSPENDED;
+
++ /* Prevent new work from getting to the workqueue subsequently. */
++ cancel_delayed_work(&tz->poll_queue);
++
+ mutex_unlock(&tz->lock);
+ }
+
+@@ -1729,8 +1740,6 @@ static void thermal_zone_pm_complete(struct thermal_zone_device *tz)
+ {
+ mutex_lock(&tz->lock);
+
+- cancel_delayed_work(&tz->poll_queue);
+-
+ reinit_completion(&tz->resume);
+ tz->state |= TZ_STATE_FLAG_RESUMING;
+
+@@ -1740,7 +1749,7 @@ static void thermal_zone_pm_complete(struct thermal_zone_device *tz)
+ */
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_resume);
+ /* Queue up the work without a delay. */
+- mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, 0);
++ mod_delayed_work(thermal_wq, &tz->poll_queue, 0);
+
+ mutex_unlock(&tz->lock);
+ }
+@@ -1762,6 +1771,11 @@ static int thermal_pm_notify(struct notifier_block *nb,
+ thermal_zone_pm_prepare(tz);
+
+ mutex_unlock(&thermal_list_lock);
++ /*
++ * Allow any leftover thermal work items already on the
++ * workqueue to complete so they don't get in the way later.
++ */
++ flush_workqueue(thermal_wq);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+@@ -1801,9 +1815,16 @@ static int __init thermal_init(void)
+ if (result)
+ goto error;
+
++ thermal_wq = alloc_workqueue("thermal_events",
++ WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0);
++ if (!thermal_wq) {
++ result = -ENOMEM;
++ goto unregister_netlink;
++ }
++
+ result = thermal_register_governors();
+ if (result)
+- goto unregister_netlink;
++ goto destroy_workqueue;
+
+ thermal_class = kzalloc(sizeof(*thermal_class), GFP_KERNEL);
+ if (!thermal_class) {
+@@ -1830,6 +1851,8 @@ static int __init thermal_init(void)
+
+ unregister_governors:
+ thermal_unregister_governors();
++destroy_workqueue:
++ destroy_workqueue(thermal_wq);
+ unregister_netlink:
+ thermal_netlink_exit();
+ error:
+--
+2.53.0
+
--- /dev/null
+From 20a424396c29db0a8ff7c2de802b7c99c9c127db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 20:14:50 -0300
+Subject: thermal: core: Mark thermal zones as exiting before unregistration
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 1dae3e70b473adc32f81ca1be926440f9b1de9dc ]
+
+In analogy with a previous change in the thermal zone registration code
+path, to ensure that __thermal_zone_device_update() will return early
+for thermal zones that are going away, introduce a thermal zone state
+flag representing the "exit" state and set it while deleting the thermal
+zone from thermal_tz_list.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Link: https://patch.msgid.link/4394176.ejJDZkT8p0@rjwysocki.net
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+[ mfo: this commit is a dependency/helper for backporting next commit. ]
+Signed-off-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/thermal_core.c | 3 +++
+ drivers/thermal/thermal_core.h | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index aa302ac62b2e2..4663ca7a587c5 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1614,7 +1614,10 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
+ }
+
+ mutex_lock(&tz->lock);
++
++ tz->state |= TZ_STATE_FLAG_EXIT;
+ list_del(&tz->node);
++
+ mutex_unlock(&tz->lock);
+
+ /* Unbind all cdevs associated with 'this' thermal zone */
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index 163871699a602..007990ce139d3 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -65,6 +65,7 @@ struct thermal_governor {
+ #define TZ_STATE_FLAG_SUSPENDED BIT(0)
+ #define TZ_STATE_FLAG_RESUMING BIT(1)
+ #define TZ_STATE_FLAG_INIT BIT(2)
++#define TZ_STATE_FLAG_EXIT BIT(3)
+
+ #define TZ_STATE_READY 0
+
+--
+2.53.0
+
--- /dev/null
+From 20a424396c29db0a8ff7c2de802b7c99c9c127db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 20:14:50 -0300
+Subject: thermal: core: Mark thermal zones as exiting before unregistration
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 1dae3e70b473adc32f81ca1be926440f9b1de9dc ]
+
+In analogy with a previous change in the thermal zone registration code
+path, to ensure that __thermal_zone_device_update() will return early
+for thermal zones that are going away, introduce a thermal zone state
+flag representing the "exit" state and set it while deleting the thermal
+zone from thermal_tz_list.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Link: https://patch.msgid.link/4394176.ejJDZkT8p0@rjwysocki.net
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+[ mfo: this commit is a dependency/helper for backporting next commit. ]
+Signed-off-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/thermal_core.c | 3 +++
+ drivers/thermal/thermal_core.h | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index aa302ac62b2e2..4663ca7a587c5 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1614,7 +1614,10 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
+ }
+
+ mutex_lock(&tz->lock);
++
++ tz->state |= TZ_STATE_FLAG_EXIT;
+ list_del(&tz->node);
++
+ mutex_unlock(&tz->lock);
+
+ /* Unbind all cdevs associated with 'this' thermal zone */
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index 163871699a602..007990ce139d3 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -65,6 +65,7 @@ struct thermal_governor {
+ #define TZ_STATE_FLAG_SUSPENDED BIT(0)
+ #define TZ_STATE_FLAG_RESUMING BIT(1)
+ #define TZ_STATE_FLAG_INIT BIT(2)
++#define TZ_STATE_FLAG_EXIT BIT(3)
+
+ #define TZ_STATE_READY 0
+
+--
+2.53.0
+
--- /dev/null
+From bc25554d5bf2cd3f772348a7dab5fb60f60aa6a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index becffc15e7579..fbe9e3f1036f8 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3475,7 +3475,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7c6f7c9f73320..645d2c43ebf7a 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -582,7 +582,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From bc25554d5bf2cd3f772348a7dab5fb60f60aa6a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index becffc15e7579..fbe9e3f1036f8 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3475,7 +3475,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7c6f7c9f73320..645d2c43ebf7a 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -582,7 +582,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
clockevents-prevent-timer-interrupt-starvation.patch
crypto-af_alg-fix-page-reassignment-overflow-in-af_a.patch
crypto-algif_aead-fix-minimum-rx-size-check-for-decr.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch-30840
--- /dev/null
+From cd37abaa05cb98532ec753595834d0936d81e0cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index f33d25a4e4cc7..682adbdf7ee79 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -304,9 +304,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -314,8 +342,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -327,26 +357,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From cd37abaa05cb98532ec753595834d0936d81e0cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 02:33:45 +1030
+Subject: ALSA: usb-audio: Improve Focusrite sample rate filtering
+
+From: Geoffrey D. Bennett <g@b4.vu>
+
+[ Upstream commit 24d2d3c5f94007a5a0554065ab7349bb69e28bcb ]
+
+Replace the bLength == 10 max_rate check in
+focusrite_valid_sample_rate() with filtering that also examines the
+bmControls VAL_ALT_SETTINGS bit.
+
+When VAL_ALT_SETTINGS is readable, the device uses strict
+per-altsetting rate filtering (only the highest rate pair for that
+altsetting is valid). When it is not readable, all rates up to
+max_rate are valid.
+
+For devices without the bLength == 10 Format Type descriptor extension
+but with VAL_ALT_SETTINGS readable and multiple altsettings (only seen
+in Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
+convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+
+This produces correct rate tables for all tested Focusrite devices
+(all Scarlett 2nd, 3rd, and 4th Gen, Clarett+, and Vocaster) using
+only USB descriptors, allowing QUIRK_FLAG_VALIDATE_RATES to be removed
+for Focusrite in the next commit.
+
+Signed-off-by: Geoffrey D. Bennett <g@b4.vu>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/7e18c1f393a6ecb6fc75dd867a2c4dbe135e3e22.1771594828.git.g@b4.vu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 86 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 74 insertions(+), 12 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index f33d25a4e4cc7..682adbdf7ee79 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -304,9 +304,37 @@ static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ }
+
+ /*
+- * Many Focusrite devices supports a limited set of sampling rates per
+- * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
+- * descriptor which has a non-standard bLength = 10.
++ * Focusrite devices use rate pairs: 44100/48000, 88200/96000, and
++ * 176400/192000. Return true if rate is in the pair for max_rate.
++ */
++static bool focusrite_rate_pair(unsigned int rate,
++ unsigned int max_rate)
++{
++ switch (max_rate) {
++ case 48000: return rate == 44100 || rate == 48000;
++ case 96000: return rate == 88200 || rate == 96000;
++ case 192000: return rate == 176400 || rate == 192000;
++ default: return true;
++ }
++}
++
++/*
++ * Focusrite devices report all supported rates in a single clock
++ * source but only a subset is valid per altsetting.
++ *
++ * Detection uses two descriptor features:
++ *
++ * 1. Format Type descriptor bLength == 10: non-standard extension
++ * with max sample rate in bytes 6..9.
++ *
++ * 2. bmControls VAL_ALT_SETTINGS readable bit: when set, the device
++ * only supports the highest rate pair for that altsetting, and when
++ * clear, all rates up to max_rate are valid.
++ *
++ * For devices without the bLength == 10 extension but with
++ * VAL_ALT_SETTINGS readable and multiple altsettings (only seen in
++ * Scarlett 18i8 3rd Gen playback), fall back to the Focusrite
++ * convention: alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
+ */
+ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+@@ -314,8 +342,10 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ {
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
++ struct uac2_as_header_descriptor *as;
+ unsigned char *fmt;
+ unsigned int max_rate;
++ bool val_alt;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+@@ -327,26 +357,58 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ if (!fmt)
+ return true;
+
++ as = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++ NULL, UAC_AS_GENERAL);
++ if (!as)
++ return true;
++
++ val_alt = uac_v2v3_control_is_readable(as->bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS);
++
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+- /* Validate max rate */
+- if (max_rate != 48000 &&
+- max_rate != 96000 &&
+- max_rate != 192000 &&
+- max_rate != 384000) {
+-
++ if (val_alt)
++ return focusrite_rate_pair(rate, max_rate);
++
++ /* No val_alt: rates fall through from higher */
++ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
++ case 48000:
++ return (rate == 44100 || rate == 48000);
++ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+-
+ return true;
+ }
++ }
+
+- return rate <= max_rate;
++ if (!val_alt)
++ return true;
++
++ /* Multi-altsetting device with val_alt but no max_rate
++ * in the format descriptor. Use Focusrite convention:
++ * alt 1 = 48kHz, alt 2 = 96kHz, alt 3 = 192kHz.
++ */
++ if (iface->num_altsetting <= 2)
++ return true;
++
++ switch (fp->altsetting) {
++ case 1: max_rate = 48000; break;
++ case 2: max_rate = 96000; break;
++ case 3: max_rate = 192000; break;
++ default: return true;
+ }
+
+- return true;
++ return focusrite_rate_pair(rate, max_rate);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 3b0e2f23ebb9ab3806448bce5c878cf509a994d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 12:26:31 -0400
+Subject: drm/i915/psr: Do not use pipe_src as borders for SU area
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+[ Upstream commit 75519f5df2a9b23f7bf305e12dc9a6e3e65c24b7 ]
+
+So far, using crtc_state->pipe_src as borders for the Selective Update area
+hasn't caused visible problems as drm_rect_width(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+drm_rect_height(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_vdisplay when pipe scaling is not
+used. On the other hand, using pipe scaling forces full frame updates and all the
+Selective Update area calculations are skipped. Now this improper usage of
+crtc_state->pipe_src is causing the following warnings:
+
+<4> [7771.978166] xe 0000:00:02.0: [drm] drm_WARN_ON_ONCE(su_lines % vdsc_cfg->slice_height)
+
+after WARN_ON_ONCE was added by commit:
+
+"drm/i915/dsc: Add helper for writing DSC Selective Update ET parameters"
+
+These warnings are seen when DSC and pipe scaling are enabled
+simultaneously. This is because on full frame update SU area is improperly
+set as pipe_src which is not aligned with DSC slice height.
+
+Fix these by creating local rectangle using
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+crtc_state->hw.adjusted_mode.crtc_vdisplay. Use this local rectangle as
+borders for SU area.
+
+Fixes: d6774b8c3c58 ("drm/i915: Ensure damage clip area is within pipe area")
+Cc: <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Mika Kahola <mika.kahola@intel.com>
+Link: https://patch.msgid.link/20260327114553.195285-1-jouni.hogander@intel.com
+(cherry picked from commit da0cdc1c329dd2ff09c41fbbe9fbd9c92c5d2c6e)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+[ omitted hunks for DSC selective update ET alignment infrastructure ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 5cf3db7058b98..b0818dc8480ed 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1924,9 +1924,9 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
+
+ static void clip_area_update(struct drm_rect *overlap_damage_area,
+ struct drm_rect *damage_area,
+- struct drm_rect *pipe_src)
++ struct drm_rect *display_area)
+ {
+- if (!drm_rect_intersect(damage_area, pipe_src))
++ if (!drm_rect_intersect(damage_area, display_area))
+ return;
+
+ if (overlap_damage_area->y1 == -1) {
+@@ -2004,6 +2004,12 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
+ struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane *plane;
++ struct drm_rect display_area = {
++ .x1 = 0,
++ .y1 = 0,
++ .x2 = crtc_state->hw.adjusted_mode.crtc_hdisplay,
++ .y2 = crtc_state->hw.adjusted_mode.crtc_vdisplay,
++ };
+ bool full_update = false;
+ int i, ret;
+
+@@ -2050,14 +2056,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
+@@ -2065,7 +2071,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ continue;
+ }
+
+@@ -2081,7 +2087,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
+- clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
++ clip_area_update(&pipe_clip, &damaged_area, &display_area);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 3b0e2f23ebb9ab3806448bce5c878cf509a994d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Apr 2026 12:26:31 -0400
+Subject: drm/i915/psr: Do not use pipe_src as borders for SU area
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+[ Upstream commit 75519f5df2a9b23f7bf305e12dc9a6e3e65c24b7 ]
+
+So far, using crtc_state->pipe_src as borders for the Selective Update area
+hasn't caused visible problems as drm_rect_width(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+drm_rect_height(crtc_state->pipe_src) ==
+crtc_state->hw.adjusted_mode.crtc_vdisplay when pipe scaling is not
+used. On the other hand, using pipe scaling forces full frame updates and all the
+Selective Update area calculations are skipped. Now this improper usage of
+crtc_state->pipe_src is causing the following warnings:
+
+<4> [7771.978166] xe 0000:00:02.0: [drm] drm_WARN_ON_ONCE(su_lines % vdsc_cfg->slice_height)
+
+after WARN_ON_ONCE was added by commit:
+
+"drm/i915/dsc: Add helper for writing DSC Selective Update ET parameters"
+
+These warnings are seen when DSC and pipe scaling are enabled
+simultaneously. This is because on full frame update SU area is improperly
+set as pipe_src which is not aligned with DSC slice height.
+
+Fix these by creating local rectangle using
+crtc_state->hw.adjusted_mode.crtc_hdisplay and
+crtc_state->hw.adjusted_mode.crtc_vdisplay. Use this local rectangle as
+borders for SU area.
+
+Fixes: d6774b8c3c58 ("drm/i915: Ensure damage clip area is within pipe area")
+Cc: <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Mika Kahola <mika.kahola@intel.com>
+Link: https://patch.msgid.link/20260327114553.195285-1-jouni.hogander@intel.com
+(cherry picked from commit da0cdc1c329dd2ff09c41fbbe9fbd9c92c5d2c6e)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+[ omitted hunks for DSC selective update ET alignment infrastructure ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 5cf3db7058b98..b0818dc8480ed 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1924,9 +1924,9 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
+
+ static void clip_area_update(struct drm_rect *overlap_damage_area,
+ struct drm_rect *damage_area,
+- struct drm_rect *pipe_src)
++ struct drm_rect *display_area)
+ {
+- if (!drm_rect_intersect(damage_area, pipe_src))
++ if (!drm_rect_intersect(damage_area, display_area))
+ return;
+
+ if (overlap_damage_area->y1 == -1) {
+@@ -2004,6 +2004,12 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
+ struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane *plane;
++ struct drm_rect display_area = {
++ .x1 = 0,
++ .y1 = 0,
++ .x2 = crtc_state->hw.adjusted_mode.crtc_hdisplay,
++ .y2 = crtc_state->hw.adjusted_mode.crtc_vdisplay,
++ };
+ bool full_update = false;
+ int i, ret;
+
+@@ -2050,14 +2056,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
+@@ -2065,7 +2071,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+- &crtc_state->pipe_src);
++ &display_area);
+ continue;
+ }
+
+@@ -2081,7 +2087,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
+- clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
++ clip_area_update(&pipe_clip, &damaged_area, &display_area);
+ }
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From 2a00ee92f7350a8be9e4d564b223d20dcac380e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 9b089cdfcd352..255996f43d854 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3454,7 +3454,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 4cc97f971264e..fabb2c1ca00ab 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -587,7 +587,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From 2a00ee92f7350a8be9e4d564b223d20dcac380e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2026 00:28:29 +0100
+Subject: netfilter: conntrack: add missing netlink policy validations
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f900e1d77ee0ef87bfb5ab3fe60f0b3d8ad5ba05 ]
+
+Hyunwoo Kim reports out-of-bounds access in sctp and ctnetlink.
+
+These attributes are used by the kernel without any validation.
+Extend the netlink policies accordingly.
+
+Quoting the reporter:
+ nlattr_to_sctp() assigns the user-supplied CTA_PROTOINFO_SCTP_STATE
+ value directly to ct->proto.sctp.state without checking that it is
+ within the valid range. [..]
+
+ and: ... with exp->dir = 100, the access at
+ ct->master->tuplehash[100] reads 5600 bytes past the start of a
+ 320-byte nf_conn object, causing a slab-out-of-bounds read confirmed by
+ UBSAN.
+
+Fixes: 076a0ca02644 ("netfilter: ctnetlink: add NAT support for expectations")
+Fixes: a258860e01b8 ("netfilter: ctnetlink: add full support for SCTP to ctnetlink")
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 2 +-
+ net/netfilter/nf_conntrack_proto_sctp.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 9b089cdfcd352..255996f43d854 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3454,7 +3454,7 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+- [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
++ [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY),
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+ };
+ #endif
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 4cc97f971264e..fabb2c1ca00ab 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -587,7 +587,8 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
+ }
+
+ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
+- [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
++ [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8,
++ SCTP_CONNTRACK_HEARTBEAT_SENT),
+ [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
+ [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
+ };
+--
+2.53.0
+
--- /dev/null
+From 131ca81ee35a758abe5cceb09c79946c7b968b3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Oct 2023 17:08:19 -0700
+Subject: objtool: Remove max symbol name length limitation
+
+From: Aaron Plattner <aplattner@nvidia.com>
+
+[ Upstream commit f404a58dcf0c862b05602f641ce5fdd8b98fbc3a ]
+
+If one of the symbols processed by read_symbols() happens to have a
+.cold variant with a name longer than objtool's MAX_NAME_LEN limit, the
+build fails.
+
+Avoid this problem by just using strndup() to copy the parent function's
+name, rather than strncpy()ing it onto the stack.
+
+Signed-off-by: Aaron Plattner <aplattner@nvidia.com>
+Link: https://lore.kernel.org/r/41e94cfea1d9131b758dd637fecdeacd459d4584.1696355111.git.aplattner@nvidia.com
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/objtool/elf.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index 797507a90251b..19021f9755ac7 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -22,8 +22,6 @@
+ #include <objtool/elf.h>
+ #include <objtool/warn.h>
+
+-#define MAX_NAME_LEN 128
+-
+ static inline u32 str_hash(const char *str)
+ {
+ return jhash(str, strlen(str), 0);
+@@ -515,7 +513,7 @@ static int read_symbols(struct elf *elf)
+ /* Create parent/child links for any cold subfunctions */
+ list_for_each_entry(sec, &elf->sections, list) {
+ sec_for_each_sym(sec, sym) {
+- char pname[MAX_NAME_LEN + 1];
++ char *pname;
+ size_t pnamelen;
+ if (sym->type != STT_FUNC)
+ continue;
+@@ -531,15 +529,15 @@ static int read_symbols(struct elf *elf)
+ continue;
+
+ pnamelen = coldstr - sym->name;
+- if (pnamelen > MAX_NAME_LEN) {
+- WARN("%s(): parent function name exceeds maximum length of %d characters",
+- sym->name, MAX_NAME_LEN);
++ pname = strndup(sym->name, pnamelen);
++ if (!pname) {
++ WARN("%s(): failed to allocate memory",
++ sym->name);
+ return -1;
+ }
+
+- strncpy(pname, sym->name, pnamelen);
+- pname[pnamelen] = '\0';
+ pfunc = find_symbol_by_name(elf, pname);
++ free(pname);
+
+ if (!pfunc) {
+ WARN("%s(): can't find parent function",
+--
+2.53.0
+
--- /dev/null
+From 131ca81ee35a758abe5cceb09c79946c7b968b3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Oct 2023 17:08:19 -0700
+Subject: objtool: Remove max symbol name length limitation
+
+From: Aaron Plattner <aplattner@nvidia.com>
+
+[ Upstream commit f404a58dcf0c862b05602f641ce5fdd8b98fbc3a ]
+
+If one of the symbols processed by read_symbols() happens to have a
+.cold variant with a name longer than objtool's MAX_NAME_LEN limit, the
+build fails.
+
+Avoid this problem by just using strndup() to copy the parent function's
+name, rather than strncpy()ing it onto the stack.
+
+Signed-off-by: Aaron Plattner <aplattner@nvidia.com>
+Link: https://lore.kernel.org/r/41e94cfea1d9131b758dd637fecdeacd459d4584.1696355111.git.aplattner@nvidia.com
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/objtool/elf.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index 797507a90251b..19021f9755ac7 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -22,8 +22,6 @@
+ #include <objtool/elf.h>
+ #include <objtool/warn.h>
+
+-#define MAX_NAME_LEN 128
+-
+ static inline u32 str_hash(const char *str)
+ {
+ return jhash(str, strlen(str), 0);
+@@ -515,7 +513,7 @@ static int read_symbols(struct elf *elf)
+ /* Create parent/child links for any cold subfunctions */
+ list_for_each_entry(sec, &elf->sections, list) {
+ sec_for_each_sym(sec, sym) {
+- char pname[MAX_NAME_LEN + 1];
++ char *pname;
+ size_t pnamelen;
+ if (sym->type != STT_FUNC)
+ continue;
+@@ -531,15 +529,15 @@ static int read_symbols(struct elf *elf)
+ continue;
+
+ pnamelen = coldstr - sym->name;
+- if (pnamelen > MAX_NAME_LEN) {
+- WARN("%s(): parent function name exceeds maximum length of %d characters",
+- sym->name, MAX_NAME_LEN);
++ pname = strndup(sym->name, pnamelen);
++ if (!pname) {
++ WARN("%s(): failed to allocate memory",
++ sym->name);
+ return -1;
+ }
+
+- strncpy(pname, sym->name, pnamelen);
+- pname[pnamelen] = '\0';
+ pfunc = find_symbol_by_name(elf, pname);
++ free(pname);
+
+ if (!pfunc) {
+ WARN("%s(): can't find parent function",
+--
+2.53.0
+
perf-x86-intel-uncore-skip-discovery-table-for-offli.patch
clockevents-prevent-timer-interrupt-starvation.patch
crypto-algif_aead-fix-minimum-rx-size-check-for-decr.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch
+objtool-remove-max-symbol-name-length-limitation.patch
+drm-i915-psr-do-not-use-pipe_src-as-borders-for-su-a.patch
+netfilter-conntrack-add-missing-netlink-policy-valid.patch-27048
+alsa-usb-audio-improve-focusrite-sample-rate-filteri.patch-12901
+objtool-remove-max-symbol-name-length-limitation.patch-22157
+drm-i915-psr-do-not-use-pipe_src-as-borders-for-su-a.patch-32089