--- /dev/null
+From a4e45ccf80894a62d3f04104ce464bce9e730553 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 10:36:48 +0100
+Subject: block: rnbd-clt: Fix leaked ID in init_dev()
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+[ Upstream commit c9b5645fd8ca10f310e41b07540f98e6a9720f40 ]
+
+If kstrdup() fails in init_dev(), then the newly allocated ID is lost.
+
+Fixes: 64e8a6ece1a5 ("block/rnbd-clt: Dynamically alloc buffer for pathname & blk_symlink_name")
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rnbd/rnbd-clt.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index ea4b7002f438..2aebb07eff92 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1378,9 +1378,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+- GFP_KERNEL);
+- if (ret < 0) {
++ dev->clt_device_id = ida_alloc_max(&index_ida,
++ (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
++ GFP_KERNEL);
++ if (dev->clt_device_id < 0) {
++ ret = dev->clt_device_id;
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+@@ -1389,10 +1391,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ dev->pathname = kstrdup(pathname, GFP_KERNEL);
+ if (!dev->pathname) {
+ ret = -ENOMEM;
+- goto out_queues;
++ goto out_ida;
+ }
+
+- dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ mutex_init(&dev->lock);
+@@ -1407,6 +1408,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+
+ return dev;
+
++out_ida:
++ ida_free(&index_ida, dev->clt_device_id);
+ out_queues:
+ kfree(dev->hw_queues);
+ out_alloc:
+--
+2.51.0
+
--- /dev/null
+From 5d21381b95912fde4dd75c97e653799890c0f2a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Dec 2022 09:09:26 +0800
+Subject: block/rnbd-clt: fix wrong max ID in ida_alloc_max
+
+From: Guoqing Jiang <guoqing.jiang@linux.dev>
+
+[ Upstream commit 9d6033e350694a67885605674244d43c9559dc36 ]
+
+We need to pass 'end - 1' to ida_alloc_max after switch from
+ida_simple_get to ida_alloc_max.
+
+Otherwise smatch warns.
+
+drivers/block/rnbd/rnbd-clt.c:1460 init_dev() error: Calling ida_alloc_max() with a 'max' argument which is a power of 2. -1 missing?
+
+Fixes: 24afc15dbe21 ("block/rnbd: Remove a useless mutex")
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Guoqing Jiang <guoqing.jiang@linux.dev>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Link: https://lore.kernel.org/r/20221230010926.32243-1-guoqing.jiang@linux.dev
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c9b5645fd8ca ("block: rnbd-clt: Fix leaked ID in init_dev()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rnbd/rnbd-clt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index ced9c4d7b926..ea4b7002f438 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1378,7 +1378,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
++ ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+ GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+--
+2.51.0
+
--- /dev/null
+From bae595110f8582b6138b34cd4adb1c57ab3fd3b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Feb 2022 21:48:19 +0100
+Subject: block/rnbd: Remove a useless mutex
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 24afc15dbe218f860994f627b4ba1fb09225a298 ]
+
+According to lib/idr.c,
+ The IDA handles its own locking. It is safe to call any of the IDA
+ functions without synchronisation in your code.
+
+so the 'ida_lock' mutex can just be removed.
+It is here only to protect some ida_simple_get()/ida_simple_remove() calls.
+
+While at it, switch to ida_alloc_XXX()/ida_free() instead to
+ida_simple_get()/ida_simple_remove().
+The latter is deprecated and more verbose.
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Link: https://lore.kernel.org/r/7f9eccd8b1fce1bac45ac9b01a78cf72f54c0a61.1644266862.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c9b5645fd8ca ("block: rnbd-clt: Fix leaked ID in init_dev()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rnbd/rnbd-clt.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 71b86fee81c2..ced9c4d7b926 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -23,7 +23,6 @@ MODULE_LICENSE("GPL");
+
+ static int rnbd_client_major;
+ static DEFINE_IDA(index_ida);
+-static DEFINE_MUTEX(ida_lock);
+ static DEFINE_MUTEX(sess_lock);
+ static LIST_HEAD(sess_list);
+
+@@ -55,9 +54,7 @@ static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
+ if (!refcount_dec_and_test(&dev->refcount))
+ return;
+
+- mutex_lock(&ida_lock);
+- ida_simple_remove(&index_ida, dev->clt_device_id);
+- mutex_unlock(&ida_lock);
++ ida_free(&index_ida, dev->clt_device_id);
+ kfree(dev->hw_queues);
+ kfree(dev->pathname);
+ rnbd_clt_put_sess(dev->sess);
+@@ -1381,10 +1378,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- mutex_lock(&ida_lock);
+- ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
+- GFP_KERNEL);
+- mutex_unlock(&ida_lock);
++ ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
++ GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+--
+2.51.0
+
--- /dev/null
+From 9a1b05ea06b24df26918d5dff182ed225d52a98e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 17:48:08 +0800
+Subject: hwmon: (ibmpex) fix use-after-free in high/low store
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+[ Upstream commit 6946c726c3f4c36f0f049e6f97e88c510b15f65d ]
+
+The ibmpex_high_low_store() function retrieves driver data using
+dev_get_drvdata() and uses it without validation. This creates a race
+condition where the sysfs callback can be invoked after the data
+structure is freed, leading to use-after-free.
+
+Fix by adding a NULL check after dev_get_drvdata(), and reordering
+operations in the deletion path to prevent TOCTOU.
+
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Reported-by: Junrui Luo <moonafterrain@outlook.com>
+Fixes: 57c7c3a0fdea ("hwmon: IBM power meter driver")
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Link: https://lore.kernel.org/r/MEYPR01MB7886BE2F51BFE41875B74B60AFA0A@MEYPR01MB7886.ausprd01.prod.outlook.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ibmpex.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
+index fe90f0536d76..235d56e96879 100644
+--- a/drivers/hwmon/ibmpex.c
++++ b/drivers/hwmon/ibmpex.c
+@@ -282,6 +282,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev,
+ {
+ struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+
++ if (!data)
++ return -ENODEV;
++
+ ibmpex_reset_high_low_data(data);
+
+ return count;
+@@ -514,6 +517,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ {
+ int i, j;
+
++ hwmon_device_unregister(data->hwmon_dev);
++ dev_set_drvdata(data->bmc_device, NULL);
++
+ device_remove_file(data->bmc_device,
+ &sensor_dev_attr_reset_high_low.dev_attr);
+ device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr);
+@@ -527,8 +533,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ }
+
+ list_del(&data->list);
+- dev_set_drvdata(data->bmc_device, NULL);
+- hwmon_device_unregister(data->hwmon_dev);
++
+ ipmi_destroy_user(data->user);
+ kfree(data->sensors);
+ kfree(data);
+--
+2.51.0
+
--- /dev/null
+From 254f9c752384f19252bc99b24f6cd9d4f8c3ed9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 18:36:18 +0800
+Subject: MIPS: Fix a reference leak bug in ip22_check_gio()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+[ Upstream commit 680ad315caaa2860df411cb378bf3614d96c7648 ]
+
+If gio_device_register fails, gio_dev_put() is required to
+drop the gio_dev device reference.
+
+Fixes: e84de0c61905 ("MIPS: GIO bus support for SGI IP22/28")
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/sgi-ip22/ip22-gio.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
+index de0768a49ee8..ef671680740e 100644
+--- a/arch/mips/sgi-ip22/ip22-gio.c
++++ b/arch/mips/sgi-ip22/ip22-gio.c
+@@ -372,7 +372,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
+ gio_dev->resource.flags = IORESOURCE_MEM;
+ gio_dev->irq = irq;
+ dev_set_name(&gio_dev->dev, "%d", slotno);
+- gio_device_register(gio_dev);
++ if (gio_device_register(gio_dev))
++ gio_dev_put(gio_dev);
+ } else
+ printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
+ }
+--
+2.51.0
+
net-mlx5-fw_tracer-handle-escaped-percent-properly.patch
net-hns3-using-the-num_tqps-in-the-vf-driver-to-appl.patch
net-hns3-add-vlan-id-validation-before-using.patch
+hwmon-ibmpex-fix-use-after-free-in-high-low-store.patch
+mips-fix-a-reference-leak-bug-in-ip22_check_gio.patch
+block-rnbd-remove-a-useless-mutex.patch
+block-rnbd-clt-fix-wrong-max-id-in-ida_alloc_max.patch
+block-rnbd-clt-fix-leaked-id-in-init_dev.patch
--- /dev/null
+From d2f2ab9f8c57380ddd712181afb1d3d56cabf5db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 10:36:48 +0100
+Subject: block: rnbd-clt: Fix leaked ID in init_dev()
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+[ Upstream commit c9b5645fd8ca10f310e41b07540f98e6a9720f40 ]
+
+If kstrdup() fails in init_dev(), then the newly allocated ID is lost.
+
+Fixes: 64e8a6ece1a5 ("block/rnbd-clt: Dynamically alloc buffer for pathname & blk_symlink_name")
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rnbd/rnbd-clt.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 5eb8c7855970..4291bdbe36ba 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1440,9 +1440,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+- GFP_KERNEL);
+- if (ret < 0) {
++ dev->clt_device_id = ida_alloc_max(&index_ida,
++ (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
++ GFP_KERNEL);
++ if (dev->clt_device_id < 0) {
++ ret = dev->clt_device_id;
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+@@ -1451,10 +1453,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ dev->pathname = kstrdup(pathname, GFP_KERNEL);
+ if (!dev->pathname) {
+ ret = -ENOMEM;
+- goto out_queues;
++ goto out_ida;
+ }
+
+- dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ dev->nr_poll_queues = nr_poll_queues;
+@@ -1470,6 +1471,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+
+ return dev;
+
++out_ida:
++ ida_free(&index_ida, dev->clt_device_id);
+ out_queues:
+ kfree(dev->hw_queues);
+ out_alloc:
+--
+2.51.0
+
--- /dev/null
+From 7c45bb7f99a2b0d01c6735cc7c6f0cbcd06969f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 17:48:08 +0800
+Subject: hwmon: (ibmpex) fix use-after-free in high/low store
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+[ Upstream commit 6946c726c3f4c36f0f049e6f97e88c510b15f65d ]
+
+The ibmpex_high_low_store() function retrieves driver data using
+dev_get_drvdata() and uses it without validation. This creates a race
+condition where the sysfs callback can be invoked after the data
+structure is freed, leading to use-after-free.
+
+Fix by adding a NULL check after dev_get_drvdata(), and reordering
+operations in the deletion path to prevent TOCTOU.
+
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Reported-by: Junrui Luo <moonafterrain@outlook.com>
+Fixes: 57c7c3a0fdea ("hwmon: IBM power meter driver")
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Link: https://lore.kernel.org/r/MEYPR01MB7886BE2F51BFE41875B74B60AFA0A@MEYPR01MB7886.ausprd01.prod.outlook.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ibmpex.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
+index 1837cccd993c..9c7e2fa395db 100644
+--- a/drivers/hwmon/ibmpex.c
++++ b/drivers/hwmon/ibmpex.c
+@@ -282,6 +282,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev,
+ {
+ struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+
++ if (!data)
++ return -ENODEV;
++
+ ibmpex_reset_high_low_data(data);
+
+ return count;
+@@ -514,6 +517,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ {
+ int i, j;
+
++ hwmon_device_unregister(data->hwmon_dev);
++ dev_set_drvdata(data->bmc_device, NULL);
++
+ device_remove_file(data->bmc_device,
+ &sensor_dev_attr_reset_high_low.dev_attr);
+ device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr);
+@@ -527,8 +533,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ }
+
+ list_del(&data->list);
+- dev_set_drvdata(data->bmc_device, NULL);
+- hwmon_device_unregister(data->hwmon_dev);
++
+ ipmi_destroy_user(data->user);
+ kfree(data->sensors);
+ kfree(data);
+--
+2.51.0
+
--- /dev/null
+From 93815e222bfc223ee63ca33494ca43ddb61a1ad2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Dec 2025 19:43:43 +0300
+Subject: hwmon: (tmp401) fix overflow caused by default conversion rate value
+
+From: Alexey Simakov <bigalex934@gmail.com>
+
+[ Upstream commit 82f2aab35a1ab2e1460de06ef04c726460aed51c ]
+
+The driver computes conversion intervals using the formula:
+
+ interval = (1 << (7 - rate)) * 125ms
+
+where 'rate' is the sensor's conversion rate register value. According to
+the datasheet, the power-on reset value of this register is 0x8, which
+could be assigned to the register, after handling i2c general call.
+Using this default value causes a result greater than the bit width of
+left operand and an undefined behaviour in the calculation above, since
+shifting by values larger than the bit width is undefined behaviour as
+per C language standard.
+
+Limit the maximum usable 'rate' value to 7 to prevent undefined
+behaviour in calculations.
+
+Found by Linux Verification Center (linuxtesting.org) with Svace.
+
+Note (groeck):
+ This does not matter in practice unless someone overwrites the chip
+ configuration from outside the driver while the driver is loaded.
+ The conversion time register is initialized with a value of 5 (500ms)
+ when the driver is loaded, and the driver never writes a bad value.
+
+Fixes: ca53e7640de7 ("hwmon: (tmp401) Convert to _info API")
+Signed-off-by: Alexey Simakov <bigalex934@gmail.com>
+Link: https://lore.kernel.org/r/20251211164342.6291-1-bigalex934@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/tmp401.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
+index f358ba679626..f869d585c345 100644
+--- a/drivers/hwmon/tmp401.c
++++ b/drivers/hwmon/tmp401.c
+@@ -408,7 +408,7 @@ static int tmp401_chip_read(struct device *dev, u32 attr, int channel, long *val
+ ret = regmap_read(data->regmap, TMP401_CONVERSION_RATE, ®val);
+ if (ret < 0)
+ return ret;
+- *val = (1 << (7 - regval)) * 125;
++ *val = (1 << (7 - min(regval, 7))) * 125;
+ break;
+ case hwmon_chip_temp_reset_history:
+ *val = 0;
+--
+2.51.0
+
--- /dev/null
+From efb67be04d08dc6c67e6554af3ac7784ffc284ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 18:36:18 +0800
+Subject: MIPS: Fix a reference leak bug in ip22_check_gio()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+[ Upstream commit 680ad315caaa2860df411cb378bf3614d96c7648 ]
+
+If gio_device_register fails, gio_dev_put() is required to
+drop the gio_dev device reference.
+
+Fixes: e84de0c61905 ("MIPS: GIO bus support for SGI IP22/28")
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/sgi-ip22/ip22-gio.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
+index 8686e8c1c4e5..5b3d3d208db0 100644
+--- a/arch/mips/sgi-ip22/ip22-gio.c
++++ b/arch/mips/sgi-ip22/ip22-gio.c
+@@ -373,7 +373,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
+ gio_dev->resource.flags = IORESOURCE_MEM;
+ gio_dev->irq = irq;
+ dev_set_name(&gio_dev->dev, "%d", slotno);
+- gio_device_register(gio_dev);
++ if (gio_device_register(gio_dev))
++ gio_dev_put(gio_dev);
+ } else
+ printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
+ }
+--
+2.51.0
+
net-hns3-using-the-num_tqps-in-the-vf-driver-to-appl.patch
net-hns3-using-the-num_tqps-to-check-whether-tqp_ind.patch
net-hns3-add-vlan-id-validation-before-using.patch
+hwmon-ibmpex-fix-use-after-free-in-high-low-store.patch
+hwmon-tmp401-fix-overflow-caused-by-default-conversi.patch
+mips-fix-a-reference-leak-bug-in-ip22_check_gio.patch
+x86-xen-move-xen-upcall-handler.patch
+x86-xen-fix-sparse-warning-in-enlighten_pv.c.patch
+spi-cadence-quadspi-add-support-for-starfive-jh7110-.patch
+spi-cadence-quadspi-add-compatible-for-amd-pensando-.patch
+spi-cadence-quadspi-add-clock-configuration-for-star.patch
+spi-cadence-quadspi-add-missing-clk_disable_unprepar.patch
+spi-cadence-quadspi-fix-clock-disable-on-probe-failu.patch
+block-rnbd-clt-fix-leaked-id-in-init_dev.patch
--- /dev/null
+From 8af4c20bac53b733a1220ed6865013cd911ae392 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Aug 2023 10:02:53 +0800
+Subject: spi: cadence-quadspi: Add clock configuration for StarFive JH7110
+ QSPI
+
+From: William Qiu <william.qiu@starfivetech.com>
+
+[ Upstream commit 33f1ef6d4eb6bca726608ed939c9fd94d96ceefd ]
+
+Add JH7110's clock initialization code to the driver.
+
+Signed-off-by: William Qiu <william.qiu@starfivetech.com>
+Reviewed-by: Hal Feng <hal.feng@starfivetech.com>
+Link: https://lore.kernel.org/r/20230804020254.291239-3-william.qiu@starfivetech.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 1889dd208197 ("spi: cadence-quadspi: Fix clock disable on probe failure path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 67 +++++++++++++++++++++++++++++++
+ 1 file changed, 67 insertions(+)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index ca393f3fcd90..af0ec2a8c647 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -47,6 +47,12 @@
+
+ #define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
+
++enum {
++ CLK_QSPI_APB = 0,
++ CLK_QSPI_AHB,
++ CLK_QSPI_NUM,
++};
++
+ struct cqspi_st;
+
+ struct cqspi_flash_pdata {
+@@ -64,6 +70,7 @@ struct cqspi_st {
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct clk *clk;
++ struct clk *clks[CLK_QSPI_NUM];
+ unsigned int sclk;
+
+ void __iomem *iobase;
+@@ -91,6 +98,8 @@ struct cqspi_st {
+ bool wr_completion;
+ bool slow_sram;
+ bool apb_ahb_hazard;
++
++ bool is_jh7110; /* Flag for StarFive JH7110 SoC */
+ };
+
+ struct cqspi_driver_platdata {
+@@ -99,6 +108,8 @@ struct cqspi_driver_platdata {
+ int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
+ u_char *rxbuf, loff_t from_addr, size_t n_rx);
+ u32 (*get_dma_status)(struct cqspi_st *cqspi);
++ int (*jh7110_clk_init)(struct platform_device *pdev,
++ struct cqspi_st *cqspi);
+ };
+
+ /* Operation timeout value */
+@@ -1592,6 +1603,51 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
+ return 0;
+ }
+
++static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi)
++{
++ static struct clk_bulk_data qspiclk[] = {
++ { .id = "apb" },
++ { .id = "ahb" },
++ };
++
++ int ret = 0;
++
++ ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
++ if (ret) {
++ dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
++ return ret;
++ }
++
++ cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk;
++ cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk;
++
++ ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
++ if (ret) {
++ dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
++ return ret;
++ }
++
++ ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
++ if (ret) {
++ dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
++ goto disable_apb_clk;
++ }
++
++ cqspi->is_jh7110 = true;
++
++ return 0;
++
++disable_apb_clk:
++ clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
++
++ return ret;
++}
++
++static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi)
++{
++ clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
++ clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
++}
+ static int cqspi_probe(struct platform_device *pdev)
+ {
+ const struct cqspi_driver_platdata *ddata;
+@@ -1618,6 +1674,7 @@ static int cqspi_probe(struct platform_device *pdev)
+
+ cqspi->pdev = pdev;
+ cqspi->master = master;
++ cqspi->is_jh7110 = false;
+ platform_set_drvdata(pdev, cqspi);
+
+ /* Obtain configuration from OF. */
+@@ -1729,6 +1786,12 @@ static int cqspi_probe(struct platform_device *pdev)
+ if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
+ cqspi->apb_ahb_hazard = true;
+
++ if (ddata->jh7110_clk_init) {
++ ret = cqspi_jh7110_clk_init(pdev, cqspi);
++ if (ret)
++ goto probe_clk_failed;
++ }
++
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,versal-ospi-1.0")) {
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+@@ -1793,6 +1856,9 @@ static int cqspi_remove(struct platform_device *pdev)
+
+ clk_disable_unprepare(cqspi->clk);
+
++ if (cqspi->is_jh7110)
++ cqspi_jh7110_disable_clk(pdev, cqspi);
++
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+@@ -1860,6 +1926,7 @@ static const struct cqspi_driver_platdata versal_ospi = {
+
+ static const struct cqspi_driver_platdata jh7110_qspi = {
+ .quirks = CQSPI_DISABLE_DAC_MODE,
++ .jh7110_clk_init = cqspi_jh7110_clk_init,
+ };
+
+ static const struct cqspi_driver_platdata pensando_cdns_qspi = {
+--
+2.51.0
+
--- /dev/null
+From fc4ce4557d4b712187eb6f53bd4a423c13b3575d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 May 2023 11:16:05 -0700
+Subject: spi: cadence-quadspi: Add compatible for AMD Pensando Elba SoC
+
+From: Brad Larson <blarson@amd.com>
+
+[ Upstream commit f5c2f9f9584353bc816d76a65c97dd03dc61678c ]
+
+The AMD Pensando Elba SoC has the Cadence QSPI controller integrated.
+
+The quirk CQSPI_NEEDS_APB_AHB_HAZARD_WAR is added and if enabled
+a dummy readback from the controller is performed to ensure
+synchronization.
+
+Signed-off-by: Brad Larson <blarson@amd.com>
+Link: https://lore.kernel.org/r/20230515181606.65953-8-blarson@amd.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 1889dd208197 ("spi: cadence-quadspi: Fix clock disable on probe failure path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index fe537b8d87e5..ca393f3fcd90 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -40,6 +40,7 @@
+ #define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
+ #define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
+ #define CQSPI_SLOW_SRAM BIT(4)
++#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
+
+ /* Capabilities */
+ #define CQSPI_SUPPORTS_OCTAL BIT(0)
+@@ -89,6 +90,7 @@ struct cqspi_st {
+ u32 pd_dev_id;
+ bool wr_completion;
+ bool slow_sram;
++ bool apb_ahb_hazard;
+ };
+
+ struct cqspi_driver_platdata {
+@@ -983,6 +985,13 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
+ if (cqspi->wr_delay)
+ ndelay(cqspi->wr_delay);
+
++ /*
++ * If a hazard exists between the APB and AHB interfaces, perform a
++ * dummy readback from the controller to ensure synchronization.
++ */
++ if (cqspi->apb_ahb_hazard)
++ readl(reg_base + CQSPI_REG_INDIRECTWR);
++
+ while (remaining > 0) {
+ size_t write_words, mod_bytes;
+
+@@ -1717,6 +1726,8 @@ static int cqspi_probe(struct platform_device *pdev)
+ cqspi->wr_completion = false;
+ if (ddata->quirks & CQSPI_SLOW_SRAM)
+ cqspi->slow_sram = true;
++ if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
++ cqspi->apb_ahb_hazard = true;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,versal-ospi-1.0")) {
+@@ -1851,6 +1862,10 @@ static const struct cqspi_driver_platdata jh7110_qspi = {
+ .quirks = CQSPI_DISABLE_DAC_MODE,
+ };
+
++static const struct cqspi_driver_platdata pensando_cdns_qspi = {
++ .quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE,
++};
++
+ static const struct of_device_id cqspi_dt_ids[] = {
+ {
+ .compatible = "cdns,qspi-nor",
+@@ -1880,6 +1895,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
+ .compatible = "starfive,jh7110-qspi",
+ .data = &jh7110_qspi,
+ },
++ {
++ .compatible = "amd,pensando-elba-qspi",
++ .data = &pensando_cdns_qspi,
++ },
+ { /* end of table */ }
+ };
+
+--
+2.51.0
+
--- /dev/null
+From 9d805ee784f0de187417cea90f36dc0ecfebb69c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Nov 2023 16:11:47 +0800
+Subject: spi: cadence-quadspi: add missing clk_disable_unprepare() in
+ cqspi_probe()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 5cb475174cce1bfedf1025b6e235e2c43d81144f ]
+
+cqspi_jh7110_clk_init() is called after clk_prepare_enable(cqspi->clk),
+if it fails, it should goto label 'probe_reset_failed' to disable
+cqspi->clk.
+
+In the error path after calling cqspi_jh7110_clk_init(),
+cqspi_jh7110_disable_clk() need be called.
+
+Fixes: 33f1ef6d4eb6 ("spi: cadence-quadspi: Add clock configuration for StarFive JH7110 QSPI")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20231129081147.628004-1-yangyingliang@huawei.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 1889dd208197 ("spi: cadence-quadspi: Fix clock disable on probe failure path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index af0ec2a8c647..cb094ac3f211 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1789,7 +1789,7 @@ static int cqspi_probe(struct platform_device *pdev)
+ if (ddata->jh7110_clk_init) {
+ ret = cqspi_jh7110_clk_init(pdev, cqspi);
+ if (ret)
+- goto probe_clk_failed;
++ goto probe_reset_failed;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+@@ -1836,6 +1836,8 @@ static int cqspi_probe(struct platform_device *pdev)
+ probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+ probe_reset_failed:
++ if (cqspi->is_jh7110)
++ cqspi_jh7110_disable_clk(pdev, cqspi);
+ clk_disable_unprepare(cqspi->clk);
+ probe_clk_failed:
+ pm_runtime_put_sync(dev);
+--
+2.51.0
+
--- /dev/null
+From b71c605c7b29e547d14b55cbcc5a02a973a3c262 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Mar 2023 18:52:21 +0800
+Subject: spi: cadence-quadspi: Add support for StarFive JH7110 QSPI
+
+From: William Qiu <william.qiu@starfivetech.com>
+
+[ Upstream commit 47fef94afeae2a125607b6b45145594713471320 ]
+
+Add QSPI reset operation in device probe and add RISCV support to
+QUAD SPI Kconfig.
+
+Co-developed-by: Ziv Xu <ziv.xu@starfivetech.com>
+Signed-off-by: Ziv Xu <ziv.xu@starfivetech.com>
+Signed-off-by: William Qiu <william.qiu@starfivetech.com>
+Link: https://lore.kernel.org/r/20230302105221.197421-3-william.qiu@starfivetech.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 1889dd208197 ("spi: cadence-quadspi: Fix clock disable on probe failure path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/Kconfig | 2 +-
+ drivers/spi/spi-cadence-quadspi.c | 21 ++++++++++++++++++++-
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 15ea11ebcbe0..834fc0b8e27e 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -230,7 +230,7 @@ config SPI_CADENCE
+
+ config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+- depends on OF && (ARM || ARM64 || X86 || COMPILE_TEST)
++ depends on OF && (ARM || ARM64 || X86 || RISCV || COMPILE_TEST)
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 3200e55136cd..fe537b8d87e5 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1586,7 +1586,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
+ static int cqspi_probe(struct platform_device *pdev)
+ {
+ const struct cqspi_driver_platdata *ddata;
+- struct reset_control *rstc, *rstc_ocp;
++ struct reset_control *rstc, *rstc_ocp, *rstc_ref;
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct resource *res_ahb;
+@@ -1679,6 +1679,17 @@ static int cqspi_probe(struct platform_device *pdev)
+ goto probe_reset_failed;
+ }
+
++ if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) {
++ rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
++ if (IS_ERR(rstc_ref)) {
++ ret = PTR_ERR(rstc_ref);
++ dev_err(dev, "Cannot get QSPI REF reset.\n");
++ goto probe_reset_failed;
++ }
++ reset_control_assert(rstc_ref);
++ reset_control_deassert(rstc_ref);
++ }
++
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+
+@@ -1836,6 +1847,10 @@ static const struct cqspi_driver_platdata versal_ospi = {
+ .get_dma_status = cqspi_get_versal_dma_status,
+ };
+
++static const struct cqspi_driver_platdata jh7110_qspi = {
++ .quirks = CQSPI_DISABLE_DAC_MODE,
++};
++
+ static const struct of_device_id cqspi_dt_ids[] = {
+ {
+ .compatible = "cdns,qspi-nor",
+@@ -1861,6 +1876,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
+ .compatible = "intel,socfpga-qspi",
+ .data = &socfpga_qspi,
+ },
++ {
++ .compatible = "starfive,jh7110-qspi",
++ .data = &jh7110_qspi,
++ },
+ { /* end of table */ }
+ };
+
+--
+2.51.0
+
--- /dev/null
+From 2fd31d7673c636aa029d3101d892134552350a62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 12:53:12 +0530
+Subject: spi: cadence-quadspi: Fix clock disable on probe failure path
+
+From: Anurag Dutta <a-dutta@ti.com>
+
+[ Upstream commit 1889dd2081975ce1f6275b06cdebaa8d154847a9 ]
+
+When cqspi_request_mmap_dma() returns -EPROBE_DEFER after runtime PM
+is enabled, the error path calls clk_disable_unprepare() on an already
+disabled clock, causing an imbalance.
+
+Use pm_runtime_get_sync() to increment the usage counter and resume the
+device. This prevents runtime_suspend() from being invoked and causing
+a double clock disable.
+
+Fixes: 140623410536 ("mtd: spi-nor: Add driver for Cadence Quad SPI Flash Controller")
+Signed-off-by: Anurag Dutta <a-dutta@ti.com>
+Tested-by: Nishanth Menon <nm@ti.com>
+Link: https://patch.msgid.link/20251212072312.2711806-3-a-dutta@ti.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index cb094ac3f211..5b0ce13521f2 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1838,7 +1838,9 @@ static int cqspi_probe(struct platform_device *pdev)
+ probe_reset_failed:
+ if (cqspi->is_jh7110)
+ cqspi_jh7110_disable_clk(pdev, cqspi);
+- clk_disable_unprepare(cqspi->clk);
++
++ if (pm_runtime_get_sync(&pdev->dev) >= 0)
++ clk_disable_unprepare(cqspi->clk);
+ probe_clk_failed:
+ pm_runtime_put_sync(dev);
+ probe_pm_failed:
+--
+2.51.0
+
--- /dev/null
+From a11b3e1c202f05a299b0bc563353322f57782c03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 12:51:12 +0100
+Subject: x86/xen: Fix sparse warning in enlighten_pv.c
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit e5aff444e3a7bdeef5ea796a2099fc3c60a070fa ]
+
+The sparse tool issues a warning for arch/x86/xen/enlighten_pv.c:
+
+ arch/x86/xen/enlighten_pv.c:120:9: sparse: sparse: incorrect type
+ in initializer (different address spaces)
+ expected void const [noderef] __percpu *__vpp_verify
+ got bool *
+
+This is due to the percpu variable xen_in_preemptible_hcall being
+exported via EXPORT_SYMBOL_GPL() instead of EXPORT_PER_CPU_SYMBOL_GPL().
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202512140856.Ic6FetG6-lkp@intel.com/
+Fixes: fdfd811ddde3 ("x86/xen: allow privcmd hypercalls to be preempted")
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20251215115112.15072-1-jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/xen/enlighten_pv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 9a54049744bf..772970fce042 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -109,7 +109,7 @@ static int xen_cpu_dead_pv(unsigned int cpu);
+ * calls.
+ */
+ DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
++EXPORT_PER_CPU_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+ /*
+ * In case of scheduling the flag must be cleared and restored after
+--
+2.51.0
+
--- /dev/null
+From a84ff2d0b22124e5ce86d1aba380f964aa0d721c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Mar 2025 11:12:14 -0400
+Subject: x86/xen: Move Xen upcall handler
+
+From: Brian Gerst <brgerst@gmail.com>
+
+[ Upstream commit 1ab7b5ed44ba9bce581e225f40219b793bc779d6 ]
+
+Move the upcall handler to Xen-specific files.
+
+No functional changes.
+
+Signed-off-by: Brian Gerst <brgerst@gmail.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lore.kernel.org/r/20250314151220.862768-2-brgerst@gmail.com
+Stable-dep-of: e5aff444e3a7 ("x86/xen: Fix sparse warning in enlighten_pv.c")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/common.c | 72 -------------------------------------
+ arch/x86/xen/enlighten_pv.c | 69 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 69 insertions(+), 72 deletions(-)
+
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index e72dac092245..3c88bdf96e7a 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -20,11 +20,6 @@
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+
+-#ifdef CONFIG_XEN_PV
+-#include <xen/xen-ops.h>
+-#include <xen/events.h>
+-#endif
+-
+ #include <asm/apic.h>
+ #include <asm/desc.h>
+ #include <asm/traps.h>
+@@ -346,70 +341,3 @@ SYSCALL_DEFINE0(ni_syscall)
+ {
+ return -ENOSYS;
+ }
+-
+-#ifdef CONFIG_XEN_PV
+-#ifndef CONFIG_PREEMPTION
+-/*
+- * Some hypercalls issued by the toolstack can take many 10s of
+- * seconds. Allow tasks running hypercalls via the privcmd driver to
+- * be voluntarily preempted even if full kernel preemption is
+- * disabled.
+- *
+- * Such preemptible hypercalls are bracketed by
+- * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+- * calls.
+- */
+-DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+-
+-/*
+- * In case of scheduling the flag must be cleared and restored after
+- * returning from schedule as the task might move to a different CPU.
+- */
+-static __always_inline bool get_and_clear_inhcall(void)
+-{
+- bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+-
+- __this_cpu_write(xen_in_preemptible_hcall, false);
+- return inhcall;
+-}
+-
+-static __always_inline void restore_inhcall(bool inhcall)
+-{
+- __this_cpu_write(xen_in_preemptible_hcall, inhcall);
+-}
+-#else
+-static __always_inline bool get_and_clear_inhcall(void) { return false; }
+-static __always_inline void restore_inhcall(bool inhcall) { }
+-#endif
+-
+-static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+-{
+- struct pt_regs *old_regs = set_irq_regs(regs);
+-
+- inc_irq_stat(irq_hv_callback_count);
+-
+- xen_evtchn_do_upcall();
+-
+- set_irq_regs(old_regs);
+-}
+-
+-__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+-{
+- irqentry_state_t state = irqentry_enter(regs);
+- bool inhcall;
+-
+- instrumentation_begin();
+- run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+-
+- inhcall = get_and_clear_inhcall();
+- if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
+- irqentry_exit_cond_resched();
+- instrumentation_end();
+- restore_inhcall(inhcall);
+- } else {
+- instrumentation_end();
+- irqentry_exit(regs, state);
+- }
+-}
+-#endif /* CONFIG_XEN_PV */
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index ee8f452cc58b..9a54049744bf 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -71,6 +71,7 @@
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+ #include <asm/cpu.h>
++#include <asm/irq_stack.h>
+ #ifdef CONFIG_X86_IOPL_IOPERM
+ #include <asm/io_bitmap.h>
+ #endif
+@@ -96,6 +97,44 @@ void *xen_initial_gdt;
+ static int xen_cpu_up_prepare_pv(unsigned int cpu);
+ static int xen_cpu_dead_pv(unsigned int cpu);
+
++#ifndef CONFIG_PREEMPTION
++/*
++ * Some hypercalls issued by the toolstack can take many 10s of
++ * seconds. Allow tasks running hypercalls via the privcmd driver to
++ * be voluntarily preempted even if full kernel preemption is
++ * disabled.
++ *
++ * Such preemptible hypercalls are bracketed by
++ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
++ * calls.
++ */
++DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
++EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
++
++/*
++ * In case of scheduling the flag must be cleared and restored after
++ * returning from schedule as the task might move to a different CPU.
++ */
++static __always_inline bool get_and_clear_inhcall(void)
++{
++ bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
++
++ __this_cpu_write(xen_in_preemptible_hcall, false);
++ return inhcall;
++}
++
++static __always_inline void restore_inhcall(bool inhcall)
++{
++ __this_cpu_write(xen_in_preemptible_hcall, inhcall);
++}
++
++#else
++
++static __always_inline bool get_and_clear_inhcall(void) { return false; }
++static __always_inline void restore_inhcall(bool inhcall) { }
++
++#endif
++
+ struct tls_descs {
+ struct desc_struct desc[3];
+ };
+@@ -597,6 +636,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
+ }
+ #endif
+
++static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
++{
++ struct pt_regs *old_regs = set_irq_regs(regs);
++
++ inc_irq_stat(irq_hv_callback_count);
++
++ xen_evtchn_do_upcall();
++
++ set_irq_regs(old_regs);
++}
++
++__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
++{
++ irqentry_state_t state = irqentry_enter(regs);
++ bool inhcall;
++
++ instrumentation_begin();
++ run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
++
++ inhcall = get_and_clear_inhcall();
++ if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
++ irqentry_exit_cond_resched();
++ instrumentation_end();
++ restore_inhcall(inhcall);
++ } else {
++ instrumentation_end();
++ irqentry_exit(regs, state);
++ }
++}
++
+ struct trap_array_entry {
+ void (*orig)(void);
+ void (*xen)(void);
+--
+2.51.0
+
--- /dev/null
+From cb9d0c76820e12d58aad75fdb0f41b41a94a741f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:59:34 +0800
+Subject: arm64: kdump: Fix elfcorehdr overlap caused by reserved memory
+ processing reorder
+
+From: Jianpeng Chang <jianpeng.chang.cn@windriver.com>
+
+[ Upstream commit 3e8ade58b71b48913d21b647b2089e03e81f117e ]
+
+Commit 8a6e02d0c00e ("of: reserved_mem: Restructure how the reserved
+memory regions are processed") changed the processing order of reserved
+memory regions, causing elfcorehdr to overlap with dynamically allocated
+reserved memory regions during kdump kernel boot.
+
+The issue occurs because:
+1. kexec-tools allocates elfcorehdr in the last crashkernel reserved
+ memory region and passes it to the second kernel
+2. The problematic commit moved dynamic reserved memory allocation
+ (like bman-fbpr) to occur during fdt_scan_reserved_mem(), before
+ elfcorehdr reservation in fdt_reserve_elfcorehdr()
+3. bman-fbpr with 16MB alignment requirement can get allocated at
+ addresses that overlap with the elfcorehdr location
+4. When fdt_reserve_elfcorehdr() tries to reserve elfcorehdr memory,
+ overlap detection identifies the conflict and skips reservation
+5. kdump kernel fails with "Unable to handle kernel paging request"
+ because elfcorehdr memory is not properly reserved
+
+The boot log:
+Before 8a6e02d0c00e:
+ OF: fdt: Reserving 1 KiB of memory at 0xf4fff000 for elfcorehdr
+ OF: reserved mem: 0xf3000000..0xf3ffffff bman-fbpr
+
+After 8a6e02d0c00e:
+ OF: reserved mem: 0xf4000000..0xf4ffffff bman-fbpr
+ OF: fdt: elfcorehdr is overlapped
+
+Fix this by ensuring elfcorehdr reservation occurs before dynamic
+reserved memory allocation.
+
+Fixes: 8a6e02d0c00e ("of: reserved_mem: Restructure how the reserved memory regions are processed")
+Signed-off-by: Jianpeng Chang <jianpeng.chang.cn@windriver.com>
+Link: https://patch.msgid.link/20251205015934.700016-1-jianpeng.chang.cn@windriver.com
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/of/fdt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 8c80f4dc8b3f..0940955d3701 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -501,8 +501,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
+ if (!initial_boot_params)
+ return;
+
+- fdt_scan_reserved_mem();
+ fdt_reserve_elfcorehdr();
++ fdt_scan_reserved_mem();
+
+ /* Process header /memreserve/ fields */
+ for (n = 0; ; n++) {
+--
+2.51.0
+
--- /dev/null
+From 26066bf94937272778be202d93b9925261200e58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 10:36:48 +0100
+Subject: block: rnbd-clt: Fix leaked ID in init_dev()
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+[ Upstream commit c9b5645fd8ca10f310e41b07540f98e6a9720f40 ]
+
+If kstrdup() fails in init_dev(), then the newly allocated ID is lost.
+
+Fixes: 64e8a6ece1a5 ("block/rnbd-clt: Dynamically alloc buffer for pathname & blk_symlink_name")
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rnbd/rnbd-clt.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index c34695d2eea7..5be0581c3334 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1424,9 +1424,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+- GFP_KERNEL);
+- if (ret < 0) {
++ dev->clt_device_id = ida_alloc_max(&index_ida,
++ (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
++ GFP_KERNEL);
++ if (dev->clt_device_id < 0) {
++ ret = dev->clt_device_id;
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+@@ -1435,10 +1437,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ dev->pathname = kstrdup(pathname, GFP_KERNEL);
+ if (!dev->pathname) {
+ ret = -ENOMEM;
+- goto out_queues;
++ goto out_ida;
+ }
+
+- dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ dev->nr_poll_queues = nr_poll_queues;
+@@ -1454,6 +1455,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+
+ return dev;
+
++out_ida:
++ ida_free(&index_ida, dev->clt_device_id);
+ out_queues:
+ kfree(dev->hw_queues);
+ out_alloc:
+--
+2.51.0
+
--- /dev/null
+From 19e016b7f4d63d77e81b12a3074464ffd09a95ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Nov 2025 11:31:52 +0800
+Subject: drm/me/gsc: mei interrupt top half should be in irq disabled context
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Junxiao Chang <junxiao.chang@intel.com>
+
+[ Upstream commit 17445af7dcc7d645b6fb8951fd10c8b72cc7f23f ]
+
+MEI GSC interrupt comes from i915 or xe driver. It has top half and
+bottom half. Top half is called from i915/xe interrupt handler. It
+should be in irq disabled context.
+
+With RT kernel(PREEMPT_RT enabled), by default IRQ handler is in
+threaded IRQ. MEI GSC top half might be in threaded IRQ context.
+generic_handle_irq_safe API could be called from either IRQ or
+process context, it disables local IRQ then calls MEI GSC interrupt
+top half.
+
+This change fixes B580 GPU boot issue with RT enabled.
+
+Fixes: e02cea83d32d ("drm/xe/gsc: add Battlemage support")
+Tested-by: Baoli Zhang <baoli.zhang@intel.com>
+Signed-off-by: Junxiao Chang <junxiao.chang@intel.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251107033152.834960-1-junxiao.chang@intel.com
+Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
+(cherry picked from commit 3efadf028783a49ab2941294187c8b6dd86bf7da)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_heci_gsc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
+index 65b2e147c4b9..894a6bd33285 100644
+--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
++++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
+@@ -230,7 +230,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+- ret = generic_handle_irq(xe->heci_gsc.irq);
++ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+ }
+@@ -250,7 +250,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+- ret = generic_handle_irq(xe->heci_gsc.irq);
++ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+ }
+--
+2.51.0
+
--- /dev/null
+From a9085355ee602773326f80fd0cbfb0491d748ebb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 30 Nov 2025 23:40:05 +0100
+Subject: drm/panel: sony-td4353-jdi: Enable prepare_prev_first
+
+From: Marijn Suijten <marijn.suijten@somainline.org>
+
+[ Upstream commit 2b973ca48ff3ef1952091c8f988d7796781836c8 ]
+
+The DSI host must be enabled before our prepare function can run, which
+has to send its init sequence over DSI. Without enabling the host first
+the panel will not probe.
+
+Fixes: 9e15123eca79 ("drm/msm/dsi: Stop unconditionally powering up DSI hosts at modeset")
+Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Reviewed-by: Martin Botka <martin.botka@somainline.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patch.msgid.link/20251130-sony-akari-fix-panel-v1-1-1d27c60a55f5@somainline.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+index 472195d4bbbe..9ac3e0759efc 100644
+--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
++++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+@@ -274,6 +274,8 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
++ ctx->panel.prepare_prev_first = true;
++
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+--
+2.51.0
+
--- /dev/null
+From 5db99dbef7917da48a7130d25c1bc5a1c3cf5143 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 23:47:17 +0000
+Subject: drm/xe: Limit num_syncs to prevent oversized allocations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit 8e461304009135270e9ccf2d7e2dfe29daec9b60 ]
+
+The exec and vm_bind ioctl allow userspace to specify an arbitrary
+num_syncs value. Without bounds checking, a very large num_syncs
+can force an excessively large allocation, leading to kernel warnings
+from the page allocator as below.
+
+Introduce DRM_XE_MAX_SYNCS (set to 1024) and reject any request
+exceeding this limit.
+
+"
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 1217 at mm/page_alloc.c:5124 __alloc_frozen_pages_noprof+0x2f8/0x2180 mm/page_alloc.c:5124
+...
+Call Trace:
+ <TASK>
+ alloc_pages_mpol+0xe4/0x330 mm/mempolicy.c:2416
+ ___kmalloc_large_node+0xd8/0x110 mm/slub.c:4317
+ __kmalloc_large_node_noprof+0x18/0xe0 mm/slub.c:4348
+ __do_kmalloc_node mm/slub.c:4364 [inline]
+ __kmalloc_noprof+0x3d4/0x4b0 mm/slub.c:4388
+ kmalloc_noprof include/linux/slab.h:909 [inline]
+ kmalloc_array_noprof include/linux/slab.h:948 [inline]
+ xe_exec_ioctl+0xa47/0x1e70 drivers/gpu/drm/xe/xe_exec.c:158
+ drm_ioctl_kernel+0x1f1/0x3e0 drivers/gpu/drm/drm_ioctl.c:797
+ drm_ioctl+0x5e7/0xc50 drivers/gpu/drm/drm_ioctl.c:894
+ xe_drm_ioctl+0x10b/0x170 drivers/gpu/drm/xe/xe_device.c:224
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:598 [inline]
+ __se_sys_ioctl fs/ioctl.c:584 [inline]
+ __x64_sys_ioctl+0x18b/0x210 fs/ioctl.c:584
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xbb/0x380 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+...
+"
+
+v2: Add "Reported-by" and Cc stable kernels.
+v3: Change XE_MAX_SYNCS from 64 to 1024. (Matt & Ashutosh)
+v4: s/XE_MAX_SYNCS/DRM_XE_MAX_SYNCS/ (Matt)
+v5: Do the check at the top of the exec func. (Matt)
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Reported-by: Koen Koning <koen.koning@intel.com>
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6450
+Cc: <stable@vger.kernel.org> # v6.12+
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Michal Mrozek <michal.mrozek@intel.com>
+Cc: Carl Zhang <carl.zhang@intel.com>
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Cc: Ivan Briano <ivan.briano@intel.com>
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251205234715.2476561-5-shuicheng.lin@intel.com
+(cherry picked from commit b07bac9bd708ec468cd1b8a5fe70ae2ac9b0a11c)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Stable-dep-of: f8dd66bfb4e1 ("drm/xe/oa: Limit num_syncs to prevent oversized allocations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_exec.c | 3 ++-
+ drivers/gpu/drm/xe/xe_vm.c | 3 +++
+ include/uapi/drm/xe_drm.h | 1 +
+ 3 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
+index 31cca938956f..886d03ccf744 100644
+--- a/drivers/gpu/drm/xe/xe_exec.c
++++ b/drivers/gpu/drm/xe/xe_exec.c
+@@ -125,7 +125,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+
+ if (XE_IOCTL_DBG(xe, args->extensions) ||
+ XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+- XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
++ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]) ||
++ XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
+ return -EINVAL;
+
+ q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 30625ce691fa..79f08337cc27 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -2829,6 +2829,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
+ if (XE_IOCTL_DBG(xe, args->extensions))
+ return -EINVAL;
+
++ if (XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
++ return -EINVAL;
++
+ if (args->num_binds > 1) {
+ u64 __user *bind_user =
+ u64_to_user_ptr(args->vector_of_binds);
+diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
+index 4a8a4a63e99c..05f01ad0bfd9 100644
+--- a/include/uapi/drm/xe_drm.h
++++ b/include/uapi/drm/xe_drm.h
+@@ -1281,6 +1281,7 @@ struct drm_xe_exec {
+ /** @exec_queue_id: Exec queue ID for the batch buffer */
+ __u32 exec_queue_id;
+
++#define DRM_XE_MAX_SYNCS 1024
+ /** @num_syncs: Amount of struct drm_xe_sync in array. */
+ __u32 num_syncs;
+
+--
+2.51.0
+
--- /dev/null
+From 63e16a0178ffdc8524d4de746aa701f01df8d671 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 23:47:18 +0000
+Subject: drm/xe/oa: Limit num_syncs to prevent oversized allocations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit f8dd66bfb4e184c71bd26418a00546ebe7f5c17a ]
+
+The OA open parameters did not validate num_syncs, allowing
+userspace to pass arbitrarily large values, potentially
+leading to excessive allocations.
+
+Add check to ensure that num_syncs does not exceed DRM_XE_MAX_SYNCS,
+returning -EINVAL when the limit is violated.
+
+v2: use XE_IOCTL_DBG() and drop duplicated check. (Ashutosh)
+
+Fixes: c8507a25cebd ("drm/xe/oa/uapi: Define and parse OA sync properties")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251205234715.2476561-6-shuicheng.lin@intel.com
+(cherry picked from commit e057b2d2b8d815df3858a87dffafa2af37e5945b)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index d306ed0a0443..5916187cd78f 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1200,6 +1200,9 @@ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+ static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
++ if (XE_IOCTL_DBG(oa->xe, value > DRM_XE_MAX_SYNCS))
++ return -EINVAL;
++
+ param->num_syncs = value;
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From e69dc983cc92772596243f6614c5273b7533159b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 15:56:18 +0100
+Subject: drm/xe: Restore engine registers before restarting schedulers after
+ GT reset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jan Maslak <jan.maslak@intel.com>
+
+[ Upstream commit eed5b815fa49c17d513202f54e980eb91955d3ed ]
+
+During GT reset recovery in do_gt_restart(), xe_uc_start() was called
+before xe_reg_sr_apply_mmio() restored engine-specific registers. This
+created a race window where the scheduler could run jobs before hardware
+state was fully restored.
+
+This caused failures in eudebug tests (xe_exec_sip_eudebug@breakpoint-
+waitsip-*) where TD_CTL register (containing TD_CTL_GLOBAL_DEBUG_ENABLE)
+wasn't restored before jobs started executing. Breakpoints would fail to
+trigger SIP entry because the debug enable bit wasn't set yet.
+
+Fix by moving xe_uc_start() after all MMIO register restoration,
+including engine registers and CCS mode configuration, ensuring all
+hardware state is fully restored before any jobs can be scheduled.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Signed-off-by: Jan Maslak <jan.maslak@intel.com>
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251210145618.169625-2-jan.maslak@intel.com
+(cherry picked from commit 825aed0328588b2837636c1c5a0c48795d724617)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gt.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index de011f5629fd..292947e44a8a 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -721,9 +721,6 @@ static int do_gt_restart(struct xe_gt *gt)
+ xe_gt_sriov_pf_init_hw(gt);
+
+ xe_mocs_init(gt);
+- err = xe_uc_start(>->uc);
+- if (err)
+- return err;
+
+ for_each_hw_engine(hwe, gt, id) {
+ xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
+@@ -733,6 +730,10 @@ static int do_gt_restart(struct xe_gt *gt)
+ /* Get CCS mode in sync between sw/hw */
+ xe_gt_apply_ccs_mode(gt);
+
++ err = xe_uc_start(>->uc);
++ if (err)
++ return err;
++
+ /* Restore GT freq to expected values */
+ xe_gt_sanitize_freq(gt);
+
+--
+2.51.0
+
--- /dev/null
+From f4606a626a8576f70fb5ac5f8255e92e3080554a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Dec 2025 09:37:06 +0300
+Subject: hwmon: (dell-smm) Limit fan multiplier to avoid overflow
+
+From: Denis Sergeev <denserg.edu@gmail.com>
+
+[ Upstream commit 46c28bbbb150b80827e4bcbea231560af9d16854 ]
+
+The fan nominal speed returned by SMM is limited to 16 bits, but the
+driver allows the fan multiplier to be set via a module parameter.
+
+Clamp the computed fan multiplier so that fan_nominal_speed *
+i8k_fan_mult always fits into a signed 32-bit integer and refuse to
+initialize the driver if the value is too large.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 20bdeebc88269 ("hwmon: (dell-smm) Introduce helper function for data init")
+Signed-off-by: Denis Sergeev <denserg.edu@gmail.com>
+Link: https://lore.kernel.org/r/20251209063706.49008-1-denserg.edu@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/dell-smm-hwmon.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index f73f46193748..9df78861f5f8 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -75,6 +75,9 @@
+ #define DELL_SMM_NO_TEMP 10
+ #define DELL_SMM_NO_FANS 4
+
++/* limit fan multiplier to avoid overflow */
++#define DELL_SMM_MAX_FAN_MULT (INT_MAX / U16_MAX)
++
+ struct smm_regs {
+ unsigned int eax;
+ unsigned int ebx;
+@@ -1203,6 +1206,12 @@ static int dell_smm_init_data(struct device *dev, const struct dell_smm_ops *ops
+ data->ops = ops;
+ /* All options must not be 0 */
+ data->i8k_fan_mult = fan_mult ? : I8K_FAN_MULT;
++ if (data->i8k_fan_mult > DELL_SMM_MAX_FAN_MULT) {
++ dev_err(dev,
++ "fan multiplier %u is too large (max %u)\n",
++ data->i8k_fan_mult, DELL_SMM_MAX_FAN_MULT);
++ return -EINVAL;
++ }
+ data->i8k_fan_max = fan_max ? : I8K_FAN_HIGH;
+ data->i8k_pwm_mult = DIV_ROUND_UP(255, data->i8k_fan_max);
+
+--
+2.51.0
+
--- /dev/null
+From bab173ac1f5c2e1db18af35e3745bfd833fd2797 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 17:48:08 +0800
+Subject: hwmon: (ibmpex) fix use-after-free in high/low store
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+[ Upstream commit 6946c726c3f4c36f0f049e6f97e88c510b15f65d ]
+
+The ibmpex_high_low_store() function retrieves driver data using
+dev_get_drvdata() and uses it without validation. This creates a race
+condition where the sysfs callback can be invoked after the data
+structure is freed, leading to use-after-free.
+
+Fix by adding a NULL check after dev_get_drvdata(), and reordering
+operations in the deletion path to prevent TOCTOU.
+
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Reported-by: Junrui Luo <moonafterrain@outlook.com>
+Fixes: 57c7c3a0fdea ("hwmon: IBM power meter driver")
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Link: https://lore.kernel.org/r/MEYPR01MB7886BE2F51BFE41875B74B60AFA0A@MEYPR01MB7886.ausprd01.prod.outlook.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ibmpex.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
+index 228c5f6c6f38..129f3a9e8fe9 100644
+--- a/drivers/hwmon/ibmpex.c
++++ b/drivers/hwmon/ibmpex.c
+@@ -277,6 +277,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev,
+ {
+ struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+
++ if (!data)
++ return -ENODEV;
++
+ ibmpex_reset_high_low_data(data);
+
+ return count;
+@@ -508,6 +511,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ {
+ int i, j;
+
++ hwmon_device_unregister(data->hwmon_dev);
++ dev_set_drvdata(data->bmc_device, NULL);
++
+ device_remove_file(data->bmc_device,
+ &sensor_dev_attr_reset_high_low.dev_attr);
+ device_remove_file(data->bmc_device, &dev_attr_name.attr);
+@@ -521,8 +527,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ }
+
+ list_del(&data->list);
+- dev_set_drvdata(data->bmc_device, NULL);
+- hwmon_device_unregister(data->hwmon_dev);
++
+ ipmi_destroy_user(data->user);
+ kfree(data->sensors);
+ kfree(data);
+--
+2.51.0
+
--- /dev/null
+From d354565621327804544b083be094254beaa44957 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 16:11:05 +0000
+Subject: hwmon: (ltc4282): Fix reset_history file permissions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nuno Sá <nuno.sa@analog.com>
+
+[ Upstream commit b3db91c3bfea69a6c6258fea508f25a59c0feb1a ]
+
+The reset_history attributes are write only. Hence don't report them as
+readable just to return -EOPNOTSUPP later on.
+
+Fixes: cbc29538dbf7 ("hwmon: Add driver for LTC4282")
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Link: https://lore.kernel.org/r/20251219-ltc4282-fix-reset-history-v1-1-8eab974c124b@analog.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ltc4282.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
+index 953dfe2bd166..d98c57918ce3 100644
+--- a/drivers/hwmon/ltc4282.c
++++ b/drivers/hwmon/ltc4282.c
+@@ -1016,8 +1016,9 @@ static umode_t ltc4282_in_is_visible(const struct ltc4282_state *st, u32 attr)
+ case hwmon_in_max:
+ case hwmon_in_min:
+ case hwmon_in_enable:
+- case hwmon_in_reset_history:
+ return 0644;
++ case hwmon_in_reset_history:
++ return 0200;
+ default:
+ return 0;
+ }
+@@ -1036,8 +1037,9 @@ static umode_t ltc4282_curr_is_visible(u32 attr)
+ return 0444;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+- case hwmon_curr_reset_history:
+ return 0644;
++ case hwmon_curr_reset_history:
++ return 0200;
+ default:
+ return 0;
+ }
+@@ -1055,8 +1057,9 @@ static umode_t ltc4282_power_is_visible(u32 attr)
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_min:
+- case hwmon_power_reset_history:
+ return 0644;
++ case hwmon_power_reset_history:
++ return 0200;
+ default:
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From e5853a947c1d3a1d18d80bf4fe1aad8a2c02047f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Dec 2025 19:43:43 +0300
+Subject: hwmon: (tmp401) fix overflow caused by default conversion rate value
+
+From: Alexey Simakov <bigalex934@gmail.com>
+
+[ Upstream commit 82f2aab35a1ab2e1460de06ef04c726460aed51c ]
+
+The driver computes conversion intervals using the formula:
+
+ interval = (1 << (7 - rate)) * 125ms
+
+where 'rate' is the sensor's conversion rate register value. According to
+the datasheet, the power-on reset value of this register is 0x8, which
+could be assigned to the register, after handling i2c general call.
+Using this default value causes a result greater than the bit width of
+left operand and an undefined behaviour in the calculation above, since
+shifting by values larger than the bit width is undefined behaviour as
+per C language standard.
+
+Limit the maximum usable 'rate' value to 7 to prevent undefined
+behaviour in calculations.
+
+Found by Linux Verification Center (linuxtesting.org) with Svace.
+
+Note (groeck):
+ This does not matter in practice unless someone overwrites the chip
+ configuration from outside the driver while the driver is loaded.
+ The conversion time register is initialized with a value of 5 (500ms)
+ when the driver is loaded, and the driver never writes a bad value.
+
+Fixes: ca53e7640de7 ("hwmon: (tmp401) Convert to _info API")
+Signed-off-by: Alexey Simakov <bigalex934@gmail.com>
+Link: https://lore.kernel.org/r/20251211164342.6291-1-bigalex934@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/tmp401.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
+index 02c5a3bb1071..84aaf817144c 100644
+--- a/drivers/hwmon/tmp401.c
++++ b/drivers/hwmon/tmp401.c
+@@ -401,7 +401,7 @@ static int tmp401_chip_read(struct device *dev, u32 attr, int channel, long *val
+ ret = regmap_read(data->regmap, TMP401_CONVERSION_RATE, ®val);
+ if (ret < 0)
+ return ret;
+- *val = (1 << (7 - regval)) * 125;
++ *val = (1 << (7 - min(regval, 7))) * 125;
+ break;
+ case hwmon_chip_temp_reset_history:
+ *val = 0;
+--
+2.51.0
+
--- /dev/null
+From d57736576c4ab8f8306e8862538f33a803bc892e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 18:36:18 +0800
+Subject: MIPS: Fix a reference leak bug in ip22_check_gio()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+[ Upstream commit 680ad315caaa2860df411cb378bf3614d96c7648 ]
+
+If gio_device_register fails, gio_dev_put() is required to
+drop the gio_dev device reference.
+
+Fixes: e84de0c61905 ("MIPS: GIO bus support for SGI IP22/28")
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/sgi-ip22/ip22-gio.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
+index d20eec742bfa..f6e66c858e69 100644
+--- a/arch/mips/sgi-ip22/ip22-gio.c
++++ b/arch/mips/sgi-ip22/ip22-gio.c
+@@ -373,7 +373,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
+ gio_dev->resource.flags = IORESOURCE_MEM;
+ gio_dev->irq = irq;
+ dev_set_name(&gio_dev->dev, "%d", slotno);
+- gio_device_register(gio_dev);
++ if (gio_device_register(gio_dev))
++ gio_dev_put(gio_dev);
+ } else
+ printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
+ }
+--
+2.51.0
+
net-hns3-using-the-num_tqps-in-the-vf-driver-to-appl.patch
net-hns3-using-the-num_tqps-to-check-whether-tqp_ind.patch
net-hns3-add-vlan-id-validation-before-using.patch
+hwmon-dell-smm-limit-fan-multiplier-to-avoid-overflo.patch
+hwmon-ibmpex-fix-use-after-free-in-high-low-store.patch
+hwmon-tmp401-fix-overflow-caused-by-default-conversi.patch
+drm-me-gsc-mei-interrupt-top-half-should-be-in-irq-d.patch
+drm-xe-restore-engine-registers-before-restarting-sc.patch
+mips-fix-a-reference-leak-bug-in-ip22_check_gio.patch
+drm-panel-sony-td4353-jdi-enable-prepare_prev_first.patch
+x86-xen-move-xen-upcall-handler.patch
+x86-xen-fix-sparse-warning-in-enlighten_pv.c.patch
+arm64-kdump-fix-elfcorehdr-overlap-caused-by-reserve.patch
+spi-cadence-quadspi-fix-clock-disable-on-probe-failu.patch
+block-rnbd-clt-fix-leaked-id-in-init_dev.patch
+drm-xe-limit-num_syncs-to-prevent-oversized-allocati.patch
+drm-xe-oa-limit-num_syncs-to-prevent-oversized-alloc.patch
+hwmon-ltc4282-fix-reset_history-file-permissions.patch
--- /dev/null
+From 37ee75516aa44591fa8310a2001a926a2f6764b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 12:53:12 +0530
+Subject: spi: cadence-quadspi: Fix clock disable on probe failure path
+
+From: Anurag Dutta <a-dutta@ti.com>
+
+[ Upstream commit 1889dd2081975ce1f6275b06cdebaa8d154847a9 ]
+
+When cqspi_request_mmap_dma() returns -EPROBE_DEFER after runtime PM
+is enabled, the error path calls clk_disable_unprepare() on an already
+disabled clock, causing an imbalance.
+
+Use pm_runtime_get_sync() to increment the usage counter and resume the
+device. This prevents runtime_suspend() from being invoked and causing
+a double clock disable.
+
+Fixes: 140623410536 ("mtd: spi-nor: Add driver for Cadence Quad SPI Flash Controller")
+Signed-off-by: Anurag Dutta <a-dutta@ti.com>
+Tested-by: Nishanth Menon <nm@ti.com>
+Link: https://patch.msgid.link/20251212072312.2711806-3-a-dutta@ti.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 06e43b184d85..aca3681d32ea 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1959,7 +1959,9 @@ static int cqspi_probe(struct platform_device *pdev)
+ probe_reset_failed:
+ if (cqspi->is_jh7110)
+ cqspi_jh7110_disable_clk(pdev, cqspi);
+- clk_disable_unprepare(cqspi->clk);
++
++ if (pm_runtime_get_sync(&pdev->dev) >= 0)
++ clk_disable_unprepare(cqspi->clk);
+ probe_clk_failed:
+ return ret;
+ }
+--
+2.51.0
+
--- /dev/null
+From 613e3a4cbac77ccd7b8a0edfa6b10603387a449b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 12:51:12 +0100
+Subject: x86/xen: Fix sparse warning in enlighten_pv.c
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit e5aff444e3a7bdeef5ea796a2099fc3c60a070fa ]
+
+The sparse tool issues a warning for arch/x86/xen/enlighten_pv.c:
+
+ arch/x86/xen/enlighten_pv.c:120:9: sparse: sparse: incorrect type
+ in initializer (different address spaces)
+ expected void const [noderef] __percpu *__vpp_verify
+ got bool *
+
+This is due to the percpu variable xen_in_preemptible_hcall being
+exported via EXPORT_SYMBOL_GPL() instead of EXPORT_PER_CPU_SYMBOL_GPL().
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202512140856.Ic6FetG6-lkp@intel.com/
+Fixes: fdfd811ddde3 ("x86/xen: allow privcmd hypercalls to be preempted")
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20251215115112.15072-1-jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/xen/enlighten_pv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 6e9d1b287f8e..bf750cd599b2 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -106,7 +106,7 @@ static int xen_cpu_dead_pv(unsigned int cpu);
+ * calls.
+ */
+ DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
++EXPORT_PER_CPU_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+ /*
+ * In case of scheduling the flag must be cleared and restored after
+--
+2.51.0
+
--- /dev/null
+From 6ab5b1bb97853c5c924b195dee3d46cf51fbe443 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Mar 2025 11:12:14 -0400
+Subject: x86/xen: Move Xen upcall handler
+
+From: Brian Gerst <brgerst@gmail.com>
+
+[ Upstream commit 1ab7b5ed44ba9bce581e225f40219b793bc779d6 ]
+
+Move the upcall handler to Xen-specific files.
+
+No functional changes.
+
+Signed-off-by: Brian Gerst <brgerst@gmail.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lore.kernel.org/r/20250314151220.862768-2-brgerst@gmail.com
+Stable-dep-of: e5aff444e3a7 ("x86/xen: Fix sparse warning in enlighten_pv.c")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/common.c | 72 -------------------------------------
+ arch/x86/xen/enlighten_pv.c | 69 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 69 insertions(+), 72 deletions(-)
+
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 51efd2da4d7f..7b9321c48a90 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -21,11 +21,6 @@
+ #include <linux/uaccess.h>
+ #include <linux/init.h>
+
+-#ifdef CONFIG_XEN_PV
+-#include <xen/xen-ops.h>
+-#include <xen/events.h>
+-#endif
+-
+ #include <asm/apic.h>
+ #include <asm/desc.h>
+ #include <asm/traps.h>
+@@ -454,70 +449,3 @@ SYSCALL_DEFINE0(ni_syscall)
+ {
+ return -ENOSYS;
+ }
+-
+-#ifdef CONFIG_XEN_PV
+-#ifndef CONFIG_PREEMPTION
+-/*
+- * Some hypercalls issued by the toolstack can take many 10s of
+- * seconds. Allow tasks running hypercalls via the privcmd driver to
+- * be voluntarily preempted even if full kernel preemption is
+- * disabled.
+- *
+- * Such preemptible hypercalls are bracketed by
+- * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+- * calls.
+- */
+-DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+-
+-/*
+- * In case of scheduling the flag must be cleared and restored after
+- * returning from schedule as the task might move to a different CPU.
+- */
+-static __always_inline bool get_and_clear_inhcall(void)
+-{
+- bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+-
+- __this_cpu_write(xen_in_preemptible_hcall, false);
+- return inhcall;
+-}
+-
+-static __always_inline void restore_inhcall(bool inhcall)
+-{
+- __this_cpu_write(xen_in_preemptible_hcall, inhcall);
+-}
+-#else
+-static __always_inline bool get_and_clear_inhcall(void) { return false; }
+-static __always_inline void restore_inhcall(bool inhcall) { }
+-#endif
+-
+-static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+-{
+- struct pt_regs *old_regs = set_irq_regs(regs);
+-
+- inc_irq_stat(irq_hv_callback_count);
+-
+- xen_evtchn_do_upcall();
+-
+- set_irq_regs(old_regs);
+-}
+-
+-__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+-{
+- irqentry_state_t state = irqentry_enter(regs);
+- bool inhcall;
+-
+- instrumentation_begin();
+- run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+-
+- inhcall = get_and_clear_inhcall();
+- if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
+- irqentry_exit_cond_resched();
+- instrumentation_end();
+- restore_inhcall(inhcall);
+- } else {
+- instrumentation_end();
+- irqentry_exit(regs, state);
+- }
+-}
+-#endif /* CONFIG_XEN_PV */
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index e033d5594265..6e9d1b287f8e 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -72,6 +72,7 @@
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+ #include <asm/cpu.h>
++#include <asm/irq_stack.h>
+ #ifdef CONFIG_X86_IOPL_IOPERM
+ #include <asm/io_bitmap.h>
+ #endif
+@@ -93,6 +94,44 @@ void *xen_initial_gdt;
+ static int xen_cpu_up_prepare_pv(unsigned int cpu);
+ static int xen_cpu_dead_pv(unsigned int cpu);
+
++#ifndef CONFIG_PREEMPTION
++/*
++ * Some hypercalls issued by the toolstack can take many 10s of
++ * seconds. Allow tasks running hypercalls via the privcmd driver to
++ * be voluntarily preempted even if full kernel preemption is
++ * disabled.
++ *
++ * Such preemptible hypercalls are bracketed by
++ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
++ * calls.
++ */
++DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
++EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
++
++/*
++ * In case of scheduling the flag must be cleared and restored after
++ * returning from schedule as the task might move to a different CPU.
++ */
++static __always_inline bool get_and_clear_inhcall(void)
++{
++ bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
++
++ __this_cpu_write(xen_in_preemptible_hcall, false);
++ return inhcall;
++}
++
++static __always_inline void restore_inhcall(bool inhcall)
++{
++ __this_cpu_write(xen_in_preemptible_hcall, inhcall);
++}
++
++#else
++
++static __always_inline bool get_and_clear_inhcall(void) { return false; }
++static __always_inline void restore_inhcall(bool inhcall) { }
++
++#endif
++
+ struct tls_descs {
+ struct desc_struct desc[3];
+ };
+@@ -686,6 +725,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
+ }
+ #endif
+
++static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
++{
++ struct pt_regs *old_regs = set_irq_regs(regs);
++
++ inc_irq_stat(irq_hv_callback_count);
++
++ xen_evtchn_do_upcall();
++
++ set_irq_regs(old_regs);
++}
++
++__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
++{
++ irqentry_state_t state = irqentry_enter(regs);
++ bool inhcall;
++
++ instrumentation_begin();
++ run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
++
++ inhcall = get_and_clear_inhcall();
++ if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
++ irqentry_exit_cond_resched();
++ instrumentation_end();
++ restore_inhcall(inhcall);
++ } else {
++ instrumentation_end();
++ irqentry_exit(regs, state);
++ }
++}
++
+ struct trap_array_entry {
+ void (*orig)(void);
+ void (*xen)(void);
+--
+2.51.0
+
--- /dev/null
+From 46fffaa4ae226730de66c98c9c9c457144489913 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 14:41:15 +0530
+Subject: amd/iommu: Preserve domain ids inside the kdump kernel
+
+From: Sairaj Kodilkar <sarunkod@amd.com>
+
+[ Upstream commit c2e8dc1222c2136e714d5d972dce7e64924e4ed8 ]
+
+Currently AMD IOMMU driver does not reserve domain ids programmed in the
+DTE while reusing the device table inside kdump kernel. This can cause
+reallocation of these domain ids for newer domains that are created by
+the kdump kernel, which can lead to potential IO_PAGE_FAULTs
+
+Hence reserve these ids inside pdom_ids.
+
+Fixes: 38e5f33ee359 ("iommu/amd: Reuse device table for kdump")
+Signed-off-by: Sairaj Kodilkar <sarunkod@amd.com>
+Reported-by: Jason Gunthorpe <jgg@nvidia.com>
+Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index f2991c11867c..14eb9de33ccb 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -1136,9 +1136,13 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
+ static bool __reuse_device_table(struct amd_iommu *iommu)
+ {
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+- u32 lo, hi, old_devtb_size;
++ struct dev_table_entry *old_dev_tbl_entry;
++ u32 lo, hi, old_devtb_size, devid;
+ phys_addr_t old_devtb_phys;
++ u16 dom_id;
++ bool dte_v;
+ u64 entry;
++ int ret;
+
+ /* Each IOMMU use separate device table with the same size */
+ lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+@@ -1173,6 +1177,23 @@ static bool __reuse_device_table(struct amd_iommu *iommu)
+ return false;
+ }
+
++ for (devid = 0; devid <= pci_seg->last_bdf; devid++) {
++ old_dev_tbl_entry = &pci_seg->old_dev_tbl_cpy[devid];
++ dte_v = FIELD_GET(DTE_FLAG_V, old_dev_tbl_entry->data[0]);
++ dom_id = FIELD_GET(DEV_DOMID_MASK, old_dev_tbl_entry->data[1]);
++
++ if (!dte_v || !dom_id)
++ continue;
++ /*
++ * ID reservation can fail with -ENOSPC when there
++ * are multiple devices present in the same domain,
++ * hence check only for -ENOMEM.
++ */
++ ret = ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_KERNEL);
++ if (ret == -ENOMEM)
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 0bcda5dba3c67fd4b2d01336f74d7a5a043cc756 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 22:59:38 +0100
+Subject: arm64: dts: mediatek: Apply mt8395-radxa DT overlay at build time
+
+From: Rob Herring (Arm) <robh@kernel.org>
+
+[ Upstream commit ce7b1d58609abc2941a1f38094147f439fb74233 ]
+
+It's a requirement that DT overlays be applied at build time in order to
+validate them as overlays are not validated on their own.
+
+Add missing target for mt8395-radxa hd panel overlay.
+
+Fixes: 4c8ff61199a7 ("arm64: dts: mediatek: mt8395-radxa-nio-12l: Add Radxa 8 HD panel")
+Signed-off-by: Frank Wunderlich <frank-w@public-files.de>
+Acked-by: AngeloGioacchino Del Regno <angelogiaocchino.delregno@collabora.com>
+Link: https://patch.msgid.link/20251205215940.19287-1-linux@fw-web.de
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/mediatek/Makefile | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
+index a4df4c21399e..b50799b2a65f 100644
+--- a/arch/arm64/boot/dts/mediatek/Makefile
++++ b/arch/arm64/boot/dts/mediatek/Makefile
+@@ -104,6 +104,8 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8390-genio-700-evk.dtb
+ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-kontron-3-5-sbc-i1200.dtb
+ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-radxa-nio-12l.dtb
+ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-radxa-nio-12l-8-hd-panel.dtbo
++mt8395-radxa-nio-12l-8-hd-panel-dtbs := mt8395-radxa-nio-12l.dtb mt8395-radxa-nio-12l-8-hd-panel.dtbo
++dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-radxa-nio-12l-8-hd-panel.dtb
+ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
+
+ # Device tree overlays support
+--
+2.51.0
+
--- /dev/null
+From 720866f382dcddbda4c5235e0a9c4a97e2fa2a5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:59:34 +0800
+Subject: arm64: kdump: Fix elfcorehdr overlap caused by reserved memory
+ processing reorder
+
+From: Jianpeng Chang <jianpeng.chang.cn@windriver.com>
+
+[ Upstream commit 3e8ade58b71b48913d21b647b2089e03e81f117e ]
+
+Commit 8a6e02d0c00e ("of: reserved_mem: Restructure how the reserved
+memory regions are processed") changed the processing order of reserved
+memory regions, causing elfcorehdr to overlap with dynamically allocated
+reserved memory regions during kdump kernel boot.
+
+The issue occurs because:
+1. kexec-tools allocates elfcorehdr in the last crashkernel reserved
+ memory region and passes it to the second kernel
+2. The problematic commit moved dynamic reserved memory allocation
+ (like bman-fbpr) to occur during fdt_scan_reserved_mem(), before
+ elfcorehdr reservation in fdt_reserve_elfcorehdr()
+3. bman-fbpr with 16MB alignment requirement can get allocated at
+ addresses that overlap with the elfcorehdr location
+4. When fdt_reserve_elfcorehdr() tries to reserve elfcorehdr memory,
+ overlap detection identifies the conflict and skips reservation
+5. kdump kernel fails with "Unable to handle kernel paging request"
+ because elfcorehdr memory is not properly reserved
+
+The boot log:
+Before 8a6e02d0c00e:
+ OF: fdt: Reserving 1 KiB of memory at 0xf4fff000 for elfcorehdr
+ OF: reserved mem: 0xf3000000..0xf3ffffff bman-fbpr
+
+After 8a6e02d0c00e:
+ OF: reserved mem: 0xf4000000..0xf4ffffff bman-fbpr
+ OF: fdt: elfcorehdr is overlapped
+
+Fix this by ensuring elfcorehdr reservation occurs before dynamic
+reserved memory allocation.
+
+Fixes: 8a6e02d0c00e ("of: reserved_mem: Restructure how the reserved memory regions are processed")
+Signed-off-by: Jianpeng Chang <jianpeng.chang.cn@windriver.com>
+Link: https://patch.msgid.link/20251205015934.700016-1-jianpeng.chang.cn@windriver.com
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/of/fdt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index fdaee4906836..3851ce244585 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -503,8 +503,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
+ if (!initial_boot_params)
+ return;
+
+- fdt_scan_reserved_mem();
+ fdt_reserve_elfcorehdr();
++ fdt_scan_reserved_mem();
+
+ /* Process header /memreserve/ fields */
+ for (n = 0; ; n++) {
+--
+2.51.0
+
--- /dev/null
+From 13bd89e740e4ce789f06fafb1c23e18a0a8cb85e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 22:35:00 +0800
+Subject: block: fix race between wbt_enable_default and IO submission
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 9869d3a6fed381f3b98404e26e1afc75d680cbf9 ]
+
+When wbt_enable_default() is moved out of queue freezing in elevator_change(),
+it can cause the wbt inflight counter to become negative (-1), leading to hung
+tasks in the writeback path. Tasks get stuck in wbt_wait() because the counter
+is in an inconsistent state.
+
+The issue occurs because wbt_enable_default() could race with IO submission,
+allowing the counter to be decremented before proper initialization. This manifests
+as:
+
+ rq_wait[0]:
+ inflight: -1
+ has_waiters: True
+
+rwb_enabled() checks the state, which can be updated exactly between wbt_wait()
+(rq_qos_throttle()) and wbt_track()(rq_qos_track()), then the inflight counter
+will become negative.
+
+And results in hung task warnings like:
+ task:kworker/u24:39 state:D stack:0 pid:14767
+ Call Trace:
+ rq_qos_wait+0xb4/0x150
+ wbt_wait+0xa9/0x100
+ __rq_qos_throttle+0x24/0x40
+ blk_mq_submit_bio+0x672/0x7b0
+ ...
+
+Fix this by:
+
+1. Splitting wbt_enable_default() into:
+ - __wbt_enable_default(): Returns true if wbt_init() should be called
+ - wbt_enable_default(): Wrapper for existing callers (no init)
+ - wbt_init_enable_default(): New function that checks and inits WBT
+
+2. Using wbt_init_enable_default() in blk_register_queue() to ensure
+ proper initialization during queue registration
+
+3. Move wbt_init() out of wbt_enable_default() which is only for enabling
+ disabled wbt from bfq and iocost, and wbt_init() isn't needed. Then the
+ original lock warning can be avoided.
+
+4. Removing the ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT flag and its handling
+ code since it's no longer needed
+
+This ensures WBT is properly initialized before any IO can be submitted,
+preventing the counter from going negative.
+
+Cc: Nilay Shroff <nilay@linux.ibm.com>
+Cc: Yu Kuai <yukuai@fnnas.com>
+Cc: Guangwu Zhang <guazhang@redhat.com>
+Fixes: 78c271344b6f ("block: move wbt_enable_default() out of queue freezing from sched ->exit()")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bfq-iosched.c | 2 +-
+ block/blk-sysfs.c | 2 +-
+ block/blk-wbt.c | 20 ++++++++++++++++----
+ block/blk-wbt.h | 5 +++++
+ block/elevator.c | 4 ----
+ block/elevator.h | 1 -
+ 6 files changed, 23 insertions(+), 11 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 4a8d3d96bfe4..6e54b1d3d8bc 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -7181,7 +7181,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ blk_stat_disable_accounting(bfqd->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_DISABLE_WBT_DEF, bfqd->queue);
+- set_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT, &e->flags);
++ wbt_enable_default(bfqd->queue->disk);
+
+ kfree(bfqd);
+ }
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 76c47fe9b8d6..c0e4daaf9610 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -942,7 +942,7 @@ int blk_register_queue(struct gendisk *disk)
+ elevator_set_default(q);
+
+ blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+- wbt_enable_default(disk);
++ wbt_init_enable_default(disk);
+
+ /* Now everything is ready and send out KOBJ_ADD uevent */
+ kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
+diff --git a/block/blk-wbt.c b/block/blk-wbt.c
+index eb8037bae0bd..0974875f77bd 100644
+--- a/block/blk-wbt.c
++++ b/block/blk-wbt.c
+@@ -699,7 +699,7 @@ static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
+ /*
+ * Enable wbt if defaults are configured that way
+ */
+-void wbt_enable_default(struct gendisk *disk)
++static bool __wbt_enable_default(struct gendisk *disk)
+ {
+ struct request_queue *q = disk->queue;
+ struct rq_qos *rqos;
+@@ -716,19 +716,31 @@ void wbt_enable_default(struct gendisk *disk)
+ if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
+ RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
+ mutex_unlock(&disk->rqos_state_mutex);
+- return;
++ return false;
+ }
+ mutex_unlock(&disk->rqos_state_mutex);
+
+ /* Queue not registered? Maybe shutting down... */
+ if (!blk_queue_registered(q))
+- return;
++ return false;
+
+ if (queue_is_mq(q) && enable)
+- wbt_init(disk);
++ return true;
++ return false;
++}
++
++void wbt_enable_default(struct gendisk *disk)
++{
++ __wbt_enable_default(disk);
+ }
+ EXPORT_SYMBOL_GPL(wbt_enable_default);
+
++void wbt_init_enable_default(struct gendisk *disk)
++{
++ if (__wbt_enable_default(disk))
++ WARN_ON_ONCE(wbt_init(disk));
++}
++
+ u64 wbt_default_latency_nsec(struct request_queue *q)
+ {
+ /*
+diff --git a/block/blk-wbt.h b/block/blk-wbt.h
+index e5fc653b9b76..925f22475738 100644
+--- a/block/blk-wbt.h
++++ b/block/blk-wbt.h
+@@ -5,6 +5,7 @@
+ #ifdef CONFIG_BLK_WBT
+
+ int wbt_init(struct gendisk *disk);
++void wbt_init_enable_default(struct gendisk *disk);
+ void wbt_disable_default(struct gendisk *disk);
+ void wbt_enable_default(struct gendisk *disk);
+
+@@ -16,6 +17,10 @@ u64 wbt_default_latency_nsec(struct request_queue *);
+
+ #else
+
++static inline void wbt_init_enable_default(struct gendisk *disk)
++{
++}
++
+ static inline void wbt_disable_default(struct gendisk *disk)
+ {
+ }
+diff --git a/block/elevator.c b/block/elevator.c
+index 5b37ef44f52d..a2f8b2251dc6 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -633,14 +633,10 @@ static int elevator_change_done(struct request_queue *q,
+ .et = ctx->old->et,
+ .data = ctx->old->elevator_data
+ };
+- bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
+- &ctx->old->flags);
+
+ elv_unregister_queue(q, ctx->old);
+ blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set);
+ kobject_put(&ctx->old->kobj);
+- if (enable_wbt)
+- wbt_enable_default(q->disk);
+ }
+ if (ctx->new) {
+ ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
+diff --git a/block/elevator.h b/block/elevator.h
+index 3ee1d494f48a..021726376042 100644
+--- a/block/elevator.h
++++ b/block/elevator.h
+@@ -156,7 +156,6 @@ struct elevator_queue
+
+ #define ELEVATOR_FLAG_REGISTERED 0
+ #define ELEVATOR_FLAG_DYING 1
+-#define ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT 2
+
+ /*
+ * block elevator interface
+--
+2.51.0
+
--- /dev/null
+From 39370981763ff3c08e4cd857e6ca30d87aefd498 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Nov 2025 14:28:20 +0530
+Subject: block: introduce alloc_sched_data and free_sched_data elevator
+ methods
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 61019afdf6ac17c8e8f9c42665aa1fa82f04a3e2 ]
+
+The recent lockdep splat [1] highlights a potential deadlock risk
+involving ->elevator_lock and ->freeze_lock dependencies on ->pcpu_alloc_
+mutex. The trace shows that the issue occurs when the Kyber scheduler
+allocates dynamic memory for its elevator data during initialization.
+
+To address this, introduce two new elevator operation callbacks:
+->alloc_sched_data and ->free_sched_data. The subsequent patch would
+build upon these newly introduced methods to suppress lockdep splat[1].
+
+[1] https://lore.kernel.org/all/CAGVVp+VNW4M-5DZMNoADp6o2VKFhi7KxWpTDkcnVyjO0=-D5+A@mail.gmail.com/
+
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 9869d3a6fed3 ("block: fix race between wbt_enable_default and IO submission")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.h | 24 ++++++++++++++++++++++++
+ block/elevator.h | 2 ++
+ 2 files changed, 26 insertions(+)
+
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 1f8e58dd4b49..4e1b86e85a8a 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -38,6 +38,30 @@ void blk_mq_free_sched_res(struct elevator_resources *res,
+ struct blk_mq_tag_set *set);
+ void blk_mq_free_sched_res_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set);
++/*
++ * blk_mq_alloc_sched_data() - Allocates scheduler specific data
++ * Returns:
++ * - Pointer to allocated data on success
++ * - NULL if no allocation needed
++ * - ERR_PTR(-ENOMEM) in case of failure
++ */
++static inline void *blk_mq_alloc_sched_data(struct request_queue *q,
++ struct elevator_type *e)
++{
++ void *sched_data;
++
++ if (!e || !e->ops.alloc_sched_data)
++ return NULL;
++
++ sched_data = e->ops.alloc_sched_data(q);
++ return (sched_data) ?: ERR_PTR(-ENOMEM);
++}
++
++static inline void blk_mq_free_sched_data(struct elevator_type *e, void *data)
++{
++ if (e && e->ops.free_sched_data)
++ e->ops.free_sched_data(data);
++}
+
+ static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+ {
+diff --git a/block/elevator.h b/block/elevator.h
+index 621a63597249..e34043f6da26 100644
+--- a/block/elevator.h
++++ b/block/elevator.h
+@@ -58,6 +58,8 @@ struct elevator_mq_ops {
+ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*depth_updated)(struct request_queue *);
++ void *(*alloc_sched_data)(struct request_queue *);
++ void (*free_sched_data)(void *);
+
+ bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+ bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
+--
+2.51.0
+
--- /dev/null
+From 4363c8b7c4a79a9b7129f6839bf0ffcfb9e9dc4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Nov 2025 14:28:19 +0530
+Subject: block: move elevator tags into struct elevator_resources
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 04728ce90966c54417fd8120a3820104d18ba68d ]
+
+This patch introduces a new structure, struct elevator_resources, to
+group together all elevator-related resources that share the same
+lifetime. As a first step, this change moves the elevator tag pointer
+from struct elv_change_ctx into the new struct elevator_resources.
+
+Additionally, rename blk_mq_alloc_sched_tags_batch() and
+blk_mq_free_sched_tags_batch() to blk_mq_alloc_sched_res_batch() and
+blk_mq_free_sched_res_batch(), respectively. Introduce two new wrapper
+helpers, blk_mq_alloc_sched_res() and blk_mq_free_sched_res(), around
+blk_mq_alloc_sched_tags() and blk_mq_free_sched_tags().
+
+These changes pave the way for consolidating the allocation and freeing
+of elevator-specific resources into common helper functions. This
+refactoring improves encapsulation and prepares the code for future
+extensions, allowing additional elevator-specific data to be added to
+struct elevator_resources without cluttering struct elv_change_ctx.
+
+Subsequent patches will extend struct elevator_resources to include
+other elevator-related data.
+
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai@fnnas.com>
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 9869d3a6fed3 ("block: fix race between wbt_enable_default and IO submission")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.c | 48 ++++++++++++++++++++++++++++++--------------
+ block/blk-mq-sched.h | 10 ++++++---
+ block/blk-mq.c | 2 +-
+ block/elevator.c | 31 ++++++++++++++--------------
+ block/elevator.h | 9 +++++++--
+ 5 files changed, 64 insertions(+), 36 deletions(-)
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 3d9386555a50..03ff16c49976 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -427,7 +427,16 @@ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ kfree(et);
+ }
+
+-void blk_mq_free_sched_tags_batch(struct xarray *elv_tbl,
++void blk_mq_free_sched_res(struct elevator_resources *res,
++ struct blk_mq_tag_set *set)
++{
++ if (res->et) {
++ blk_mq_free_sched_tags(res->et, set);
++ res->et = NULL;
++ }
++}
++
++void blk_mq_free_sched_res_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set)
+ {
+ struct request_queue *q;
+@@ -445,12 +454,11 @@ void blk_mq_free_sched_tags_batch(struct xarray *elv_tbl,
+ */
+ if (q->elevator) {
+ ctx = xa_load(elv_tbl, q->id);
+- if (!ctx || !ctx->et) {
++ if (!ctx) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+- blk_mq_free_sched_tags(ctx->et, set);
+- ctx->et = NULL;
++ blk_mq_free_sched_res(&ctx->res, set);
+ }
+ }
+ }
+@@ -532,12 +540,24 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ return NULL;
+ }
+
+-int blk_mq_alloc_sched_tags_batch(struct xarray *elv_tbl,
++int blk_mq_alloc_sched_res(struct request_queue *q,
++ struct elevator_resources *res, unsigned int nr_hw_queues)
++{
++ struct blk_mq_tag_set *set = q->tag_set;
++
++ res->et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
++ blk_mq_default_nr_requests(set));
++ if (!res->et)
++ return -ENOMEM;
++
++ return 0;
++}
++
++int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
+ {
+ struct elv_change_ctx *ctx;
+ struct request_queue *q;
+- struct elevator_tags *et;
+ int ret = -ENOMEM;
+
+ lockdep_assert_held_write(&set->update_nr_hwq_lock);
+@@ -557,11 +577,10 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *elv_tbl,
+ goto out_unwind;
+ }
+
+- ctx->et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+- blk_mq_default_nr_requests(set));
+- if (!ctx->et)
++ ret = blk_mq_alloc_sched_res(q, &ctx->res,
++ nr_hw_queues);
++ if (ret)
+ goto out_unwind;
+-
+ }
+ }
+ return 0;
+@@ -569,10 +588,8 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *elv_tbl,
+ list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
+ if (q->elevator) {
+ ctx = xa_load(elv_tbl, q->id);
+- if (ctx && ctx->et) {
+- blk_mq_free_sched_tags(ctx->et, set);
+- ctx->et = NULL;
+- }
++ if (ctx)
++ blk_mq_free_sched_res(&ctx->res, set);
+ }
+ }
+ return ret;
+@@ -580,9 +597,10 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *elv_tbl,
+
+ /* caller must have a reference to @e, will grab another one if successful */
+ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+- struct elevator_tags *et)
++ struct elevator_resources *res)
+ {
+ unsigned int flags = q->tag_set->flags;
++ struct elevator_tags *et = res->et;
+ struct blk_mq_hw_ctx *hctx;
+ struct elevator_queue *eq;
+ unsigned long i;
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 2fddbc91a235..1f8e58dd4b49 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -19,20 +19,24 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
+ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
+
+ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+- struct elevator_tags *et);
++ struct elevator_resources *res);
+ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+ void blk_mq_sched_free_rqs(struct request_queue *q);
+
+ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ unsigned int nr_hw_queues, unsigned int nr_requests);
+-int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
++int blk_mq_alloc_sched_res(struct request_queue *q,
++ struct elevator_resources *res, unsigned int nr_hw_queues);
++int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
+ int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set);
+ void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl);
+ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ struct blk_mq_tag_set *set);
+-void blk_mq_free_sched_tags_batch(struct xarray *et_table,
++void blk_mq_free_sched_res(struct elevator_resources *res,
++ struct blk_mq_tag_set *set);
++void blk_mq_free_sched_res_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set);
+
+ static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 180d45db5624..ea5f948af7a4 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -5079,7 +5079,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ if (blk_mq_alloc_sched_ctx_batch(&elv_tbl, set) < 0)
+ goto out_free_ctx;
+
+- if (blk_mq_alloc_sched_tags_batch(&elv_tbl, set, nr_hw_queues) < 0)
++ if (blk_mq_alloc_sched_res_batch(&elv_tbl, set, nr_hw_queues) < 0)
+ goto out_free_ctx;
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+diff --git a/block/elevator.c b/block/elevator.c
+index cd7bdff205c8..cbec292a4af5 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -580,7 +580,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
+ }
+
+ if (new_e) {
+- ret = blk_mq_init_sched(q, new_e, ctx->et);
++ ret = blk_mq_init_sched(q, new_e, &ctx->res);
+ if (ret)
+ goto out_unfreeze;
+ ctx->new = q->elevator;
+@@ -604,7 +604,8 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
+ return ret;
+ }
+
+-static void elv_exit_and_release(struct request_queue *q)
++static void elv_exit_and_release(struct elv_change_ctx *ctx,
++ struct request_queue *q)
+ {
+ struct elevator_queue *e;
+ unsigned memflags;
+@@ -616,7 +617,7 @@ static void elv_exit_and_release(struct request_queue *q)
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+ if (e) {
+- blk_mq_free_sched_tags(e->et, q->tag_set);
++ blk_mq_free_sched_res(&ctx->res, q->tag_set);
+ kobject_put(&e->kobj);
+ }
+ }
+@@ -627,11 +628,12 @@ static int elevator_change_done(struct request_queue *q,
+ int ret = 0;
+
+ if (ctx->old) {
++ struct elevator_resources res = {.et = ctx->old->et};
+ bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
+ &ctx->old->flags);
+
+ elv_unregister_queue(q, ctx->old);
+- blk_mq_free_sched_tags(ctx->old->et, q->tag_set);
++ blk_mq_free_sched_res(&res, q->tag_set);
+ kobject_put(&ctx->old->kobj);
+ if (enable_wbt)
+ wbt_enable_default(q->disk);
+@@ -639,7 +641,7 @@ static int elevator_change_done(struct request_queue *q,
+ if (ctx->new) {
+ ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
+ if (ret)
+- elv_exit_and_release(q);
++ elv_exit_and_release(ctx, q);
+ }
+ return ret;
+ }
+@@ -656,10 +658,9 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ lockdep_assert_held(&set->update_nr_hwq_lock);
+
+ if (strncmp(ctx->name, "none", 4)) {
+- ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
+- blk_mq_default_nr_requests(set));
+- if (!ctx->et)
+- return -ENOMEM;
++ ret = blk_mq_alloc_sched_res(q, &ctx->res, set->nr_hw_queues);
++ if (ret)
++ return ret;
+ }
+
+ memflags = blk_mq_freeze_queue(q);
+@@ -681,10 +682,10 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ if (!ret)
+ ret = elevator_change_done(q, ctx);
+ /*
+- * Free sched tags if it's allocated but we couldn't switch elevator.
++ * Free sched resource if it's allocated but we couldn't switch elevator.
+ */
+- if (ctx->et && !ctx->new)
+- blk_mq_free_sched_tags(ctx->et, set);
++ if (!ctx->new)
++ blk_mq_free_sched_res(&ctx->res, set);
+
+ return ret;
+ }
+@@ -711,10 +712,10 @@ void elv_update_nr_hw_queues(struct request_queue *q,
+ if (!ret)
+ WARN_ON_ONCE(elevator_change_done(q, ctx));
+ /*
+- * Free sched tags if it's allocated but we couldn't switch elevator.
++ * Free sched resource if it's allocated but we couldn't switch elevator.
+ */
+- if (ctx->et && !ctx->new)
+- blk_mq_free_sched_tags(ctx->et, set);
++ if (!ctx->new)
++ blk_mq_free_sched_res(&ctx->res, set);
+ }
+
+ /*
+diff --git a/block/elevator.h b/block/elevator.h
+index bad43182361e..621a63597249 100644
+--- a/block/elevator.h
++++ b/block/elevator.h
+@@ -32,6 +32,11 @@ struct elevator_tags {
+ struct blk_mq_tags *tags[];
+ };
+
++struct elevator_resources {
++ /* holds elevator tags */
++ struct elevator_tags *et;
++};
++
+ /* Holding context data for changing elevator */
+ struct elv_change_ctx {
+ const char *name;
+@@ -43,8 +48,8 @@ struct elv_change_ctx {
+ struct elevator_queue *new;
+ /* store elevator type */
+ struct elevator_type *type;
+- /* holds sched tags data */
+- struct elevator_tags *et;
++ /* store elevator resources */
++ struct elevator_resources res;
+ };
+
+ struct elevator_mq_ops {
+--
+2.51.0
+
--- /dev/null
+From ca8fe9eac31ee4c6d384b4845f89b1afb36ae37c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 10:36:48 +0100
+Subject: block: rnbd-clt: Fix leaked ID in init_dev()
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+[ Upstream commit c9b5645fd8ca10f310e41b07540f98e6a9720f40 ]
+
+If kstrdup() fails in init_dev(), then the newly allocated ID is lost.
+
+Fixes: 64e8a6ece1a5 ("block/rnbd-clt: Dynamically alloc buffer for pathname & blk_symlink_name")
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rnbd/rnbd-clt.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index f1409e54010a..d1c354636315 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1423,9 +1423,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+- GFP_KERNEL);
+- if (ret < 0) {
++ dev->clt_device_id = ida_alloc_max(&index_ida,
++ (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
++ GFP_KERNEL);
++ if (dev->clt_device_id < 0) {
++ ret = dev->clt_device_id;
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+@@ -1434,10 +1436,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ dev->pathname = kstrdup(pathname, GFP_KERNEL);
+ if (!dev->pathname) {
+ ret = -ENOMEM;
+- goto out_queues;
++ goto out_ida;
+ }
+
+- dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ dev->nr_poll_queues = nr_poll_queues;
+@@ -1453,6 +1454,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+
+ return dev;
+
++out_ida:
++ ida_free(&index_ida, dev->clt_device_id);
+ out_queues:
+ kfree(dev->hw_queues);
+ out_alloc:
+--
+2.51.0
+
--- /dev/null
+From a28041cbc3206fef25cff32906a2d02b69360c74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Nov 2025 14:28:18 +0530
+Subject: block: unify elevator tags and type xarrays into struct
+ elv_change_ctx
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 232143b605387b372dee0ec7830f93b93df5f67d ]
+
+Currently, the nr_hw_queues update path manages two disjoint xarrays —
+one for elevator tags and another for elevator type — both used during
+elevator switching. Maintaining these two parallel structures for the
+same purpose adds unnecessary complexity and potential for mismatched
+state.
+
+This patch unifies both xarrays into a single structure, struct
+elv_change_ctx, which holds all per-queue elevator change context. A
+single xarray, named elv_tbl, now maps each queue (q->id) in a tagset
+to its corresponding elv_change_ctx entry, encapsulating the elevator
+tags, type and name references.
+
+This unification simplifies the code, improves maintainability, and
+clarifies ownership of per-queue elevator state.
+
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai@fnnas.com>
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 9869d3a6fed3 ("block: fix race between wbt_enable_default and IO submission")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.c | 76 +++++++++++++++++++++++++++++++++-----------
+ block/blk-mq-sched.h | 3 ++
+ block/blk-mq.c | 50 +++++++++++++++++------------
+ block/blk.h | 7 ++--
+ block/elevator.c | 31 ++++--------------
+ block/elevator.h | 15 +++++++++
+ 6 files changed, 115 insertions(+), 67 deletions(-)
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index e0bed16485c3..3d9386555a50 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -427,11 +427,11 @@ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ kfree(et);
+ }
+
+-void blk_mq_free_sched_tags_batch(struct xarray *et_table,
++void blk_mq_free_sched_tags_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set)
+ {
+ struct request_queue *q;
+- struct elevator_tags *et;
++ struct elv_change_ctx *ctx;
+
+ lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+@@ -444,13 +444,47 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
+ * concurrently.
+ */
+ if (q->elevator) {
+- et = xa_load(et_table, q->id);
+- if (unlikely(!et))
++ ctx = xa_load(elv_tbl, q->id);
++ if (!ctx || !ctx->et) {
+ WARN_ON_ONCE(1);
+- else
+- blk_mq_free_sched_tags(et, set);
++ continue;
++ }
++ blk_mq_free_sched_tags(ctx->et, set);
++ ctx->et = NULL;
++ }
++ }
++}
++
++void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl)
++{
++ unsigned long i;
++ struct elv_change_ctx *ctx;
++
++ xa_for_each(elv_tbl, i, ctx) {
++ xa_erase(elv_tbl, i);
++ kfree(ctx);
++ }
++}
++
++int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
++ struct blk_mq_tag_set *set)
++{
++ struct request_queue *q;
++ struct elv_change_ctx *ctx;
++
++ lockdep_assert_held_write(&set->update_nr_hwq_lock);
++
++ list_for_each_entry(q, &set->tag_list, tag_set_list) {
++ ctx = kzalloc(sizeof(struct elv_change_ctx), GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++
++ if (xa_insert(elv_tbl, q->id, ctx, GFP_KERNEL)) {
++ kfree(ctx);
++ return -ENOMEM;
+ }
+ }
++ return 0;
+ }
+
+ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+@@ -498,12 +532,13 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ return NULL;
+ }
+
+-int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
++int blk_mq_alloc_sched_tags_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
+ {
++ struct elv_change_ctx *ctx;
+ struct request_queue *q;
+ struct elevator_tags *et;
+- gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
++ int ret = -ENOMEM;
+
+ lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+@@ -516,26 +551,31 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
+ * concurrently.
+ */
+ if (q->elevator) {
+- et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
++ ctx = xa_load(elv_tbl, q->id);
++ if (WARN_ON_ONCE(!ctx)) {
++ ret = -ENOENT;
++ goto out_unwind;
++ }
++
++ ctx->et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+ blk_mq_default_nr_requests(set));
+- if (!et)
++ if (!ctx->et)
+ goto out_unwind;
+- if (xa_insert(et_table, q->id, et, gfp))
+- goto out_free_tags;
++
+ }
+ }
+ return 0;
+-out_free_tags:
+- blk_mq_free_sched_tags(et, set);
+ out_unwind:
+ list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
+ if (q->elevator) {
+- et = xa_load(et_table, q->id);
+- if (et)
+- blk_mq_free_sched_tags(et, set);
++ ctx = xa_load(elv_tbl, q->id);
++ if (ctx && ctx->et) {
++ blk_mq_free_sched_tags(ctx->et, set);
++ ctx->et = NULL;
++ }
+ }
+ }
+- return -ENOMEM;
++ return ret;
+ }
+
+ /* caller must have a reference to @e, will grab another one if successful */
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 8e21a6b1415d..2fddbc91a235 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -27,6 +27,9 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ unsigned int nr_hw_queues, unsigned int nr_requests);
+ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
++int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
++ struct blk_mq_tag_set *set);
++void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl);
+ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ struct blk_mq_tag_set *set);
+ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index f901aeba8552..180d45db5624 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -4996,27 +4996,28 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
+ * Switch back to the elevator type stored in the xarray.
+ */
+ static void blk_mq_elv_switch_back(struct request_queue *q,
+- struct xarray *elv_tbl, struct xarray *et_tbl)
++ struct xarray *elv_tbl)
+ {
+- struct elevator_type *e = xa_load(elv_tbl, q->id);
+- struct elevator_tags *t = xa_load(et_tbl, q->id);
++ struct elv_change_ctx *ctx = xa_load(elv_tbl, q->id);
++
++ if (WARN_ON_ONCE(!ctx))
++ return;
+
+ /* The elv_update_nr_hw_queues unfreezes the queue. */
+- elv_update_nr_hw_queues(q, e, t);
++ elv_update_nr_hw_queues(q, ctx);
+
+ /* Drop the reference acquired in blk_mq_elv_switch_none. */
+- if (e)
+- elevator_put(e);
++ if (ctx->type)
++ elevator_put(ctx->type);
+ }
+
+ /*
+- * Stores elevator type in xarray and set current elevator to none. It uses
+- * q->id as an index to store the elevator type into the xarray.
++ * Stores elevator name and type in ctx and set current elevator to none.
+ */
+ static int blk_mq_elv_switch_none(struct request_queue *q,
+ struct xarray *elv_tbl)
+ {
+- int ret = 0;
++ struct elv_change_ctx *ctx;
+
+ lockdep_assert_held_write(&q->tag_set->update_nr_hwq_lock);
+
+@@ -5028,10 +5029,11 @@ static int blk_mq_elv_switch_none(struct request_queue *q,
+ * can't run concurrently.
+ */
+ if (q->elevator) {
++ ctx = xa_load(elv_tbl, q->id);
++ if (WARN_ON_ONCE(!ctx))
++ return -ENOENT;
+
+- ret = xa_insert(elv_tbl, q->id, q->elevator->type, GFP_KERNEL);
+- if (WARN_ON_ONCE(ret))
+- return ret;
++ ctx->name = q->elevator->type->elevator_name;
+
+ /*
+ * Before we switch elevator to 'none', take a reference to
+@@ -5042,9 +5044,14 @@ static int blk_mq_elv_switch_none(struct request_queue *q,
+ */
+ __elevator_get(q->elevator->type);
+
++ /*
++ * Store elevator type so that we can release the reference
++ * taken above later.
++ */
++ ctx->type = q->elevator->type;
+ elevator_set_none(q);
+ }
+- return ret;
++ return 0;
+ }
+
+ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+@@ -5054,7 +5061,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ int prev_nr_hw_queues = set->nr_hw_queues;
+ unsigned int memflags;
+ int i;
+- struct xarray elv_tbl, et_tbl;
++ struct xarray elv_tbl;
+ bool queues_frozen = false;
+
+ lockdep_assert_held(&set->tag_list_lock);
+@@ -5068,11 +5075,12 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+
+ memflags = memalloc_noio_save();
+
+- xa_init(&et_tbl);
+- if (blk_mq_alloc_sched_tags_batch(&et_tbl, set, nr_hw_queues) < 0)
+- goto out_memalloc_restore;
+-
+ xa_init(&elv_tbl);
++ if (blk_mq_alloc_sched_ctx_batch(&elv_tbl, set) < 0)
++ goto out_free_ctx;
++
++ if (blk_mq_alloc_sched_tags_batch(&elv_tbl, set, nr_hw_queues) < 0)
++ goto out_free_ctx;
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_debugfs_unregister_hctxs(q);
+@@ -5118,7 +5126,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ /* switch_back expects queue to be frozen */
+ if (!queues_frozen)
+ blk_mq_freeze_queue_nomemsave(q);
+- blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
++ blk_mq_elv_switch_back(q, &elv_tbl);
+ }
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+@@ -5129,9 +5137,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ blk_mq_add_hw_queues_cpuhp(q);
+ }
+
++out_free_ctx:
++ blk_mq_free_sched_ctx_batch(&elv_tbl);
+ xa_destroy(&elv_tbl);
+- xa_destroy(&et_tbl);
+-out_memalloc_restore:
+ memalloc_noio_restore(memflags);
+
+ /* Free the excess tags when nr_hw_queues shrink. */
+diff --git a/block/blk.h b/block/blk.h
+index 170794632135..a7992680f9e1 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -11,8 +11,7 @@
+ #include <xen/xen.h>
+ #include "blk-crypto-internal.h"
+
+-struct elevator_type;
+-struct elevator_tags;
++struct elv_change_ctx;
+
+ /*
+ * Default upper limit for the software max_sectors limit used for regular I/Os.
+@@ -333,8 +332,8 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
+
+ bool blk_insert_flush(struct request *rq);
+
+-void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
+- struct elevator_tags *t);
++void elv_update_nr_hw_queues(struct request_queue *q,
++ struct elv_change_ctx *ctx);
+ void elevator_set_default(struct request_queue *q);
+ void elevator_set_none(struct request_queue *q);
+
+diff --git a/block/elevator.c b/block/elevator.c
+index e2ebfbf107b3..cd7bdff205c8 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -45,19 +45,6 @@
+ #include "blk-wbt.h"
+ #include "blk-cgroup.h"
+
+-/* Holding context data for changing elevator */
+-struct elv_change_ctx {
+- const char *name;
+- bool no_uevent;
+-
+- /* for unregistering old elevator */
+- struct elevator_queue *old;
+- /* for registering new elevator */
+- struct elevator_queue *new;
+- /* holds sched tags data */
+- struct elevator_tags *et;
+-};
+-
+ static DEFINE_SPINLOCK(elv_list_lock);
+ static LIST_HEAD(elv_list);
+
+@@ -706,32 +693,28 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ * The I/O scheduler depends on the number of hardware queues, this forces a
+ * reattachment when nr_hw_queues changes.
+ */
+-void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
+- struct elevator_tags *t)
++void elv_update_nr_hw_queues(struct request_queue *q,
++ struct elv_change_ctx *ctx)
+ {
+ struct blk_mq_tag_set *set = q->tag_set;
+- struct elv_change_ctx ctx = {};
+ int ret = -ENODEV;
+
+ WARN_ON_ONCE(q->mq_freeze_depth == 0);
+
+- if (e && !blk_queue_dying(q) && blk_queue_registered(q)) {
+- ctx.name = e->elevator_name;
+- ctx.et = t;
+-
++ if (ctx->type && !blk_queue_dying(q) && blk_queue_registered(q)) {
+ mutex_lock(&q->elevator_lock);
+ /* force to reattach elevator after nr_hw_queue is updated */
+- ret = elevator_switch(q, &ctx);
++ ret = elevator_switch(q, ctx);
+ mutex_unlock(&q->elevator_lock);
+ }
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ if (!ret)
+- WARN_ON_ONCE(elevator_change_done(q, &ctx));
++ WARN_ON_ONCE(elevator_change_done(q, ctx));
+ /*
+ * Free sched tags if it's allocated but we couldn't switch elevator.
+ */
+- if (t && !ctx.new)
+- blk_mq_free_sched_tags(t, set);
++ if (ctx->et && !ctx->new)
++ blk_mq_free_sched_tags(ctx->et, set);
+ }
+
+ /*
+diff --git a/block/elevator.h b/block/elevator.h
+index c4d20155065e..bad43182361e 100644
+--- a/block/elevator.h
++++ b/block/elevator.h
+@@ -32,6 +32,21 @@ struct elevator_tags {
+ struct blk_mq_tags *tags[];
+ };
+
++/* Holding context data for changing elevator */
++struct elv_change_ctx {
++ const char *name;
++ bool no_uevent;
++
++ /* for unregistering old elevator */
++ struct elevator_queue *old;
++ /* for registering new elevator */
++ struct elevator_queue *new;
++ /* store elevator type */
++ struct elevator_type *type;
++ /* holds sched tags data */
++ struct elevator_tags *et;
++};
++
+ struct elevator_mq_ops {
+ int (*init_sched)(struct request_queue *, struct elevator_queue *);
+ void (*exit_sched)(struct elevator_queue *);
+--
+2.51.0
+
--- /dev/null
+From d049ab946f4c6c23d3df50740210e4df295e2ed7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Nov 2025 14:28:21 +0530
+Subject: block: use {alloc|free}_sched_data methods
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 0315476e78c050048e80f66334a310e5581b46bb ]
+
+The previous patch introduced ->alloc_sched_data and
+->free_sched_data methods. This patch builds upon that
+by now using these methods during elevator switch and
+nr_hw_queue update.
+
+It's also ensured that scheduler-specific data is
+allocated and freed through the new callbacks outside
+of the ->freeze_lock and ->elevator_lock locking contexts,
+thereby preventing any dependency on pcpu_alloc_mutex.
+
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai@fnnas.com>
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 9869d3a6fed3 ("block: fix race between wbt_enable_default and IO submission")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.c | 27 +++++++++++++++++++++------
+ block/blk-mq-sched.h | 5 ++++-
+ block/elevator.c | 34 ++++++++++++++++++++++------------
+ block/elevator.h | 4 +++-
+ 4 files changed, 50 insertions(+), 20 deletions(-)
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 03ff16c49976..128f2be9d420 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -428,12 +428,17 @@ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ }
+
+ void blk_mq_free_sched_res(struct elevator_resources *res,
++ struct elevator_type *type,
+ struct blk_mq_tag_set *set)
+ {
+ if (res->et) {
+ blk_mq_free_sched_tags(res->et, set);
+ res->et = NULL;
+ }
++ if (res->data) {
++ blk_mq_free_sched_data(type, res->data);
++ res->data = NULL;
++ }
+ }
+
+ void blk_mq_free_sched_res_batch(struct xarray *elv_tbl,
+@@ -458,7 +463,7 @@ void blk_mq_free_sched_res_batch(struct xarray *elv_tbl,
+ WARN_ON_ONCE(1);
+ continue;
+ }
+- blk_mq_free_sched_res(&ctx->res, set);
++ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
+ }
+ }
+ }
+@@ -541,7 +546,9 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ }
+
+ int blk_mq_alloc_sched_res(struct request_queue *q,
+- struct elevator_resources *res, unsigned int nr_hw_queues)
++ struct elevator_type *type,
++ struct elevator_resources *res,
++ unsigned int nr_hw_queues)
+ {
+ struct blk_mq_tag_set *set = q->tag_set;
+
+@@ -550,6 +557,12 @@ int blk_mq_alloc_sched_res(struct request_queue *q,
+ if (!res->et)
+ return -ENOMEM;
+
++ res->data = blk_mq_alloc_sched_data(q, type);
++ if (IS_ERR(res->data)) {
++ blk_mq_free_sched_tags(res->et, set);
++ return -ENOMEM;
++ }
++
+ return 0;
+ }
+
+@@ -577,19 +590,21 @@ int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
+ goto out_unwind;
+ }
+
+- ret = blk_mq_alloc_sched_res(q, &ctx->res,
+- nr_hw_queues);
++ ret = blk_mq_alloc_sched_res(q, q->elevator->type,
++ &ctx->res, nr_hw_queues);
+ if (ret)
+ goto out_unwind;
+ }
+ }
+ return 0;
++
+ out_unwind:
+ list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
+ if (q->elevator) {
+ ctx = xa_load(elv_tbl, q->id);
+ if (ctx)
+- blk_mq_free_sched_res(&ctx->res, set);
++ blk_mq_free_sched_res(&ctx->res,
++ ctx->type, set);
+ }
+ }
+ return ret;
+@@ -606,7 +621,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+ unsigned long i;
+ int ret;
+
+- eq = elevator_alloc(q, e, et);
++ eq = elevator_alloc(q, e, res);
+ if (!eq)
+ return -ENOMEM;
+
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 4e1b86e85a8a..02c40a72e959 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -26,7 +26,9 @@ void blk_mq_sched_free_rqs(struct request_queue *q);
+ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ unsigned int nr_hw_queues, unsigned int nr_requests);
+ int blk_mq_alloc_sched_res(struct request_queue *q,
+- struct elevator_resources *res, unsigned int nr_hw_queues);
++ struct elevator_type *type,
++ struct elevator_resources *res,
++ unsigned int nr_hw_queues);
+ int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
+ int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
+@@ -35,6 +37,7 @@ void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl);
+ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ struct blk_mq_tag_set *set);
+ void blk_mq_free_sched_res(struct elevator_resources *res,
++ struct elevator_type *type,
+ struct blk_mq_tag_set *set);
+ void blk_mq_free_sched_res_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set);
+diff --git a/block/elevator.c b/block/elevator.c
+index cbec292a4af5..5b37ef44f52d 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -121,7 +121,7 @@ static struct elevator_type *elevator_find_get(const char *name)
+ static const struct kobj_type elv_ktype;
+
+ struct elevator_queue *elevator_alloc(struct request_queue *q,
+- struct elevator_type *e, struct elevator_tags *et)
++ struct elevator_type *e, struct elevator_resources *res)
+ {
+ struct elevator_queue *eq;
+
+@@ -134,7 +134,8 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
+ kobject_init(&eq->kobj, &elv_ktype);
+ mutex_init(&eq->sysfs_lock);
+ hash_init(eq->hash);
+- eq->et = et;
++ eq->et = res->et;
++ eq->elevator_data = res->data;
+
+ return eq;
+ }
+@@ -617,7 +618,7 @@ static void elv_exit_and_release(struct elv_change_ctx *ctx,
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+ if (e) {
+- blk_mq_free_sched_res(&ctx->res, q->tag_set);
++ blk_mq_free_sched_res(&ctx->res, ctx->type, q->tag_set);
+ kobject_put(&e->kobj);
+ }
+ }
+@@ -628,12 +629,15 @@ static int elevator_change_done(struct request_queue *q,
+ int ret = 0;
+
+ if (ctx->old) {
+- struct elevator_resources res = {.et = ctx->old->et};
++ struct elevator_resources res = {
++ .et = ctx->old->et,
++ .data = ctx->old->elevator_data
++ };
+ bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
+ &ctx->old->flags);
+
+ elv_unregister_queue(q, ctx->old);
+- blk_mq_free_sched_res(&res, q->tag_set);
++ blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set);
+ kobject_put(&ctx->old->kobj);
+ if (enable_wbt)
+ wbt_enable_default(q->disk);
+@@ -658,7 +662,8 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ lockdep_assert_held(&set->update_nr_hwq_lock);
+
+ if (strncmp(ctx->name, "none", 4)) {
+- ret = blk_mq_alloc_sched_res(q, &ctx->res, set->nr_hw_queues);
++ ret = blk_mq_alloc_sched_res(q, ctx->type, &ctx->res,
++ set->nr_hw_queues);
+ if (ret)
+ return ret;
+ }
+@@ -681,11 +686,12 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ blk_mq_unfreeze_queue(q, memflags);
+ if (!ret)
+ ret = elevator_change_done(q, ctx);
++
+ /*
+ * Free sched resource if it's allocated but we couldn't switch elevator.
+ */
+ if (!ctx->new)
+- blk_mq_free_sched_res(&ctx->res, set);
++ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
+
+ return ret;
+ }
+@@ -711,11 +717,12 @@ void elv_update_nr_hw_queues(struct request_queue *q,
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ if (!ret)
+ WARN_ON_ONCE(elevator_change_done(q, ctx));
++
+ /*
+ * Free sched resource if it's allocated but we couldn't switch elevator.
+ */
+ if (!ctx->new)
+- blk_mq_free_sched_res(&ctx->res, set);
++ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
+ }
+
+ /*
+@@ -729,7 +736,6 @@ void elevator_set_default(struct request_queue *q)
+ .no_uevent = true,
+ };
+ int err;
+- struct elevator_type *e;
+
+ /* now we allow to switch elevator */
+ blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
+@@ -742,8 +748,8 @@ void elevator_set_default(struct request_queue *q)
+ * have multiple queues or mq-deadline is not available, default
+ * to "none".
+ */
+- e = elevator_find_get(ctx.name);
+- if (!e)
++ ctx.type = elevator_find_get(ctx.name);
++ if (!ctx.type)
+ return;
+
+ if ((q->nr_hw_queues == 1 ||
+@@ -753,7 +759,7 @@ void elevator_set_default(struct request_queue *q)
+ pr_warn("\"%s\" elevator initialization, failed %d, falling back to \"none\"\n",
+ ctx.name, err);
+ }
+- elevator_put(e);
++ elevator_put(ctx.type);
+ }
+
+ void elevator_set_none(struct request_queue *q)
+@@ -802,6 +808,7 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
+ ctx.name = strstrip(elevator_name);
+
+ elv_iosched_load_module(ctx.name);
++ ctx.type = elevator_find_get(ctx.name);
+
+ down_read(&set->update_nr_hwq_lock);
+ if (!blk_queue_no_elv_switch(q)) {
+@@ -812,6 +819,9 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
+ ret = -ENOENT;
+ }
+ up_read(&set->update_nr_hwq_lock);
++
++ if (ctx.type)
++ elevator_put(ctx.type);
+ return ret;
+ }
+
+diff --git a/block/elevator.h b/block/elevator.h
+index e34043f6da26..3ee1d494f48a 100644
+--- a/block/elevator.h
++++ b/block/elevator.h
+@@ -33,6 +33,8 @@ struct elevator_tags {
+ };
+
+ struct elevator_resources {
++ /* holds elevator data */
++ void *data;
+ /* holds elevator tags */
+ struct elevator_tags *et;
+ };
+@@ -185,7 +187,7 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *page, size_t count);
+
+ extern bool elv_bio_merge_ok(struct request *, struct bio *);
+ struct elevator_queue *elevator_alloc(struct request_queue *,
+- struct elevator_type *, struct elevator_tags *);
++ struct elevator_type *, struct elevator_resources *);
+
+ /*
+ * Helper functions.
+--
+2.51.0
+
--- /dev/null
+From e51d067e5ccd0dba83e6fb03e298fc63749f8bbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 11:02:30 -0500
+Subject: drm/amdgpu: fix a job->pasid access race in gpu recovery
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 77f73253015cbc7893fca1821ac3eae9eb4bc943 ]
+
+Avoid a possible UAF in GPU recovery due to a race between
+the sched timeout callback and the tdr work queue.
+
+The gpu recovery function calls drm_sched_stop() and
+later drm_sched_start(). drm_sched_start() restarts
+the tdr queue which will eventually free the job. If
+the tdr queue frees the job before time out callback
+completes, the job will be freed and we'll get a UAF
+when accessing the pasid. Cache it early to avoid the
+UAF.
+
+Example KASAN trace:
+[ 493.058141] BUG: KASAN: slab-use-after-free in amdgpu_device_gpu_recover+0x968/0x990 [amdgpu]
+[ 493.067530] Read of size 4 at addr ffff88b0ce3f794c by task kworker/u128:1/323
+[ 493.074892]
+[ 493.076485] CPU: 9 UID: 0 PID: 323 Comm: kworker/u128:1 Tainted: G E 6.16.0-1289896.2.zuul.bf4f11df81c1410bbe901c4373305a31 #1 PREEMPT(voluntary)
+[ 493.076493] Tainted: [E]=UNSIGNED_MODULE
+[ 493.076495] Hardware name: TYAN B8021G88V2HR-2T/S8021GM2NR-2T, BIOS V1.03.B10 04/01/2019
+[ 493.076500] Workqueue: amdgpu-reset-dev drm_sched_job_timedout [gpu_sched]
+[ 493.076512] Call Trace:
+[ 493.076515] <TASK>
+[ 493.076518] dump_stack_lvl+0x64/0x80
+[ 493.076529] print_report+0xce/0x630
+[ 493.076536] ? _raw_spin_lock_irqsave+0x86/0xd0
+[ 493.076541] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
+[ 493.076545] ? amdgpu_device_gpu_recover+0x968/0x990 [amdgpu]
+[ 493.077253] kasan_report+0xb8/0xf0
+[ 493.077258] ? amdgpu_device_gpu_recover+0x968/0x990 [amdgpu]
+[ 493.077965] amdgpu_device_gpu_recover+0x968/0x990 [amdgpu]
+[ 493.078672] ? __pfx_amdgpu_device_gpu_recover+0x10/0x10 [amdgpu]
+[ 493.079378] ? amdgpu_coredump+0x1fd/0x4c0 [amdgpu]
+[ 493.080111] amdgpu_job_timedout+0x642/0x1400 [amdgpu]
+[ 493.080903] ? pick_task_fair+0x24e/0x330
+[ 493.080910] ? __pfx_amdgpu_job_timedout+0x10/0x10 [amdgpu]
+[ 493.081702] ? _raw_spin_lock+0x75/0xc0
+[ 493.081708] ? __pfx__raw_spin_lock+0x10/0x10
+[ 493.081712] drm_sched_job_timedout+0x1b0/0x4b0 [gpu_sched]
+[ 493.081721] ? __pfx__raw_spin_lock_irq+0x10/0x10
+[ 493.081725] process_one_work+0x679/0xff0
+[ 493.081732] worker_thread+0x6ce/0xfd0
+[ 493.081736] ? __pfx_worker_thread+0x10/0x10
+[ 493.081739] kthread+0x376/0x730
+[ 493.081744] ? __pfx_kthread+0x10/0x10
+[ 493.081748] ? __pfx__raw_spin_lock_irq+0x10/0x10
+[ 493.081751] ? __pfx_kthread+0x10/0x10
+[ 493.081755] ret_from_fork+0x247/0x330
+[ 493.081761] ? __pfx_kthread+0x10/0x10
+[ 493.081764] ret_from_fork_asm+0x1a/0x30
+[ 493.081771] </TASK>
+
+Fixes: a72002cb181f ("drm/amdgpu: Make use of drm_wedge_task_info")
+Link: https://github.com/HansKristian-Work/vkd3d-proton/pull/2670
+Cc: SRINIVASAN.SHANMUGAM@amd.com
+Cc: vitaly.prosyak@amd.com
+Cc: christian.koenig@amd.com
+Suggested-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 20880a3fd5dd7bca1a079534cf6596bda92e107d)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 96b6738e6252..843770e61e42 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -6476,6 +6476,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
++ /* save the pasid here as the job may be freed before the end of the reset */
++ int pasid = job ? job->pasid : -EINVAL;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+@@ -6572,8 +6574,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+- if (job)
+- ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
++ /*
++ * The job may already be freed at this point via the sched tdr workqueue so
++ * use the cached pasid.
++ */
++ if (pasid >= 0)
++ ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+--
+2.51.0
+
--- /dev/null
+From c376770c8c831b5a930ef89980bb9d5f7bf8d2e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Nov 2025 11:31:52 +0800
+Subject: drm/me/gsc: mei interrupt top half should be in irq disabled context
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Junxiao Chang <junxiao.chang@intel.com>
+
+[ Upstream commit 17445af7dcc7d645b6fb8951fd10c8b72cc7f23f ]
+
+MEI GSC interrupt comes from i915 or xe driver. It has top half and
+bottom half. Top half is called from i915/xe interrupt handler. It
+should be in irq disabled context.
+
+With RT kernel(PREEMPT_RT enabled), by default IRQ handler is in
+threaded IRQ. MEI GSC top half might be in threaded IRQ context.
+generic_handle_irq_safe API could be called from either IRQ or
+process context, it disables local IRQ then calls MEI GSC interrupt
+top half.
+
+This change fixes B580 GPU boot issue with RT enabled.
+
+Fixes: e02cea83d32d ("drm/xe/gsc: add Battlemage support")
+Tested-by: Baoli Zhang <baoli.zhang@intel.com>
+Signed-off-by: Junxiao Chang <junxiao.chang@intel.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251107033152.834960-1-junxiao.chang@intel.com
+Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
+(cherry picked from commit 3efadf028783a49ab2941294187c8b6dd86bf7da)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_heci_gsc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
+index a415ca488791..32d509b11391 100644
+--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
++++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
+@@ -221,7 +221,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+- ret = generic_handle_irq(xe->heci_gsc.irq);
++ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+ }
+@@ -241,7 +241,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+- ret = generic_handle_irq(xe->heci_gsc.irq);
++ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+ }
+--
+2.51.0
+
--- /dev/null
+From 3ce7ea4665874afbb71668676e8133ef49a11b6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 30 Nov 2025 23:40:05 +0100
+Subject: drm/panel: sony-td4353-jdi: Enable prepare_prev_first
+
+From: Marijn Suijten <marijn.suijten@somainline.org>
+
+[ Upstream commit 2b973ca48ff3ef1952091c8f988d7796781836c8 ]
+
+The DSI host must be enabled before our prepare function can run, which
+has to send its init sequence over DSI. Without enabling the host first
+the panel will not probe.
+
+Fixes: 9e15123eca79 ("drm/msm/dsi: Stop unconditionally powering up DSI hosts at modeset")
+Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Reviewed-by: Martin Botka <martin.botka@somainline.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patch.msgid.link/20251130-sony-akari-fix-panel-v1-1-1d27c60a55f5@somainline.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+index 7c989b70ab51..a14c86c60d19 100644
+--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
++++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+@@ -212,6 +212,8 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
++ ctx->panel.prepare_prev_first = true;
++
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+--
+2.51.0
+
--- /dev/null
+From b3c9eb440b1fc87d210a7866f53758b020f195ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Nov 2025 11:25:21 +0100
+Subject: drm/tests: Handle EDEADLK in drm_test_check_valid_clones()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit 141d95e42884628314f5ad9394657b0b35424300 ]
+
+Fedora/CentOS/RHEL CI is reporting intermittent failures while running
+the drm_test_check_valid_clones() KUnit test.
+
+The error log can be either [1]:
+
+ # drm_test_check_valid_clones: ASSERTION FAILED at
+ # drivers/gpu/drm/tests/drm_atomic_state_test.c:295
+ Expected ret == param->expected_result, but
+ ret == -35 (0xffffffffffffffdd)
+ param->expected_result == 0 (0x0)
+
+Or [2] depending on the test case:
+
+ # drm_test_check_valid_clones: ASSERTION FAILED at
+ # drivers/gpu/drm/tests/drm_atomic_state_test.c:295
+ Expected ret == param->expected_result, but
+ ret == -35 (0xffffffffffffffdd)
+ param->expected_result == -22 (0xffffffffffffffea)
+
+Restart the atomic sequence when EDEADLK is returned.
+
+[1] https://s3.amazonaws.com/arr-cki-prod-trusted-artifacts/trusted-artifacts/2113057246/test_x86_64/11802139999/artifacts/jobwatch/logs/recipes/19824965/tasks/204347800/results/946112713/logs/dmesg.log
+[2] https://s3.amazonaws.com/arr-cki-prod-trusted-artifacts/trusted-artifacts/2106744297/test_aarch64/11762450907/artifacts/jobwatch/logs/recipes/19797942/tasks/204139727/results/945094561/logs/dmesg.log
+
+Fixes: 88849f24e2ab ("drm/tests: Add test for drm_atomic_helper_check_modeset()")
+Closes: https://datawarehouse.cki-project.org/issue/4004
+Reviewed-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Link: https://patch.msgid.link/20251104102535.12212-1-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tests/drm_atomic_state_test.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c
+index 2f6ac7a09f44..1e857d86574c 100644
+--- a/drivers/gpu/drm/tests/drm_atomic_state_test.c
++++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c
+@@ -283,7 +283,14 @@ static void drm_test_check_valid_clones(struct kunit *test)
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
++retry:
+ crtc_state = drm_atomic_get_crtc_state(state, priv->crtc);
++ if (PTR_ERR(crtc_state) == -EDEADLK) {
++ drm_atomic_state_clear(state);
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry;
++ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->encoder_mask = param->encoder_mask;
+@@ -292,6 +299,12 @@ static void drm_test_check_valid_clones(struct kunit *test)
+ crtc_state->mode_changed = true;
+
+ ret = drm_atomic_helper_check_modeset(drm, state);
++ if (ret == -EDEADLK) {
++ drm_atomic_state_clear(state);
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry;
++ }
+ KUNIT_ASSERT_EQ(test, ret, param->expected_result);
+
+ drm_modeset_drop_locks(&ctx);
+--
+2.51.0
+
--- /dev/null
+From 1d551507f548446a516b7254b892101bc7f32da0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Nov 2025 11:25:22 +0100
+Subject: drm/tests: Handle EDEADLK in set_up_atomic_state()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit 526aafabd756cc56401b383d6ae554af3e21dcdd ]
+
+Fedora/CentOS/RHEL CI is reporting intermittent failures while running
+the drm_validate_modeset test [1]:
+
+ # drm_test_check_connector_changed_modeset: EXPECTATION FAILED at
+ # drivers/gpu/drm/tests/drm_atomic_state_test.c:162
+ Expected ret == 0, but
+ ret == -35 (0xffffffffffffffdd)
+
+Change the set_up_atomic_state() helper function to return on error and
+restart the atomic sequence when the returned error is EDEADLK.
+
+[1] https://s3.amazonaws.com/arr-cki-prod-trusted-artifacts/trusted-artifacts/2106744096/test_x86_64/11762450343/artifacts/jobwatch/logs/recipes/19797909/tasks/204139142/results/945095586/logs/dmesg.log
+
+Fixes: 73d934d7b6e3 ("drm/tests: Add test for drm_atomic_helper_commit_modeset_disables()")
+Closes: https://datawarehouse.cki-project.org/issue/4004
+Reviewed-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Link: https://patch.msgid.link/20251104102535.12212-2-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tests/drm_atomic_state_test.c | 27 +++++++++++++++----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c
+index 1e857d86574c..bc27f65b2823 100644
+--- a/drivers/gpu/drm/tests/drm_atomic_state_test.c
++++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c
+@@ -156,24 +156,29 @@ static int set_up_atomic_state(struct kunit *test,
+
+ if (connector) {
+ conn_state = drm_atomic_get_connector_state(state, connector);
+- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
++ if (IS_ERR(conn_state))
++ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+- KUNIT_EXPECT_EQ(test, ret, 0);
++ if (ret)
++ return ret;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
++ if (IS_ERR(crtc_state))
++ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, &drm_atomic_test_mode);
+- KUNIT_EXPECT_EQ(test, ret, 0);
++ if (ret)
++ return ret;
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ if (connector) {
+ ret = drm_atomic_commit(state);
+- KUNIT_ASSERT_EQ(test, ret, 0);
++ if (ret)
++ return ret;
+ } else {
+ // dummy connector mask
+ crtc_state->connector_mask = DRM_TEST_CONN_0;
+@@ -206,7 +211,13 @@ static void drm_test_check_connector_changed_modeset(struct kunit *test)
+ drm_modeset_acquire_init(&ctx, 0);
+
+ // first modeset to enable
++retry_set_up:
+ ret = set_up_atomic_state(test, priv, old_conn, &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_set_up;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -277,7 +288,13 @@ static void drm_test_check_valid_clones(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_set_up:
+ ret = set_up_atomic_state(test, priv, NULL, &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_set_up;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+--
+2.51.0
+
--- /dev/null
+From 3a5d9a0a5aeea6a7020eb54357359e536bad2d77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Nov 2025 11:22:35 +0100
+Subject: drm/tests: hdmi: Handle drm_kunit_helper_enable_crtc_connector()
+ returning EDEADLK
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit fe27e709d91fb645182751b602cb88966b4a1bb6 ]
+
+Fedora/CentOS/RHEL CI is reporting intermittent failures while running
+the KUnit tests present in drm_hdmi_state_helper_test.c [1].
+
+While the specific test causing the failure change between runs, all of
+them are caused by drm_kunit_helper_enable_crtc_connector() returning
+-EDEADLK. The error trace always follow this structure:
+
+ # <test name>: ASSERTION FAILED at
+ # drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c:<line>
+ Expected ret == 0, but
+ ret == -35 (0xffffffffffffffdd)
+
+As documented, if the drm_kunit_helper_enable_crtc_connector() function
+returns -EDEADLK (-35), the entire atomic sequence must be restarted.
+
+Handle this error code for all function calls.
+
+Closes: https://datawarehouse.cki-project.org/issue/4039 [1]
+Fixes: 6a5c0ad7e08e ("drm/tests: hdmi_state_helpers: Switch to new helper")
+Reviewed-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Link: https://patch.msgid.link/20251104102258.10026-1-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/tests/drm_hdmi_state_helper_test.c | 143 ++++++++++++++++++
+ 1 file changed, 143 insertions(+)
+
+diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+index 8bd412735000..70f9aa702143 100644
+--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+@@ -257,10 +257,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -326,10 +332,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -397,10 +409,16 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -457,10 +475,17 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ crtc = priv->crtc;
++
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -518,10 +543,16 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -580,10 +611,17 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ crtc = priv->crtc;
++
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -643,10 +681,16 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -705,10 +749,17 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ crtc = priv->crtc;
++
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -870,10 +921,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -946,10 +1003,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+@@ -1022,10 +1085,16 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1069,10 +1138,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1118,10 +1193,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1167,10 +1248,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1218,10 +1305,16 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* You shouldn't be doing that at home. */
+@@ -1292,10 +1385,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback_rgb(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1440,10 +1539,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422(struct kunit
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1669,10 +1774,17 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
+ drm_modeset_acquire_init(&ctx, 0);
+
+ crtc = priv->crtc;
++
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1736,10 +1848,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1805,10 +1923,16 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1865,10 +1989,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1927,10 +2057,16 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
+
+ drm_modeset_acquire_init(&ctx, 0);
+
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+@@ -1970,10 +2106,17 @@ static void drm_test_check_disable_connector(struct kunit *test)
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
++
++retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
++ if (ret == -EDEADLK) {
++ ret = drm_modeset_backoff(&ctx);
++ if (!ret)
++ goto retry_conn_enable;
++ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+--
+2.51.0
+
--- /dev/null
+From 362518e801cca65f82aa836960e9bf2ddca30e58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Nov 2025 21:25:48 -0800
+Subject: drm/xe: Apply Wa_14020316580 in xe_gt_idle_enable_pg()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
+
+[ Upstream commit c88a0731ed95f9705deb127a7f1927fa59aa742b ]
+
+Wa_14020316580 was getting clobbered by power gating init code
+later in the driver load sequence. Move the Wa so that
+it applies correctly.
+
+Fixes: 7cd05ef89c9d ("drm/xe/xe2hpm: Add initial set of workarounds")
+Suggested-by: Matt Roper <matthew.d.roper@intel.com>
+Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
+Reviewed-by: Riana Tauro <riana.tauro@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patch.msgid.link/20251129052548.70766-1-vinay.belgaumkar@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 8b5502145351bde87f522df082b9e41356898ba3)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gt_idle.c | 8 ++++++++
+ drivers/gpu/drm/xe/xe_wa.c | 8 --------
+ drivers/gpu/drm/xe/xe_wa_oob.rules | 1 +
+ 3 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
+index bdc9d9877ec4..3e3d1d52f630 100644
+--- a/drivers/gpu/drm/xe/xe_gt_idle.c
++++ b/drivers/gpu/drm/xe/xe_gt_idle.c
+@@ -5,6 +5,7 @@
+
+ #include <drm/drm_managed.h>
+
++#include <generated/xe_wa_oob.h>
+ #include "xe_force_wake.h"
+ #include "xe_device.h"
+ #include "xe_gt.h"
+@@ -16,6 +17,7 @@
+ #include "xe_mmio.h"
+ #include "xe_pm.h"
+ #include "xe_sriov.h"
++#include "xe_wa.h"
+
+ /**
+ * DOC: Xe GT Idle
+@@ -145,6 +147,12 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
+ xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
+ }
+
++ if (XE_GT_WA(gt, 14020316580))
++ gtidle->powergate_enable &= ~(VDN_HCP_POWERGATE_ENABLE(0) |
++ VDN_MFXVDENC_POWERGATE_ENABLE(0) |
++ VDN_HCP_POWERGATE_ENABLE(2) |
++ VDN_MFXVDENC_POWERGATE_ENABLE(2));
++
+ xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
+index 3cf30718b200..d209434fd7fc 100644
+--- a/drivers/gpu/drm/xe/xe_wa.c
++++ b/drivers/gpu/drm/xe/xe_wa.c
+@@ -270,14 +270,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+- { XE_RTP_NAME("14020316580"),
+- XE_RTP_RULES(MEDIA_VERSION(1301)),
+- XE_RTP_ACTIONS(CLR(POWERGATE_ENABLE,
+- VDN_HCP_POWERGATE_ENABLE(0) |
+- VDN_MFXVDENC_POWERGATE_ENABLE(0) |
+- VDN_HCP_POWERGATE_ENABLE(2) |
+- VDN_MFXVDENC_POWERGATE_ENABLE(2))),
+- },
+ { XE_RTP_NAME("14019449301"),
+ XE_RTP_RULES(MEDIA_VERSION(1301), ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F08(0), CG3DDISHRS_CLKGATE_DIS)),
+diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
+index f3a6d5d239ce..1d32fa1c3b2d 100644
+--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
++++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
+@@ -81,3 +81,4 @@
+
+ 15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
+ 16026007364 MEDIA_VERSION(3000)
++14020316580 MEDIA_VERSION(1301)
+--
+2.51.0
+
--- /dev/null
+From ddf11bbc6f04afb213f8db01a0b177f61a5a5da3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 10:46:58 +0100
+Subject: drm/xe: fix drm_gpusvm_init() arguments
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 9acc3295813b9b846791fd3eab0a78a3144af560 ]
+
+The Xe driver fails to build when CONFIG_DRM_XE_GPUSVM is disabled
+but CONFIG_DRM_GPUSVM is turned on, due to the clash of two commits:
+
+In file included from drivers/gpu/drm/xe/xe_vm_madvise.c:8:
+drivers/gpu/drm/xe/xe_svm.h: In function 'xe_svm_init':
+include/linux/stddef.h:8:14: error: passing argument 5 of 'drm_gpusvm_init' makes integer from pointer without a cast [-Wint-conversion]
+drivers/gpu/drm/xe/xe_svm.h:217:38: note: in expansion of macro 'NULL'
+ 217 | NULL, NULL, 0, 0, 0, NULL, NULL, 0);
+ | ^~~~
+In file included from drivers/gpu/drm/xe/xe_bo_types.h:11,
+ from drivers/gpu/drm/xe/xe_bo.h:11,
+ from drivers/gpu/drm/xe/xe_vm_madvise.c:11:
+include/drm/drm_gpusvm.h:254:35: note: expected 'long unsigned int' but argument is of type 'void *'
+ 254 | unsigned long mm_start, unsigned long mm_range,
+ | ~~~~~~~~~~~~~~^~~~~~~~
+In file included from drivers/gpu/drm/xe/xe_vm_madvise.c:14:
+drivers/gpu/drm/xe/xe_svm.h:216:16: error: too many arguments to function 'drm_gpusvm_init'; expected 10, have 11
+ 216 | return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
+ | ^~~~~~~~~~~~~~~
+ 217 | NULL, NULL, 0, 0, 0, NULL, NULL, 0);
+ | ~
+include/drm/drm_gpusvm.h:251:5: note: declared here
+
+Adapt the caller to the new argument list by removing the extraneous
+NULL argument.
+
+Fixes: 9e9787414882 ("drm/xe/userptr: replace xe_hmm with gpusvm")
+Fixes: 10aa5c806030 ("drm/gpusvm, drm/xe: Fix userptr to not allow device private pages")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Link: https://patch.msgid.link/20251204094704.1030933-1-arnd@kernel.org
+(cherry picked from commit 29bce9c8b41d5c378263a927acb9a9074d0e7a0e)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_svm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
+index 0955d2ac8d74..fa757dd07954 100644
+--- a/drivers/gpu/drm/xe/xe_svm.h
++++ b/drivers/gpu/drm/xe/xe_svm.h
+@@ -214,7 +214,7 @@ int xe_svm_init(struct xe_vm *vm)
+ {
+ #if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
+- NULL, NULL, 0, 0, 0, NULL, NULL, 0);
++ NULL, 0, 0, 0, NULL, NULL, 0);
+ #else
+ return 0;
+ #endif
+--
+2.51.0
+
--- /dev/null
+From 456141c273ce60d49cfeb6b65ea7f011823e1845 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Nov 2025 20:56:39 +0000
+Subject: drm/xe: Fix freq kobject leak on sysfs_create_files failure
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit b32045d73bb4333a2cebc5d3c005807adb03ab58 ]
+
+Ensure gt->freq is released when sysfs_create_files() fails
+in xe_gt_freq_init(). Without this, the kobject would leak.
+Add kobject_put() before returning the error.
+
+Fixes: fdc81c43f0c1 ("drm/xe: use devm_add_action_or_reset() helper")
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Alex Zuo <alex.zuo@intel.com>
+Reviewed-by: Xin Wang <x.wang@intel.com>
+Link: https://patch.msgid.link/20251114205638.2184529-2-shuicheng.lin@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 251be5fb4982ebb0f5a81b62d975bd770f3ad5c2)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gt_freq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
+index 4ff1b6b58d6b..e8e70fd2e8c4 100644
+--- a/drivers/gpu/drm/xe/xe_gt_freq.c
++++ b/drivers/gpu/drm/xe/xe_gt_freq.c
+@@ -296,8 +296,10 @@ int xe_gt_freq_init(struct xe_gt *gt)
+ return -ENOMEM;
+
+ err = sysfs_create_files(gt->freq, freq_attrs);
+- if (err)
++ if (err) {
++ kobject_put(gt->freq);
+ return err;
++ }
+
+ err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq);
+ if (err)
+--
+2.51.0
+
--- /dev/null
+From c670d925c7b831dcf7293bb72068e0b170fe3afc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 05:21:46 +0800
+Subject: drm/xe: Increase TDF timeout
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jagmeet Randhawa <jagmeet.randhawa@intel.com>
+
+[ Upstream commit eafb6f62093f756535a7be1fc4559374a511e460 ]
+
+There are some corner cases where flushing transient
+data may take slightly longer than the 150us timeout
+we currently allow. Update the driver to use a 300us
+timeout instead based on the latest guidance from
+the hardware team. An update to the bspec to formally
+document this is expected to arrive soon.
+
+Fixes: c01c6066e6fa ("drm/xe/device: implement transient flush")
+Signed-off-by: Jagmeet Randhawa <jagmeet.randhawa@intel.com>
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patch.msgid.link/0201b1d6ec64d3651fcbff1ea21026efa915126a.1765487866.git.jagmeet.randhawa@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit d69d3636f5f7a84bae7cd43473b3701ad9b7d544)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index 456899238377..5f757790d6f5 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -1046,7 +1046,7 @@ static void tdf_request_sync(struct xe_device *xe)
+ * transient and need to be flushed..
+ */
+ if (xe_mmio_wait32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+- 150, NULL, false))
++ 300, NULL, false))
+ xe_gt_err_once(gt, "TD flush timeout\n");
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+--
+2.51.0
+
--- /dev/null
+From 71e4a833f2429ddf45a3ef03f338fec146b75dfa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 23:47:17 +0000
+Subject: drm/xe: Limit num_syncs to prevent oversized allocations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit 8e461304009135270e9ccf2d7e2dfe29daec9b60 ]
+
+The exec and vm_bind ioctl allow userspace to specify an arbitrary
+num_syncs value. Without bounds checking, a very large num_syncs
+can force an excessively large allocation, leading to kernel warnings
+from the page allocator as below.
+
+Introduce DRM_XE_MAX_SYNCS (set to 1024) and reject any request
+exceeding this limit.
+
+"
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 1217 at mm/page_alloc.c:5124 __alloc_frozen_pages_noprof+0x2f8/0x2180 mm/page_alloc.c:5124
+...
+Call Trace:
+ <TASK>
+ alloc_pages_mpol+0xe4/0x330 mm/mempolicy.c:2416
+ ___kmalloc_large_node+0xd8/0x110 mm/slub.c:4317
+ __kmalloc_large_node_noprof+0x18/0xe0 mm/slub.c:4348
+ __do_kmalloc_node mm/slub.c:4364 [inline]
+ __kmalloc_noprof+0x3d4/0x4b0 mm/slub.c:4388
+ kmalloc_noprof include/linux/slab.h:909 [inline]
+ kmalloc_array_noprof include/linux/slab.h:948 [inline]
+ xe_exec_ioctl+0xa47/0x1e70 drivers/gpu/drm/xe/xe_exec.c:158
+ drm_ioctl_kernel+0x1f1/0x3e0 drivers/gpu/drm/drm_ioctl.c:797
+ drm_ioctl+0x5e7/0xc50 drivers/gpu/drm/drm_ioctl.c:894
+ xe_drm_ioctl+0x10b/0x170 drivers/gpu/drm/xe/xe_device.c:224
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:598 [inline]
+ __se_sys_ioctl fs/ioctl.c:584 [inline]
+ __x64_sys_ioctl+0x18b/0x210 fs/ioctl.c:584
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xbb/0x380 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+...
+"
+
+v2: Add "Reported-by" and Cc stable kernels.
+v3: Change XE_MAX_SYNCS from 64 to 1024. (Matt & Ashutosh)
+v4: s/XE_MAX_SYNCS/DRM_XE_MAX_SYNCS/ (Matt)
+v5: Do the check at the top of the exec func. (Matt)
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Reported-by: Koen Koning <koen.koning@intel.com>
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6450
+Cc: <stable@vger.kernel.org> # v6.12+
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Michal Mrozek <michal.mrozek@intel.com>
+Cc: Carl Zhang <carl.zhang@intel.com>
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Cc: Ivan Briano <ivan.briano@intel.com>
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251205234715.2476561-5-shuicheng.lin@intel.com
+(cherry picked from commit b07bac9bd708ec468cd1b8a5fe70ae2ac9b0a11c)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Stable-dep-of: f8dd66bfb4e1 ("drm/xe/oa: Limit num_syncs to prevent oversized allocations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_exec.c | 3 ++-
+ drivers/gpu/drm/xe/xe_vm.c | 3 +++
+ include/uapi/drm/xe_drm.h | 1 +
+ 3 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
+index a8ab363a8046..ca85f7c15fab 100644
+--- a/drivers/gpu/drm/xe/xe_exec.c
++++ b/drivers/gpu/drm/xe/xe_exec.c
+@@ -130,7 +130,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+
+ if (XE_IOCTL_DBG(xe, args->extensions) ||
+ XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+- XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
++ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]) ||
++ XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
+ return -EINVAL;
+
+ q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index cdd1dc540a59..f0f699baa9f6 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -3282,6 +3282,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
+ if (XE_IOCTL_DBG(xe, args->extensions))
+ return -EINVAL;
+
++ if (XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
++ return -EINVAL;
++
+ if (args->num_binds > 1) {
+ u64 __user *bind_user =
+ u64_to_user_ptr(args->vector_of_binds);
+diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
+index 517489a7ec60..400555a8af18 100644
+--- a/include/uapi/drm/xe_drm.h
++++ b/include/uapi/drm/xe_drm.h
+@@ -1459,6 +1459,7 @@ struct drm_xe_exec {
+ /** @exec_queue_id: Exec queue ID for the batch buffer */
+ __u32 exec_queue_id;
+
++#define DRM_XE_MAX_SYNCS 1024
+ /** @num_syncs: Amount of struct drm_xe_sync in array. */
+ __u32 num_syncs;
+
+--
+2.51.0
+
--- /dev/null
+From 07fbdf91397aee9e5da7db4a5664181d18801a61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 13:26:13 -0800
+Subject: drm/xe/oa: Always set OAG_OAGLBCTXCTRL_COUNTER_RESUME
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+[ Upstream commit 256edb267a9d0b5aef70e408e9fba4f930f9926e ]
+
+Reports can be written out to the OA buffer using ways other than periodic
+sampling. These include mmio trigger and context switches. To support these
+use cases, when periodic sampling is not enabled,
+OAG_OAGLBCTXCTRL_COUNTER_RESUME must be set.
+
+Fixes: 1db9a9dc90ae ("drm/xe/oa: OA stream initialization (OAG)")
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Reviewed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Link: https://patch.msgid.link/20251205212613.826224-4-ashutosh.dixit@intel.com
+(cherry picked from commit 88d98e74adf3e20f678bb89581a5c3149fdbdeaa)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 10047373e184..d0ceb67af83e 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1104,11 +1104,12 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
+ oag_buf_size_select(stream) |
+ oag_configure_mmio_trigger(stream, true));
+
+- xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
+- (OAG_OAGLBCTXCTRL_COUNTER_RESUME |
++ xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl,
++ OAG_OAGLBCTXCTRL_COUNTER_RESUME |
++ (stream->periodic ?
+ OAG_OAGLBCTXCTRL_TIMER_ENABLE |
+ REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
+- stream->period_exponent)) : 0);
++ stream->period_exponent) : 0));
+
+ /*
+ * Initialize Super Queue Internal Cnt Register
+--
+2.51.0
+
--- /dev/null
+From 60e896600e5a2a31c57c53f8116aad34e36c265f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 23:47:18 +0000
+Subject: drm/xe/oa: Limit num_syncs to prevent oversized allocations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit f8dd66bfb4e184c71bd26418a00546ebe7f5c17a ]
+
+The OA open parameters did not validate num_syncs, allowing
+userspace to pass arbitrarily large values, potentially
+leading to excessive allocations.
+
+Add check to ensure that num_syncs does not exceed DRM_XE_MAX_SYNCS,
+returning -EINVAL when the limit is violated.
+
+v2: use XE_IOCTL_DBG() and drop duplicated check. (Ashutosh)
+
+Fixes: c8507a25cebd ("drm/xe/oa/uapi: Define and parse OA sync properties")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251205234715.2476561-6-shuicheng.lin@intel.com
+(cherry picked from commit e057b2d2b8d815df3858a87dffafa2af37e5945b)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 125698a9ecf1..10047373e184 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1253,6 +1253,9 @@ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+ static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
++ if (XE_IOCTL_DBG(oa->xe, value > DRM_XE_MAX_SYNCS))
++ return -EINVAL;
++
+ param->num_syncs = value;
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From 6120fae7cb9cb5d870a8c5d61cfdd1a764ae2792 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 15:56:18 +0100
+Subject: drm/xe: Restore engine registers before restarting schedulers after
+ GT reset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jan Maslak <jan.maslak@intel.com>
+
+[ Upstream commit eed5b815fa49c17d513202f54e980eb91955d3ed ]
+
+During GT reset recovery in do_gt_restart(), xe_uc_start() was called
+before xe_reg_sr_apply_mmio() restored engine-specific registers. This
+created a race window where the scheduler could run jobs before hardware
+state was fully restored.
+
+This caused failures in eudebug tests (xe_exec_sip_eudebug@breakpoint-
+waitsip-*) where TD_CTL register (containing TD_CTL_GLOBAL_DEBUG_ENABLE)
+wasn't restored before jobs started executing. Breakpoints would fail to
+trigger SIP entry because the debug enable bit wasn't set yet.
+
+Fix by moving xe_uc_start() after all MMIO register restoration,
+including engine registers and CCS mode configuration, ensuring all
+hardware state is fully restored before any jobs can be scheduled.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Signed-off-by: Jan Maslak <jan.maslak@intel.com>
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251210145618.169625-2-jan.maslak@intel.com
+(cherry picked from commit 825aed0328588b2837636c1c5a0c48795d724617)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gt.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 6d3db5e55d98..61bed3b04ded 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -784,9 +784,6 @@ static int do_gt_restart(struct xe_gt *gt)
+ xe_gt_sriov_pf_init_hw(gt);
+
+ xe_mocs_init(gt);
+- err = xe_uc_start(>->uc);
+- if (err)
+- return err;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
+@@ -794,6 +791,10 @@ static int do_gt_restart(struct xe_gt *gt)
+ /* Get CCS mode in sync between sw/hw */
+ xe_gt_apply_ccs_mode(gt);
+
++ err = xe_uc_start(>->uc);
++ if (err)
++ return err;
++
+ /* Restore GT freq to expected values */
+ xe_gt_sanitize_freq(gt);
+
+--
+2.51.0
+
--- /dev/null
+From a31a54fc0f30a31d166d830e935362bdbd543c33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Dec 2025 09:37:06 +0300
+Subject: hwmon: (dell-smm) Limit fan multiplier to avoid overflow
+
+From: Denis Sergeev <denserg.edu@gmail.com>
+
+[ Upstream commit 46c28bbbb150b80827e4bcbea231560af9d16854 ]
+
+The fan nominal speed returned by SMM is limited to 16 bits, but the
+driver allows the fan multiplier to be set via a module parameter.
+
+Clamp the computed fan multiplier so that fan_nominal_speed *
+i8k_fan_mult always fits into a signed 32-bit integer and refuse to
+initialize the driver if the value is too large.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 20bdeebc88269 ("hwmon: (dell-smm) Introduce helper function for data init")
+Signed-off-by: Denis Sergeev <denserg.edu@gmail.com>
+Link: https://lore.kernel.org/r/20251209063706.49008-1-denserg.edu@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/dell-smm-hwmon.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index cbe1a74a3dee..f0e8a9bc0d0e 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -76,6 +76,9 @@
+ #define DELL_SMM_NO_TEMP 10
+ #define DELL_SMM_NO_FANS 4
+
++/* limit fan multiplier to avoid overflow */
++#define DELL_SMM_MAX_FAN_MULT (INT_MAX / U16_MAX)
++
+ struct smm_regs {
+ unsigned int eax;
+ unsigned int ebx;
+@@ -1253,6 +1256,12 @@ static int dell_smm_init_data(struct device *dev, const struct dell_smm_ops *ops
+ data->ops = ops;
+ /* All options must not be 0 */
+ data->i8k_fan_mult = fan_mult ? : I8K_FAN_MULT;
++ if (data->i8k_fan_mult > DELL_SMM_MAX_FAN_MULT) {
++ dev_err(dev,
++ "fan multiplier %u is too large (max %u)\n",
++ data->i8k_fan_mult, DELL_SMM_MAX_FAN_MULT);
++ return -EINVAL;
++ }
+ data->i8k_fan_max = fan_max ? : I8K_FAN_HIGH;
+ data->i8k_pwm_mult = DIV_ROUND_UP(255, data->i8k_fan_max);
+
+--
+2.51.0
+
--- /dev/null
+From 332ca034c63837ca612b78336ab428ec5fd6b785 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 17:48:08 +0800
+Subject: hwmon: (ibmpex) fix use-after-free in high/low store
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+[ Upstream commit 6946c726c3f4c36f0f049e6f97e88c510b15f65d ]
+
+The ibmpex_high_low_store() function retrieves driver data using
+dev_get_drvdata() and uses it without validation. This creates a race
+condition where the sysfs callback can be invoked after the data
+structure is freed, leading to use-after-free.
+
+Fix by adding a NULL check after dev_get_drvdata(), and reordering
+operations in the deletion path to prevent TOCTOU.
+
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Reported-by: Junrui Luo <moonafterrain@outlook.com>
+Fixes: 57c7c3a0fdea ("hwmon: IBM power meter driver")
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Link: https://lore.kernel.org/r/MEYPR01MB7886BE2F51BFE41875B74B60AFA0A@MEYPR01MB7886.ausprd01.prod.outlook.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ibmpex.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
+index 228c5f6c6f38..129f3a9e8fe9 100644
+--- a/drivers/hwmon/ibmpex.c
++++ b/drivers/hwmon/ibmpex.c
+@@ -277,6 +277,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev,
+ {
+ struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+
++ if (!data)
++ return -ENODEV;
++
+ ibmpex_reset_high_low_data(data);
+
+ return count;
+@@ -508,6 +511,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ {
+ int i, j;
+
++ hwmon_device_unregister(data->hwmon_dev);
++ dev_set_drvdata(data->bmc_device, NULL);
++
+ device_remove_file(data->bmc_device,
+ &sensor_dev_attr_reset_high_low.dev_attr);
+ device_remove_file(data->bmc_device, &dev_attr_name.attr);
+@@ -521,8 +527,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ }
+
+ list_del(&data->list);
+- dev_set_drvdata(data->bmc_device, NULL);
+- hwmon_device_unregister(data->hwmon_dev);
++
+ ipmi_destroy_user(data->user);
+ kfree(data->sensors);
+ kfree(data);
+--
+2.51.0
+
--- /dev/null
+From 7b78234870a5016899d97ea60fc6f81b25bfe967 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 16:11:05 +0000
+Subject: hwmon: (ltc4282): Fix reset_history file permissions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nuno Sá <nuno.sa@analog.com>
+
+[ Upstream commit b3db91c3bfea69a6c6258fea508f25a59c0feb1a ]
+
+The reset_history attributes are write only. Hence don't report them as
+readable just to return -EOPNOTSUPP later on.
+
+Fixes: cbc29538dbf7 ("hwmon: Add driver for LTC4282")
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Link: https://lore.kernel.org/r/20251219-ltc4282-fix-reset-history-v1-1-8eab974c124b@analog.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ltc4282.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
+index 1d664a2d7b3c..58c2d3a62432 100644
+--- a/drivers/hwmon/ltc4282.c
++++ b/drivers/hwmon/ltc4282.c
+@@ -1018,8 +1018,9 @@ static umode_t ltc4282_in_is_visible(const struct ltc4282_state *st, u32 attr)
+ case hwmon_in_max:
+ case hwmon_in_min:
+ case hwmon_in_enable:
+- case hwmon_in_reset_history:
+ return 0644;
++ case hwmon_in_reset_history:
++ return 0200;
+ default:
+ return 0;
+ }
+@@ -1038,8 +1039,9 @@ static umode_t ltc4282_curr_is_visible(u32 attr)
+ return 0444;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+- case hwmon_curr_reset_history:
+ return 0644;
++ case hwmon_curr_reset_history:
++ return 0200;
+ default:
+ return 0;
+ }
+@@ -1057,8 +1059,9 @@ static umode_t ltc4282_power_is_visible(u32 attr)
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_min:
+- case hwmon_power_reset_history:
+ return 0644;
++ case hwmon_power_reset_history:
++ return 0200;
+ default:
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From b543573506cfda0505c2a553cbc82af6ba0a5e09 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Dec 2025 19:43:43 +0300
+Subject: hwmon: (tmp401) fix overflow caused by default conversion rate value
+
+From: Alexey Simakov <bigalex934@gmail.com>
+
+[ Upstream commit 82f2aab35a1ab2e1460de06ef04c726460aed51c ]
+
+The driver computes conversion intervals using the formula:
+
+ interval = (1 << (7 - rate)) * 125ms
+
+where 'rate' is the sensor's conversion rate register value. According to
+the datasheet, the power-on reset value of this register is 0x8, which
+could be assigned to the register, after handling i2c general call.
+Using this default value causes a result greater than the bit width of
+left operand and an undefined behaviour in the calculation above, since
+shifting by values larger than the bit width is undefined behaviour as
+per C language standard.
+
+Limit the maximum usable 'rate' value to 7 to prevent undefined
+behaviour in calculations.
+
+Found by Linux Verification Center (linuxtesting.org) with Svace.
+
+Note (groeck):
+ This does not matter in practice unless someone overwrites the chip
+ configuration from outside the driver while the driver is loaded.
+ The conversion time register is initialized with a value of 5 (500ms)
+ when the driver is loaded, and the driver never writes a bad value.
+
+Fixes: ca53e7640de7 ("hwmon: (tmp401) Convert to _info API")
+Signed-off-by: Alexey Simakov <bigalex934@gmail.com>
+Link: https://lore.kernel.org/r/20251211164342.6291-1-bigalex934@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/tmp401.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
+index 02c5a3bb1071..84aaf817144c 100644
+--- a/drivers/hwmon/tmp401.c
++++ b/drivers/hwmon/tmp401.c
+@@ -401,7 +401,7 @@ static int tmp401_chip_read(struct device *dev, u32 attr, int channel, long *val
+ ret = regmap_read(data->regmap, TMP401_CONVERSION_RATE, ®val);
+ if (ret < 0)
+ return ret;
+- *val = (1 << (7 - regval)) * 125;
++ *val = (1 << (7 - min(regval, 7))) * 125;
+ break;
+ case hwmon_chip_temp_reset_history:
+ *val = 0;
+--
+2.51.0
+
--- /dev/null
+From f8904b828a5fb918a0895b66afec43fce667ad40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 14:26:32 +0800
+Subject: io_uring: fix nr_segs calculation in io_import_kbuf
+
+From: huang-jl <huang-jl@deepseek.com>
+
+[ Upstream commit 114ea9bbaf7681c4d363e13b7916e6fef6a4963a ]
+
+io_import_kbuf() calculates nr_segs incorrectly when iov_offset is
+non-zero after iov_iter_advance(). It doesn't account for the partial
+consumption of the first bvec.
+
+The problem comes when meet the following conditions:
+1. Use UBLK_F_AUTO_BUF_REG feature of ublk.
+2. The kernel will help to register the buffer, into the io uring.
+3. Later, the ublk server try to send IO request using the registered
+ buffer in the io uring, to read/write to fuse-based filesystem, with
+O_DIRECT.
+
+>From a userspace perspective, the ublk server thread is blocked in the
+kernel, and will see "soft lockup" in the kernel dmesg.
+
+When ublk registers a buffer with mixed-size bvecs like [4K]*6 + [12K]
+and a request partially consumes a bvec, the next request's nr_segs
+calculation uses bvec->bv_len instead of (bv_len - iov_offset).
+
+This causes fuse_get_user_pages() to loop forever because nr_segs
+indicates fewer pages than actually needed.
+
+Specifically, the infinite loop happens at:
+fuse_get_user_pages()
+ -> iov_iter_extract_pages()
+ -> iov_iter_extract_bvec_pages()
+Since the nr_segs is miscalculated, the iov_iter_extract_bvec_pages
+returns when finding that i->nr_segs is zero. Then
+iov_iter_extract_pages returns zero. However, fuse_get_user_pages does
+still not get enough data/pages, causing infinite loop.
+
+Example:
+ - Bvecs: [4K, 4K, 4K, 4K, 4K, 4K, 12K, ...]
+ - Request 1: 32K at offset 0, uses 6*4K + 8K of the 12K bvec
+ - Request 2: 32K at offset 32K
+ - iov_offset = 8K (8K already consumed from 12K bvec)
+ - Bug: calculates using 12K, not (12K - 8K) = 4K
+ - Result: nr_segs too small, infinite loop in fuse_get_user_pages.
+
+Fix by accounting for iov_offset when calculating the first segment's
+available length.
+
+Fixes: b419bed4f0a6 ("io_uring/rsrc: ensure segments counts are correct on kbuf buffers")
+Signed-off-by: huang-jl <huang-jl@deepseek.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/rsrc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 0010c4992490..5b6b73c6a62b 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1057,6 +1057,7 @@ static int io_import_kbuf(int ddir, struct iov_iter *iter,
+ if (count < imu->len) {
+ const struct bio_vec *bvec = iter->bvec;
+
++ len += iter->iov_offset;
+ while (len > bvec->bv_len) {
+ len -= bvec->bv_len;
+ bvec++;
+--
+2.51.0
+
--- /dev/null
+From 3fa17796efb3eda898947bd8d99f957ac7e4e08d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 18:36:18 +0800
+Subject: MIPS: Fix a reference leak bug in ip22_check_gio()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+[ Upstream commit 680ad315caaa2860df411cb378bf3614d96c7648 ]
+
+If gio_device_register fails, gio_dev_put() is required to
+drop the gio_dev device reference.
+
+Fixes: e84de0c61905 ("MIPS: GIO bus support for SGI IP22/28")
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/sgi-ip22/ip22-gio.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
+index 5893ea4e382c..19b70928d6dc 100644
+--- a/arch/mips/sgi-ip22/ip22-gio.c
++++ b/arch/mips/sgi-ip22/ip22-gio.c
+@@ -372,7 +372,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
+ gio_dev->resource.flags = IORESOURCE_MEM;
+ gio_dev->irq = irq;
+ dev_set_name(&gio_dev->dev, "%d", slotno);
+- gio_device_register(gio_dev);
++ if (gio_device_register(gio_dev))
++ gio_dev_put(gio_dev);
+ } else
+ printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
+ }
+--
+2.51.0
+
--- /dev/null
+From b070d877958b9b009e155596c1319d3aee73491d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 10:16:59 -0700
+Subject: selftests: ublk: fix overflow in ublk_queue_auto_zc_fallback()
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 9637fc3bdd10c8e073f71897bd35babbd21e9b29 ]
+
+The functions ublk_queue_use_zc(), ublk_queue_use_auto_zc(), and
+ublk_queue_auto_zc_fallback() were returning int, but performing
+bitwise AND on q->flags which is __u64.
+
+When a flag bit is set in the upper 32 bits (beyond INT_MAX), the
+result of the bitwise AND operation could overflow when cast to int,
+leading to incorrect boolean evaluation.
+
+For example, if UBLKS_Q_AUTO_BUF_REG_FALLBACK is 0x8000000000000000:
+ - (u64)flags & 0x8000000000000000 = 0x8000000000000000
+ - Cast to int: undefined behavior / incorrect value
+ - Used in if(): may evaluate incorrectly
+
+Fix by:
+1. Changing return type from int to bool for semantic correctness
+2. Using !! to explicitly convert to boolean (0 or 1)
+
+This ensures the functions return proper boolean values regardless
+of which bit position the flags occupy in the 64-bit field.
+
+Fixes: c3a6d48f86da ("selftests: ublk: remove ublk queue self-defined flags")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/ublk/kublk.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
+index 5e55484fb0aa..1b8833a40064 100644
+--- a/tools/testing/selftests/ublk/kublk.h
++++ b/tools/testing/selftests/ublk/kublk.h
+@@ -393,19 +393,19 @@ static inline int ublk_completed_tgt_io(struct ublk_thread *t,
+ return --io->tgt_ios == 0;
+ }
+
+-static inline int ublk_queue_use_zc(const struct ublk_queue *q)
++static inline bool ublk_queue_use_zc(const struct ublk_queue *q)
+ {
+- return q->flags & UBLK_F_SUPPORT_ZERO_COPY;
++ return !!(q->flags & UBLK_F_SUPPORT_ZERO_COPY);
+ }
+
+-static inline int ublk_queue_use_auto_zc(const struct ublk_queue *q)
++static inline bool ublk_queue_use_auto_zc(const struct ublk_queue *q)
+ {
+- return q->flags & UBLK_F_AUTO_BUF_REG;
++ return !!(q->flags & UBLK_F_AUTO_BUF_REG);
+ }
+
+-static inline int ublk_queue_auto_zc_fallback(const struct ublk_queue *q)
++static inline bool ublk_queue_auto_zc_fallback(const struct ublk_queue *q)
+ {
+- return q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK;
++ return !!(q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK);
+ }
+
+ static inline int ublk_queue_no_buf(const struct ublk_queue *q)
+--
+2.51.0
+
net-hns3-using-the-num_tqps-in-the-vf-driver-to-appl.patch
net-hns3-using-the-num_tqps-to-check-whether-tqp_ind.patch
net-hns3-add-vlan-id-validation-before-using.patch
+drm-tests-hdmi-handle-drm_kunit_helper_enable_crtc_c.patch
+drm-tests-handle-edeadlk-in-drm_test_check_valid_clo.patch
+drm-tests-handle-edeadlk-in-set_up_atomic_state.patch
+selftests-ublk-fix-overflow-in-ublk_queue_auto_zc_fa.patch
+block-unify-elevator-tags-and-type-xarrays-into-stru.patch
+block-move-elevator-tags-into-struct-elevator_resour.patch
+block-introduce-alloc_sched_data-and-free_sched_data.patch
+block-use-alloc-free-_sched-data-methods.patch
+block-fix-race-between-wbt_enable_default-and-io-sub.patch
+spi-microchip-rename-driver-file-and-internal-identi.patch
+spi-mpfs-fix-an-error-handling-path-in-mpfs_spi_prob.patch
+hwmon-dell-smm-limit-fan-multiplier-to-avoid-overflo.patch
+hwmon-ibmpex-fix-use-after-free-in-high-low-store.patch
+hwmon-tmp401-fix-overflow-caused-by-default-conversi.patch
+drm-xe-fix-freq-kobject-leak-on-sysfs_create_files-f.patch
+drm-xe-apply-wa_14020316580-in-xe_gt_idle_enable_pg.patch
+drm-xe-fix-drm_gpusvm_init-arguments.patch
+drm-me-gsc-mei-interrupt-top-half-should-be-in-irq-d.patch
+drm-xe-increase-tdf-timeout.patch
+drm-xe-restore-engine-registers-before-restarting-sc.patch
+mips-fix-a-reference-leak-bug-in-ip22_check_gio.patch
+drm-panel-sony-td4353-jdi-enable-prepare_prev_first.patch
+x86-xen-fix-sparse-warning-in-enlighten_pv.c.patch
+arm64-kdump-fix-elfcorehdr-overlap-caused-by-reserve.patch
+drm-amdgpu-fix-a-job-pasid-access-race-in-gpu-recove.patch
+spi-cadence-quadspi-fix-clock-disable-on-probe-failu.patch
+io_uring-fix-nr_segs-calculation-in-io_import_kbuf.patch
+ublk-add-parameter-struct-io_uring_cmd-to-ublk_prep_.patch
+ublk-add-union-ublk_io_buf-with-improved-naming.patch
+ublk-refactor-auto-buffer-register-in-ublk_dispatch_.patch
+ublk-fix-deadlock-when-reading-partition-table.patch
+block-rnbd-clt-fix-leaked-id-in-init_dev.patch
+drm-xe-limit-num_syncs-to-prevent-oversized-allocati.patch
+drm-xe-oa-limit-num_syncs-to-prevent-oversized-alloc.patch
+drm-xe-oa-always-set-oag_oaglbctxctrl_counter_resume.patch
+amd-iommu-preserve-domain-ids-inside-the-kdump-kerne.patch
+arm64-dts-mediatek-apply-mt8395-radxa-dt-overlay-at-.patch
+hwmon-ltc4282-fix-reset_history-file-permissions.patch
--- /dev/null
+From f5ac1fc3bbe588dfa663634949e2b26dba89211a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 12:53:12 +0530
+Subject: spi: cadence-quadspi: Fix clock disable on probe failure path
+
+From: Anurag Dutta <a-dutta@ti.com>
+
+[ Upstream commit 1889dd2081975ce1f6275b06cdebaa8d154847a9 ]
+
+When cqspi_request_mmap_dma() returns -EPROBE_DEFER after runtime PM
+is enabled, the error path calls clk_disable_unprepare() on an already
+disabled clock, causing an imbalance.
+
+Use pm_runtime_get_sync() to increment the usage counter and resume the
+device. This prevents runtime_suspend() from being invoked and causing
+a double clock disable.
+
+Fixes: 140623410536 ("mtd: spi-nor: Add driver for Cadence Quad SPI Flash Controller")
+Signed-off-by: Anurag Dutta <a-dutta@ti.com>
+Tested-by: Nishanth Menon <nm@ti.com>
+Link: https://patch.msgid.link/20251212072312.2711806-3-a-dutta@ti.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index af6d050da1c8..3231bdaf9bd0 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -2024,7 +2024,9 @@ static int cqspi_probe(struct platform_device *pdev)
+ probe_reset_failed:
+ if (cqspi->is_jh7110)
+ cqspi_jh7110_disable_clk(pdev, cqspi);
+- clk_disable_unprepare(cqspi->clk);
++
++ if (pm_runtime_get_sync(&pdev->dev) >= 0)
++ clk_disable_unprepare(cqspi->clk);
+ probe_clk_failed:
+ return ret;
+ }
+--
+2.51.0
+
--- /dev/null
+From 5fc856b2ef61d5b9cb834b8aefffa0855ded7d70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Nov 2025 10:45:43 +0000
+Subject: spi: microchip: rename driver file and internal identifiers
+
+From: Prajna Rajendra Kumar <prajna.rajendrakumar@microchip.com>
+
+[ Upstream commit 71c814e98696f2cd53e9e6cef7501c2d667d4c5a ]
+
+The spi-microchip-core.c driver provides support for the Microchip
+PolarFire SoC (MPFS) "hard" SPI controller. It was originally named
+"core" with the expectation that it might also cover Microchip's
+CoreSPI "soft" IP, but that never materialized.
+
+The CoreSPI IP cannot be supported by this driver because its register
+layout differs substantially from the MPFS SPI controller. In practice
+most of the code would need to be replaced to handle those differences
+so keeping the drivers separate is the simpler approach.
+
+The file and internal symbols are renamed to reflect MPFS support and
+to free up "spi-microchip-core.c" for CoreSPI driver.
+
+Fixes: 9ac8d17694b6 ("spi: add support for microchip fpga spi controllers")
+Signed-off-by: Prajna Rajendra Kumar <prajna.rajendrakumar@microchip.com>
+Acked-by: Conor Dooley <conor.dooley@microchip.com>
+Link: https://patch.msgid.link/20251114104545.284765-2-prajna.rajendrakumar@microchip.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: a8a313612af7 ("spi: mpfs: Fix an error handling path in mpfs_spi_probe()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/Kconfig | 19 +-
+ drivers/spi/Makefile | 2 +-
+ .../spi/{spi-microchip-core.c => spi-mpfs.c} | 207 +++++++++---------
+ 3 files changed, 115 insertions(+), 113 deletions(-)
+ rename drivers/spi/{spi-microchip-core.c => spi-mpfs.c} (68%)
+
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 55675750182e..1872f9d54a5c 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -706,15 +706,6 @@ config SPI_MESON_SPIFC
+ This enables master mode support for the SPIFC (SPI flash
+ controller) available in Amlogic Meson SoCs.
+
+-config SPI_MICROCHIP_CORE
+- tristate "Microchip FPGA SPI controllers"
+- depends on SPI_MASTER
+- help
+- This enables the SPI driver for Microchip FPGA SPI controllers.
+- Say Y or M here if you want to use the "hard" controllers on
+- PolarFire SoC.
+- If built as a module, it will be called spi-microchip-core.
+-
+ config SPI_MICROCHIP_CORE_QSPI
+ tristate "Microchip FPGA QSPI controllers"
+ depends on SPI_MASTER
+@@ -871,6 +862,16 @@ config SPI_PL022
+ controller. If you have an embedded system with an AMBA(R)
+ bus and a PL022 controller, say Y or M here.
+
++config SPI_POLARFIRE_SOC
++
++ tristate "Microchip FPGA SPI controllers"
++ depends on SPI_MASTER && ARCH_MICROCHIP
++ help
++ This enables the SPI driver for Microchip FPGA SPI controllers.
++ Say Y or M here if you want to use the "hard" controllers on
++ PolarFire SoC.
++ If built as a module, it will be called spi-mpfs.
++
+ config SPI_PPC4xx
+ tristate "PPC4xx SPI Controller"
+ depends on PPC32 && 4xx
+diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
+index 8ff74a13faaa..1f7c06a3091d 100644
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -86,7 +86,6 @@ obj-$(CONFIG_SPI_LOONGSON_PLATFORM) += spi-loongson-plat.o
+ obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
+ obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
+ obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
+-obj-$(CONFIG_SPI_MICROCHIP_CORE) += spi-microchip-core.o
+ obj-$(CONFIG_SPI_MICROCHIP_CORE_QSPI) += spi-microchip-core-qspi.o
+ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
+ obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
+@@ -97,6 +96,7 @@ obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
+ obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
+ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
+ obj-$(CONFIG_SPI_MXS) += spi-mxs.o
++obj-$(CONFIG_SPI_POLARFIRE_SOC) += spi-mpfs.o
+ obj-$(CONFIG_SPI_WPCM_FIU) += spi-wpcm-fiu.o
+ obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
+ obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
+diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-mpfs.c
+similarity index 68%
+rename from drivers/spi/spi-microchip-core.c
+rename to drivers/spi/spi-mpfs.c
+index 9128b86c5366..9a14d1732a15 100644
+--- a/drivers/spi/spi-microchip-core.c
++++ b/drivers/spi/spi-mpfs.c
+@@ -99,7 +99,7 @@
+ #define REG_CTRL2 (0x48)
+ #define REG_FRAMESUP (0x50)
+
+-struct mchp_corespi {
++struct mpfs_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ const u8 *tx_buf;
+@@ -113,34 +113,34 @@ struct mchp_corespi {
+ int n_bytes;
+ };
+
+-static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg)
++static inline u32 mpfs_spi_read(struct mpfs_spi *spi, unsigned int reg)
+ {
+ return readl(spi->regs + reg);
+ }
+
+-static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg, u32 val)
++static inline void mpfs_spi_write(struct mpfs_spi *spi, unsigned int reg, u32 val)
+ {
+ writel(val, spi->regs + reg);
+ }
+
+-static inline void mchp_corespi_disable(struct mchp_corespi *spi)
++static inline void mpfs_spi_disable(struct mpfs_spi *spi)
+ {
+- u32 control = mchp_corespi_read(spi, REG_CONTROL);
++ u32 control = mpfs_spi_read(spi, REG_CONTROL);
+
+ control &= ~CONTROL_ENABLE;
+
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+ }
+
+-static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max)
++static inline void mpfs_spi_read_fifo(struct mpfs_spi *spi, int fifo_max)
+ {
+ for (int i = 0; i < fifo_max; i++) {
+ u32 data;
+
+- while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
++ while (mpfs_spi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
+ ;
+
+- data = mchp_corespi_read(spi, REG_RX_DATA);
++ data = mpfs_spi_read(spi, REG_RX_DATA);
+
+ spi->rx_len -= spi->n_bytes;
+
+@@ -158,34 +158,34 @@ static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max
+ }
+ }
+
+-static void mchp_corespi_enable_ints(struct mchp_corespi *spi)
++static void mpfs_spi_enable_ints(struct mpfs_spi *spi)
+ {
+- u32 control = mchp_corespi_read(spi, REG_CONTROL);
++ u32 control = mpfs_spi_read(spi, REG_CONTROL);
+
+ control |= INT_ENABLE_MASK;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+ }
+
+-static void mchp_corespi_disable_ints(struct mchp_corespi *spi)
++static void mpfs_spi_disable_ints(struct mpfs_spi *spi)
+ {
+- u32 control = mchp_corespi_read(spi, REG_CONTROL);
++ u32 control = mpfs_spi_read(spi, REG_CONTROL);
+
+ control &= ~INT_ENABLE_MASK;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+ }
+
+-static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
++static inline void mpfs_spi_set_xfer_size(struct mpfs_spi *spi, int len)
+ {
+ u32 control;
+ u32 lenpart;
+- u32 frames = mchp_corespi_read(spi, REG_FRAMESUP);
++ u32 frames = mpfs_spi_read(spi, REG_FRAMESUP);
+
+ /*
+ * Writing to FRAMECNT in REG_CONTROL will reset the frame count, taking
+ * a shortcut requires an explicit clear.
+ */
+ if (frames == len) {
+- mchp_corespi_write(spi, REG_COMMAND, COMMAND_CLRFRAMECNT);
++ mpfs_spi_write(spi, REG_COMMAND, COMMAND_CLRFRAMECNT);
+ return;
+ }
+
+@@ -208,20 +208,20 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
+ * that matches the documentation.
+ */
+ lenpart = len & 0xffff;
+- control = mchp_corespi_read(spi, REG_CONTROL);
++ control = mpfs_spi_read(spi, REG_CONTROL);
+ control &= ~CONTROL_FRAMECNT_MASK;
+ control |= lenpart << CONTROL_FRAMECNT_SHIFT;
+- mchp_corespi_write(spi, REG_CONTROL, control);
+- mchp_corespi_write(spi, REG_FRAMESUP, len);
++ mpfs_spi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_FRAMESUP, len);
+ }
+
+-static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max)
++static inline void mpfs_spi_write_fifo(struct mpfs_spi *spi, int fifo_max)
+ {
+ int i = 0;
+
+- mchp_corespi_set_xfer_size(spi, fifo_max);
++ mpfs_spi_set_xfer_size(spi, fifo_max);
+
+- while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
++ while ((i < fifo_max) && !(mpfs_spi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
+ u32 word;
+
+ if (spi->n_bytes == 4)
+@@ -231,7 +231,7 @@ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_ma
+ else
+ word = spi->tx_buf ? *spi->tx_buf : 0xaa;
+
+- mchp_corespi_write(spi, REG_TX_DATA, word);
++ mpfs_spi_write(spi, REG_TX_DATA, word);
+ if (spi->tx_buf)
+ spi->tx_buf += spi->n_bytes;
+ i++;
+@@ -240,9 +240,9 @@ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_ma
+ spi->tx_len -= i * spi->n_bytes;
+ }
+
+-static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
++static inline void mpfs_spi_set_framesize(struct mpfs_spi *spi, int bt)
+ {
+- u32 frame_size = mchp_corespi_read(spi, REG_FRAME_SIZE);
++ u32 frame_size = mpfs_spi_read(spi, REG_FRAME_SIZE);
+ u32 control;
+
+ if ((frame_size & FRAME_SIZE_MASK) == bt)
+@@ -252,25 +252,25 @@ static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
+ * Disable the SPI controller. Writes to the frame size have
+ * no effect when the controller is enabled.
+ */
+- control = mchp_corespi_read(spi, REG_CONTROL);
++ control = mpfs_spi_read(spi, REG_CONTROL);
+ control &= ~CONTROL_ENABLE;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+
+- mchp_corespi_write(spi, REG_FRAME_SIZE, bt);
++ mpfs_spi_write(spi, REG_FRAME_SIZE, bt);
+
+ control |= CONTROL_ENABLE;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+ }
+
+-static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
++static void mpfs_spi_set_cs(struct spi_device *spi, bool disable)
+ {
+ u32 reg;
+- struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller);
++ struct mpfs_spi *mspi = spi_controller_get_devdata(spi->controller);
+
+- reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
++ reg = mpfs_spi_read(mspi, REG_SLAVE_SELECT);
+ reg &= ~BIT(spi_get_chipselect(spi, 0));
+ reg |= !disable << spi_get_chipselect(spi, 0);
+- corespi->pending_slave_select = reg;
++ mspi->pending_slave_select = reg;
+
+ /*
+ * Only deassert chip select immediately. Writing to some registers
+@@ -281,12 +281,12 @@ static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
+ * doesn't see any spurious clock transitions whilst CS is enabled.
+ */
+ if (((spi->mode & SPI_CS_HIGH) == 0) == disable)
+- mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
++ mpfs_spi_write(mspi, REG_SLAVE_SELECT, reg);
+ }
+
+-static int mchp_corespi_setup(struct spi_device *spi)
++static int mpfs_spi_setup(struct spi_device *spi)
+ {
+- struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller);
++ struct mpfs_spi *mspi = spi_controller_get_devdata(spi->controller);
+ u32 reg;
+
+ if (spi_is_csgpiod(spi))
+@@ -298,21 +298,21 @@ static int mchp_corespi_setup(struct spi_device *spi)
+ * driving their select line low.
+ */
+ if (spi->mode & SPI_CS_HIGH) {
+- reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
++ reg = mpfs_spi_read(mspi, REG_SLAVE_SELECT);
+ reg |= BIT(spi_get_chipselect(spi, 0));
+- corespi->pending_slave_select = reg;
+- mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
++ mspi->pending_slave_select = reg;
++ mpfs_spi_write(mspi, REG_SLAVE_SELECT, reg);
+ }
+ return 0;
+ }
+
+-static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *spi)
++static void mpfs_spi_init(struct spi_controller *host, struct mpfs_spi *spi)
+ {
+ unsigned long clk_hz;
+- u32 control = mchp_corespi_read(spi, REG_CONTROL);
++ u32 control = mpfs_spi_read(spi, REG_CONTROL);
+
+ control &= ~CONTROL_ENABLE;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_MASTER;
+ control &= ~CONTROL_MODE_MASK;
+@@ -328,15 +328,15 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *
+ */
+ control |= CONTROL_SPS | CONTROL_BIGFIFO;
+
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+
+- mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
++ mpfs_spi_set_framesize(spi, DEFAULT_FRAMESIZE);
+
+ /* max. possible spi clock rate is the apb clock rate */
+ clk_hz = clk_get_rate(spi->clk);
+ host->max_speed_hz = clk_hz;
+
+- mchp_corespi_enable_ints(spi);
++ mpfs_spi_enable_ints(spi);
+
+ /*
+ * It is required to enable direct mode, otherwise control over the chip
+@@ -344,34 +344,34 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *
+ * can deal with active high targets.
+ */
+ spi->pending_slave_select = SSELOUT | SSEL_DIRECT;
+- mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
++ mpfs_spi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
+
+- control = mchp_corespi_read(spi, REG_CONTROL);
++ control = mpfs_spi_read(spi, REG_CONTROL);
+
+ control &= ~CONTROL_RESET;
+ control |= CONTROL_ENABLE;
+
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+ }
+
+-static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
++static inline void mpfs_spi_set_clk_gen(struct mpfs_spi *spi)
+ {
+ u32 control;
+
+- control = mchp_corespi_read(spi, REG_CONTROL);
++ control = mpfs_spi_read(spi, REG_CONTROL);
+ if (spi->clk_mode)
+ control |= CONTROL_CLKMODE;
+ else
+ control &= ~CONTROL_CLKMODE;
+
+- mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CLK_GEN, spi->clk_gen);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+ }
+
+-static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
++static inline void mpfs_spi_set_mode(struct mpfs_spi *spi, unsigned int mode)
+ {
+ u32 mode_val;
+- u32 control = mchp_corespi_read(spi, REG_CONTROL);
++ u32 control = mpfs_spi_read(spi, REG_CONTROL);
+
+ switch (mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+@@ -394,22 +394,22 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int
+ */
+
+ control &= ~CONTROL_ENABLE;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+
+ control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
+ control |= mode_val;
+
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_ENABLE;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mpfs_spi_write(spi, REG_CONTROL, control);
+ }
+
+-static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
++static irqreturn_t mpfs_spi_interrupt(int irq, void *dev_id)
+ {
+ struct spi_controller *host = dev_id;
+- struct mchp_corespi *spi = spi_controller_get_devdata(host);
+- u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf;
++ struct mpfs_spi *spi = spi_controller_get_devdata(host);
++ u32 intfield = mpfs_spi_read(spi, REG_MIS) & 0xf;
+ bool finalise = false;
+
+ /* Interrupt line may be shared and not for us at all */
+@@ -417,7 +417,7 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ return IRQ_NONE;
+
+ if (intfield & INT_RX_CHANNEL_OVERFLOW) {
+- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
++ mpfs_spi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
+ finalise = true;
+ dev_err(&host->dev,
+ "%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__,
+@@ -425,7 +425,7 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ }
+
+ if (intfield & INT_TX_CHANNEL_UNDERRUN) {
+- mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
++ mpfs_spi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
+ finalise = true;
+ dev_err(&host->dev,
+ "%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__,
+@@ -438,8 +438,8 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
+-static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
+- unsigned long target_hz)
++static int mpfs_spi_calculate_clkgen(struct mpfs_spi *spi,
++ unsigned long target_hz)
+ {
+ unsigned long clk_hz, spi_hz, clk_gen;
+
+@@ -475,20 +475,20 @@ static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
+ return 0;
+ }
+
+-static int mchp_corespi_transfer_one(struct spi_controller *host,
+- struct spi_device *spi_dev,
+- struct spi_transfer *xfer)
++static int mpfs_spi_transfer_one(struct spi_controller *host,
++ struct spi_device *spi_dev,
++ struct spi_transfer *xfer)
+ {
+- struct mchp_corespi *spi = spi_controller_get_devdata(host);
++ struct mpfs_spi *spi = spi_controller_get_devdata(host);
+ int ret;
+
+- ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz);
++ ret = mpfs_spi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz);
+ if (ret) {
+ dev_err(&host->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
+ return ret;
+ }
+
+- mchp_corespi_set_clk_gen(spi);
++ mpfs_spi_set_clk_gen(spi);
+
+ spi->tx_buf = xfer->tx_buf;
+ spi->rx_buf = xfer->rx_buf;
+@@ -496,45 +496,46 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
+ spi->rx_len = xfer->len;
+ spi->n_bytes = roundup_pow_of_two(DIV_ROUND_UP(xfer->bits_per_word, BITS_PER_BYTE));
+
+- mchp_corespi_set_framesize(spi, xfer->bits_per_word);
++ mpfs_spi_set_framesize(spi, xfer->bits_per_word);
+
+- mchp_corespi_write(spi, REG_COMMAND, COMMAND_RXFIFORST | COMMAND_TXFIFORST);
++ mpfs_spi_write(spi, REG_COMMAND, COMMAND_RXFIFORST | COMMAND_TXFIFORST);
+
+- mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
++ mpfs_spi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
+
+ while (spi->tx_len) {
+ int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
+
+- mchp_corespi_write_fifo(spi, fifo_max);
+- mchp_corespi_read_fifo(spi, fifo_max);
++ mpfs_spi_write_fifo(spi, fifo_max);
++ mpfs_spi_read_fifo(spi, fifo_max);
+ }
+
+ spi_finalize_current_transfer(host);
+ return 1;
+ }
+
+-static int mchp_corespi_prepare_message(struct spi_controller *host,
+- struct spi_message *msg)
++static int mpfs_spi_prepare_message(struct spi_controller *host,
++ struct spi_message *msg)
+ {
+ struct spi_device *spi_dev = msg->spi;
+- struct mchp_corespi *spi = spi_controller_get_devdata(host);
++ struct mpfs_spi *spi = spi_controller_get_devdata(host);
+
+- mchp_corespi_set_mode(spi, spi_dev->mode);
++ mpfs_spi_set_mode(spi, spi_dev->mode);
+
+ return 0;
+ }
+
+-static int mchp_corespi_probe(struct platform_device *pdev)
++static int mpfs_spi_probe(struct platform_device *pdev)
+ {
+ struct spi_controller *host;
+- struct mchp_corespi *spi;
++ struct mpfs_spi *spi;
+ struct resource *res;
+ u32 num_cs;
+ int ret = 0;
+
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi));
+ if (!host)
+- return -ENOMEM;
++ return dev_err_probe(&pdev->dev, -ENOMEM,
++ "unable to allocate host for SPI controller\n");
+
+ platform_set_drvdata(pdev, host);
+
+@@ -544,11 +545,11 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ host->num_chipselect = num_cs;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->use_gpio_descriptors = true;
+- host->setup = mchp_corespi_setup;
++ host->setup = mpfs_spi_setup;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+- host->transfer_one = mchp_corespi_transfer_one;
+- host->prepare_message = mchp_corespi_prepare_message;
+- host->set_cs = mchp_corespi_set_cs;
++ host->transfer_one = mpfs_spi_transfer_one;
++ host->prepare_message = mpfs_spi_prepare_message;
++ host->set_cs = mpfs_spi_set_cs;
+ host->dev.of_node = pdev->dev.of_node;
+
+ spi = spi_controller_get_devdata(host);
+@@ -561,7 +562,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ if (spi->irq < 0)
+ return spi->irq;
+
+- ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt,
++ ret = devm_request_irq(&pdev->dev, spi->irq, mpfs_spi_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), host);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+@@ -572,11 +573,11 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk),
+ "could not get clk\n");
+
+- mchp_corespi_init(host, spi);
++ mpfs_spi_init(host, spi);
+
+ ret = devm_spi_register_controller(&pdev->dev, host);
+ if (ret) {
+- mchp_corespi_disable(spi);
++ mpfs_spi_disable(spi);
+ return dev_err_probe(&pdev->dev, ret,
+ "unable to register host for SPI controller\n");
+ }
+@@ -586,13 +587,13 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static void mchp_corespi_remove(struct platform_device *pdev)
++static void mpfs_spi_remove(struct platform_device *pdev)
+ {
+ struct spi_controller *host = platform_get_drvdata(pdev);
+- struct mchp_corespi *spi = spi_controller_get_devdata(host);
++ struct mpfs_spi *spi = spi_controller_get_devdata(host);
+
+- mchp_corespi_disable_ints(spi);
+- mchp_corespi_disable(spi);
++ mpfs_spi_disable_ints(spi);
++ mpfs_spi_disable(spi);
+ }
+
+ #define MICROCHIP_SPI_PM_OPS (NULL)
+@@ -602,23 +603,23 @@ static void mchp_corespi_remove(struct platform_device *pdev)
+ */
+
+ #if defined(CONFIG_OF)
+-static const struct of_device_id mchp_corespi_dt_ids[] = {
++static const struct of_device_id mpfs_spi_dt_ids[] = {
+ { .compatible = "microchip,mpfs-spi" },
+ { /* sentinel */ }
+ };
+-MODULE_DEVICE_TABLE(of, mchp_corespi_dt_ids);
++MODULE_DEVICE_TABLE(of, mpfs_spi_dt_ids);
+ #endif
+
+-static struct platform_driver mchp_corespi_driver = {
+- .probe = mchp_corespi_probe,
++static struct platform_driver mpfs_spi_driver = {
++ .probe = mpfs_spi_probe,
+ .driver = {
+- .name = "microchip-corespi",
++ .name = "microchip-spi",
+ .pm = MICROCHIP_SPI_PM_OPS,
+- .of_match_table = of_match_ptr(mchp_corespi_dt_ids),
++ .of_match_table = of_match_ptr(mpfs_spi_dt_ids),
+ },
+- .remove = mchp_corespi_remove,
++ .remove = mpfs_spi_remove,
+ };
+-module_platform_driver(mchp_corespi_driver);
++module_platform_driver(mpfs_spi_driver);
+ MODULE_DESCRIPTION("Microchip coreSPI SPI controller driver");
+ MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+ MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+--
+2.51.0
+
--- /dev/null
+From 03d8ae1b327729db9b7bb959137eb256496e56b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Dec 2025 08:48:51 +0100
+Subject: spi: mpfs: Fix an error handling path in mpfs_spi_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit a8a313612af7a55083ba5720f14f1835319debee ]
+
+mpfs_spi_init() calls mpfs_spi_enable_ints(), so mpfs_spi_disable_ints()
+should be called if an error occurs after calling mpfs_spi_init(), as
+already done in the remove function.
+
+Fixes: 9ac8d17694b6 ("spi: add support for microchip fpga spi controllers")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://patch.msgid.link/eb35f168517cc402ef7e78f26da02863e2f45c03.1765612110.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-mpfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/spi/spi-mpfs.c b/drivers/spi/spi-mpfs.c
+index 9a14d1732a15..7e9e64d8e6c8 100644
+--- a/drivers/spi/spi-mpfs.c
++++ b/drivers/spi/spi-mpfs.c
+@@ -577,6 +577,7 @@ static int mpfs_spi_probe(struct platform_device *pdev)
+
+ ret = devm_spi_register_controller(&pdev->dev, host);
+ if (ret) {
++ mpfs_spi_disable_ints(spi);
+ mpfs_spi_disable(spi);
+ return dev_err_probe(&pdev->dev, ret,
+ "unable to register host for SPI controller\n");
+--
+2.51.0
+
--- /dev/null
+From 1c7b22105678b9761209802d390a39ebd42be0a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 09:58:24 +0800
+Subject: ublk: add parameter `struct io_uring_cmd *` to
+ ublk_prep_auto_buf_reg()
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 3035b9b46b0611898babc0b96ede65790d3566f7 ]
+
+Add parameter `struct io_uring_cmd *` to ublk_prep_auto_buf_reg() and
+prepare for reusing this helper for the coming UBLK_BATCH_IO feature,
+which can fetch & commit one batch of io commands via single uring_cmd.
+
+Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c258f5c4502c ("ublk: fix deadlock when reading partition table")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/ublk_drv.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 359564c40cb5..599571634b7a 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1242,11 +1242,12 @@ ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io)
+ }
+
+ static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+- struct ublk_io *io, unsigned int issue_flags)
++ struct ublk_io *io, struct io_uring_cmd *cmd,
++ unsigned int issue_flags)
+ {
+ int ret;
+
+- ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
++ ret = io_buffer_register_bvec(cmd, req, ublk_io_release,
+ io->buf.index, issue_flags);
+ if (ret) {
+ if (io->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+@@ -1258,18 +1259,19 @@ static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ }
+
+ io->task_registered_buffers = 1;
+- io->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
++ io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd);
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+ return true;
+ }
+
+ static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
++ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+ {
+ ublk_init_req_ref(ubq, io);
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
+- return ublk_auto_buf_reg(ubq, req, io, issue_flags);
++ return ublk_auto_buf_reg(ubq, req, io, cmd, issue_flags);
+
+ return true;
+ }
+@@ -1344,7 +1346,7 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
+ if (!ublk_start_io(ubq, req, io))
+ return;
+
+- if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
++ if (ublk_prep_auto_buf_reg(ubq, req, io, io->cmd, issue_flags))
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From a9f39a12f31303be81ed690fb08d67ca1effd9db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 09:58:25 +0800
+Subject: ublk: add `union ublk_io_buf` with improved naming
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 8d61ece156bd4f2b9e7d3b2a374a26d42c7a4a06 ]
+
+Add `union ublk_io_buf` for naming the anonymous union of struct ublk_io's
+addr and buf fields, meantime apply it to `struct ublk_io` for storing either
+ublk auto buffer register data or ublk server io buffer address.
+
+The union uses clear field names:
+- `addr`: for regular ublk server io buffer addresses
+- `auto_reg`: for ublk auto buffer registration data
+
+This eliminates confusing access patterns and improves code readability.
+
+Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c258f5c4502c ("ublk: fix deadlock when reading partition table")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/ublk_drv.c | 40 ++++++++++++++++++++++------------------
+ 1 file changed, 22 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 599571634b7a..c9d258b99090 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -155,12 +155,13 @@ struct ublk_uring_cmd_pdu {
+ */
+ #define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
+
++union ublk_io_buf {
++ __u64 addr;
++ struct ublk_auto_buf_reg auto_reg;
++};
++
+ struct ublk_io {
+- /* userspace buffer address from io cmd */
+- union {
+- __u64 addr;
+- struct ublk_auto_buf_reg buf;
+- };
++ union ublk_io_buf buf;
+ unsigned int flags;
+ int res;
+
+@@ -499,7 +500,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
+ iod->op_flags = ublk_op | ublk_req_build_flags(req);
+ iod->nr_sectors = blk_rq_sectors(req);
+ iod->start_sector = blk_rq_pos(req);
+- iod->addr = io->addr;
++ iod->addr = io->buf.addr;
+
+ return BLK_STS_OK;
+ }
+@@ -1047,7 +1048,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
+ struct iov_iter iter;
+ const int dir = ITER_DEST;
+
+- import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
++ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), rq_bytes, &iter);
+ return ublk_copy_user_pages(req, 0, &iter, dir);
+ }
+ return rq_bytes;
+@@ -1068,7 +1069,7 @@ static int ublk_unmap_io(bool need_map,
+
+ WARN_ON_ONCE(io->res > rq_bytes);
+
+- import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
++ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), io->res, &iter);
+ return ublk_copy_user_pages(req, 0, &iter, dir);
+ }
+ return rq_bytes;
+@@ -1134,7 +1135,7 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
+ iod->op_flags = ublk_op | ublk_req_build_flags(req);
+ iod->nr_sectors = blk_rq_sectors(req);
+ iod->start_sector = blk_rq_pos(req);
+- iod->addr = io->addr;
++ iod->addr = io->buf.addr;
+
+ return BLK_STS_OK;
+ }
+@@ -1248,9 +1249,9 @@ static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ int ret;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release,
+- io->buf.index, issue_flags);
++ io->buf.auto_reg.index, issue_flags);
+ if (ret) {
+- if (io->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
++ if (io->buf.auto_reg.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(ubq, io);
+ return true;
+ }
+@@ -1539,7 +1540,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+ */
+ io->flags &= UBLK_IO_FLAG_CANCELED;
+ io->cmd = NULL;
+- io->addr = 0;
++ io->buf.addr = 0;
+
+ /*
+ * old task is PF_EXITING, put it now
+@@ -2100,13 +2101,16 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
+
+ static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd)
+ {
+- io->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
++ struct ublk_auto_buf_reg buf;
++
++ buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+- if (io->buf.reserved0 || io->buf.reserved1)
++ if (buf.reserved0 || buf.reserved1)
+ return -EINVAL;
+
+- if (io->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
++ if (buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
++ io->buf.auto_reg = buf;
+ return 0;
+ }
+
+@@ -2128,7 +2132,7 @@ static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+ * this ublk request gets stuck.
+ */
+ if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+- *buf_idx = io->buf.index;
++ *buf_idx = io->buf.auto_reg.index;
+ }
+
+ return ublk_set_auto_buf_reg(io, cmd);
+@@ -2156,7 +2160,7 @@ ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
+ if (ublk_dev_support_auto_buf_reg(ub))
+ return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
+
+- io->addr = buf_addr;
++ io->buf.addr = buf_addr;
+ return 0;
+ }
+
+@@ -2353,7 +2357,7 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+- ublk_get_iod(ubq, req->tag)->addr = io->addr;
++ ublk_get_iod(ubq, req->tag)->addr = io->buf.addr;
+ pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+--
+2.51.0
+
--- /dev/null
+From 0bda6ac61e3d7dd0a899d8b626413a26cdbccaab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 22:34:15 +0800
+Subject: ublk: fix deadlock when reading partition table
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit c258f5c4502c9667bccf5d76fa731ab9c96687c1 ]
+
+When one process(such as udev) opens ublk block device (e.g., to read
+the partition table via bdev_open()), a deadlock[1] can occur:
+
+1. bdev_open() grabs disk->open_mutex
+2. The process issues read I/O to ublk backend to read partition table
+3. In __ublk_complete_rq(), blk_update_request() or blk_mq_end_request()
+ runs bio->bi_end_io() callbacks
+4. If this triggers fput() on file descriptor of ublk block device, the
+ work may be deferred to current task's task work (see fput() implementation)
+5. This eventually calls blkdev_release() from the same context
+6. blkdev_release() tries to grab disk->open_mutex again
+7. Deadlock: same task waiting for a mutex it already holds
+
+The fix is to run blk_update_request() and blk_mq_end_request() with bottom
+halves disabled. This forces blkdev_release() to run in kernel work-queue
+context instead of current task work context, and allows ublk server to make
+forward progress, and avoids the deadlock.
+
+Fixes: 71f28f3136af ("ublk_drv: add io_uring based userspace block driver")
+Link: https://github.com/ublk-org/ublksrv/issues/170 [1]
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
+[axboe: rewrite comment in ublk]
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/ublk_drv.c | 32 ++++++++++++++++++++++++++++----
+ 1 file changed, 28 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index bdb897d44089..fa7b0481ea04 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1146,12 +1146,20 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
+ return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
+ }
+
++static void ublk_end_request(struct request *req, blk_status_t error)
++{
++ local_bh_disable();
++ blk_mq_end_request(req, error);
++ local_bh_enable();
++}
++
+ /* todo: handle partial completion */
+ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
+ {
+ unsigned int unmapped_bytes;
+ blk_status_t res = BLK_STS_OK;
++ bool requeue;
+
+ /* failed read IO if nothing is read */
+ if (!io->res && req_op(req) == REQ_OP_READ)
+@@ -1183,14 +1191,30 @@ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ if (unlikely(unmapped_bytes < io->res))
+ io->res = unmapped_bytes;
+
+- if (blk_update_request(req, BLK_STS_OK, io->res))
++ /*
++ * Run bio->bi_end_io() with softirqs disabled. If the final fput
++ * happens off this path, then that will prevent ublk's blkdev_release()
++ * from being called on current's task work, see fput() implementation.
++ *
++ * Otherwise, ublk server may not provide forward progress in case of
++ * reading the partition table from bdev_open() with disk->open_mutex
++ * held, and causes dead lock as we could already be holding
++ * disk->open_mutex here.
++ *
++ * Preferably we would not be doing IO with a mutex held that is also
++ * used for release, but this work-around will suffice for now.
++ */
++ local_bh_disable();
++ requeue = blk_update_request(req, BLK_STS_OK, io->res);
++ local_bh_enable();
++ if (requeue)
+ blk_mq_requeue_request(req, true);
+ else if (likely(!blk_should_fake_timeout(req->q)))
+ __blk_mq_end_request(req, BLK_STS_OK);
+
+ return;
+ exit:
+- blk_mq_end_request(req, res);
++ ublk_end_request(req, res);
+ }
+
+ static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
+@@ -1230,7 +1254,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+ if (ublk_nosrv_dev_should_queue_io(ubq->dev))
+ blk_mq_requeue_request(rq, false);
+ else
+- blk_mq_end_request(rq, BLK_STS_IOERR);
++ ublk_end_request(rq, BLK_STS_IOERR);
+ }
+
+ static void
+@@ -1275,7 +1299,7 @@ __ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ ublk_auto_buf_reg_fallback(ubq, req->tag);
+ return AUTO_BUF_REG_FALLBACK;
+ }
+- blk_mq_end_request(req, BLK_STS_IOERR);
++ ublk_end_request(req, BLK_STS_IOERR);
+ return AUTO_BUF_REG_FAIL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 5caefbe9b42e5a37d298cb9435ced6893c21f613 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 09:58:26 +0800
+Subject: ublk: refactor auto buffer register in ublk_dispatch_req()
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 0a9beafa7c633e6ff66b05b81eea78231b7e6520 ]
+
+Refactor auto buffer register code and prepare for supporting batch IO
+feature, and the main motivation is to put 'ublk_io' operation code
+together, so that per-io lock can be applied for the code block.
+
+The key changes are:
+- Rename ublk_auto_buf_reg() as ublk_do_auto_buf_reg()
+- Introduce an enum `auto_buf_reg_res` to represent the result of
+ the buffer registration attempt (FAIL, FALLBACK, OK).
+- Split the existing `ublk_do_auto_buf_reg` function into two:
+ - `__ublk_do_auto_buf_reg`: Performs the actual buffer registration
+ and returns the `auto_buf_reg_res` status.
+ - `ublk_do_auto_buf_reg`: A wrapper that calls the internal function
+ and handles the I/O preparation based on the result.
+- Introduce `ublk_prep_auto_buf_reg_io` to encapsulate the logic for
+ preparing the I/O for completion after buffer registration.
+- Pass the `tag` directly to `ublk_auto_buf_reg_fallback` to avoid
+ recalculating it.
+
+This refactoring makes the control flow clearer and isolates the different
+stages of the auto buffer registration process.
+
+Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c258f5c4502c ("ublk: fix deadlock when reading partition table")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/ublk_drv.c | 64 +++++++++++++++++++++++++++-------------
+ 1 file changed, 43 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index c9d258b99090..bdb897d44089 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1234,17 +1234,37 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+ }
+
+ static void
+-ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io)
++ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, unsigned tag)
+ {
+- unsigned tag = io - ubq->ios;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
+
+ iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
+ }
+
+-static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+- struct ublk_io *io, struct io_uring_cmd *cmd,
+- unsigned int issue_flags)
++enum auto_buf_reg_res {
++ AUTO_BUF_REG_FAIL,
++ AUTO_BUF_REG_FALLBACK,
++ AUTO_BUF_REG_OK,
++};
++
++static void ublk_prep_auto_buf_reg_io(const struct ublk_queue *ubq,
++ struct request *req, struct ublk_io *io,
++ struct io_uring_cmd *cmd,
++ enum auto_buf_reg_res res)
++{
++ if (res == AUTO_BUF_REG_OK) {
++ io->task_registered_buffers = 1;
++ io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd);
++ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
++ }
++ ublk_init_req_ref(ubq, io);
++ __ublk_prep_compl_io_cmd(io, req);
++}
++
++static enum auto_buf_reg_res
++__ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
++ struct ublk_io *io, struct io_uring_cmd *cmd,
++ unsigned int issue_flags)
+ {
+ int ret;
+
+@@ -1252,29 +1272,27 @@ static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ io->buf.auto_reg.index, issue_flags);
+ if (ret) {
+ if (io->buf.auto_reg.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+- ublk_auto_buf_reg_fallback(ubq, io);
+- return true;
++ ublk_auto_buf_reg_fallback(ubq, req->tag);
++ return AUTO_BUF_REG_FALLBACK;
+ }
+ blk_mq_end_request(req, BLK_STS_IOERR);
+- return false;
++ return AUTO_BUF_REG_FAIL;
+ }
+
+- io->task_registered_buffers = 1;
+- io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd);
+- io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+- return true;
++ return AUTO_BUF_REG_OK;
+ }
+
+-static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
+- struct request *req, struct ublk_io *io,
+- struct io_uring_cmd *cmd,
+- unsigned int issue_flags)
++static void ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
++ struct ublk_io *io, struct io_uring_cmd *cmd,
++ unsigned int issue_flags)
+ {
+- ublk_init_req_ref(ubq, io);
+- if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
+- return ublk_auto_buf_reg(ubq, req, io, cmd, issue_flags);
++ enum auto_buf_reg_res res = __ublk_do_auto_buf_reg(ubq, req, io, cmd,
++ issue_flags);
+
+- return true;
++ if (res != AUTO_BUF_REG_FAIL) {
++ ublk_prep_auto_buf_reg_io(ubq, req, io, cmd, res);
++ io_uring_cmd_done(cmd, UBLK_IO_RES_OK, issue_flags);
++ }
+ }
+
+ static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
+@@ -1347,8 +1365,12 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
+ if (!ublk_start_io(ubq, req, io))
+ return;
+
+- if (ublk_prep_auto_buf_reg(ubq, req, io, io->cmd, issue_flags))
++ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) {
++ ublk_do_auto_buf_reg(ubq, req, io, io->cmd, issue_flags);
++ } else {
++ ublk_init_req_ref(ubq, io);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
++ }
+ }
+
+ static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
+--
+2.51.0
+
--- /dev/null
+From 208898665ad8598d26020c022a1ba0d9c708c2d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 12:51:12 +0100
+Subject: x86/xen: Fix sparse warning in enlighten_pv.c
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit e5aff444e3a7bdeef5ea796a2099fc3c60a070fa ]
+
+The sparse tool issues a warning for arch/x76/xen/enlighten_pv.c:
+
+ arch/x86/xen/enlighten_pv.c:120:9: sparse: sparse: incorrect type
+ in initializer (different address spaces)
+ expected void const [noderef] __percpu *__vpp_verify
+ got bool *
+
+This is due to the percpu variable xen_in_preemptible_hcall being
+exported via EXPORT_SYMBOL_GPL() instead of EXPORT_PER_CPU_SYMBOL_GPL().
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202512140856.Ic6FetG6-lkp@intel.com/
+Fixes: fdfd811ddde3 ("x86/xen: allow privcmd hypercalls to be preempted")
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20251215115112.15072-1-jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/xen/enlighten_pv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 4806cc28d7ca..b74ff8bc7f2a 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -108,7 +108,7 @@ static int xen_cpu_dead_pv(unsigned int cpu);
+ * calls.
+ */
+ DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
++EXPORT_PER_CPU_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+ /*
+ * In case of scheduling the flag must be cleared and restored after
+--
+2.51.0
+
--- /dev/null
+From 77a39060a883abc99c51abde4c8701fb912a1dc8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 10:36:48 +0100
+Subject: block: rnbd-clt: Fix leaked ID in init_dev()
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+[ Upstream commit c9b5645fd8ca10f310e41b07540f98e6a9720f40 ]
+
+If kstrdup() fails in init_dev(), then the newly allocated ID is lost.
+
+Fixes: 64e8a6ece1a5 ("block/rnbd-clt: Dynamically alloc buffer for pathname & blk_symlink_name")
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rnbd/rnbd-clt.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index b0550b68645d..0c3c63cf986b 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1440,9 +1440,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+- GFP_KERNEL);
+- if (ret < 0) {
++ dev->clt_device_id = ida_alloc_max(&index_ida,
++ (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
++ GFP_KERNEL);
++ if (dev->clt_device_id < 0) {
++ ret = dev->clt_device_id;
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+@@ -1451,10 +1453,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ dev->pathname = kstrdup(pathname, GFP_KERNEL);
+ if (!dev->pathname) {
+ ret = -ENOMEM;
+- goto out_queues;
++ goto out_ida;
+ }
+
+- dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ dev->nr_poll_queues = nr_poll_queues;
+@@ -1470,6 +1471,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+
+ return dev;
+
++out_ida:
++ ida_free(&index_ida, dev->clt_device_id);
+ out_queues:
+ kfree(dev->hw_queues);
+ out_alloc:
+--
+2.51.0
+
--- /dev/null
+From 36ff72a0ff4a97b09a0802808de4758a658f36fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 30 Nov 2025 23:40:05 +0100
+Subject: drm/panel: sony-td4353-jdi: Enable prepare_prev_first
+
+From: Marijn Suijten <marijn.suijten@somainline.org>
+
+[ Upstream commit 2b973ca48ff3ef1952091c8f988d7796781836c8 ]
+
+The DSI host must be enabled before our prepare function can run, which
+has to send its init sequence over DSI. Without enabling the host first
+the panel will not probe.
+
+Fixes: 9e15123eca79 ("drm/msm/dsi: Stop unconditionally powering up DSI hosts at modeset")
+Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Reviewed-by: Martin Botka <martin.botka@somainline.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patch.msgid.link/20251130-sony-akari-fix-panel-v1-1-1d27c60a55f5@somainline.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+index 1bde2f01786b..e7383a0e3d36 100644
+--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
++++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+@@ -283,6 +283,8 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
++ ctx->panel.prepare_prev_first = true;
++
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+--
+2.51.0
+
--- /dev/null
+From 603831660c14de4cc429842b440d21f8e1bea49b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 17:48:08 +0800
+Subject: hwmon: (ibmpex) fix use-after-free in high/low store
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+[ Upstream commit 6946c726c3f4c36f0f049e6f97e88c510b15f65d ]
+
+The ibmpex_high_low_store() function retrieves driver data using
+dev_get_drvdata() and uses it without validation. This creates a race
+condition where the sysfs callback can be invoked after the data
+structure is freed, leading to use-after-free.
+
+Fix by adding a NULL check after dev_get_drvdata(), and reordering
+operations in the deletion path to prevent TOCTOU.
+
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Reported-by: Junrui Luo <moonafterrain@outlook.com>
+Fixes: 57c7c3a0fdea ("hwmon: IBM power meter driver")
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Link: https://lore.kernel.org/r/MEYPR01MB7886BE2F51BFE41875B74B60AFA0A@MEYPR01MB7886.ausprd01.prod.outlook.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ibmpex.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
+index db066b368918..40fff7e95ea1 100644
+--- a/drivers/hwmon/ibmpex.c
++++ b/drivers/hwmon/ibmpex.c
+@@ -282,6 +282,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev,
+ {
+ struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+
++ if (!data)
++ return -ENODEV;
++
+ ibmpex_reset_high_low_data(data);
+
+ return count;
+@@ -514,6 +517,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ {
+ int i, j;
+
++ hwmon_device_unregister(data->hwmon_dev);
++ dev_set_drvdata(data->bmc_device, NULL);
++
+ device_remove_file(data->bmc_device,
+ &sensor_dev_attr_reset_high_low.dev_attr);
+ device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr);
+@@ -527,8 +533,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
+ }
+
+ list_del(&data->list);
+- dev_set_drvdata(data->bmc_device, NULL);
+- hwmon_device_unregister(data->hwmon_dev);
++
+ ipmi_destroy_user(data->user);
+ kfree(data->sensors);
+ kfree(data);
+--
+2.51.0
+
--- /dev/null
+From d77cc87dd6bc1f57a9adbe85d57be47ffac28337 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Dec 2025 19:43:43 +0300
+Subject: hwmon: (tmp401) fix overflow caused by default conversion rate value
+
+From: Alexey Simakov <bigalex934@gmail.com>
+
+[ Upstream commit 82f2aab35a1ab2e1460de06ef04c726460aed51c ]
+
+The driver computes conversion intervals using the formula:
+
+ interval = (1 << (7 - rate)) * 125ms
+
+where 'rate' is the sensor's conversion rate register value. According to
+the datasheet, the power-on reset value of this register is 0x8, which
+could be assigned to the register, after handling i2c general call.
+Using this default value causes a result greater than the bit width of
+left operand and an undefined behaviour in the calculation above, since
+shifting by values larger than the bit width is undefined behaviour as
+per C language standard.
+
+Limit the maximum usable 'rate' value to 7 to prevent undefined
+behaviour in calculations.
+
+Found by Linux Verification Center (linuxtesting.org) with Svace.
+
+Note (groeck):
+ This does not matter in practice unless someone overwrites the chip
+ configuration from outside the driver while the driver is loaded.
+ The conversion time register is initialized with a value of 5 (500ms)
+ when the driver is loaded, and the driver never writes a bad value.
+
+Fixes: ca53e7640de7 ("hwmon: (tmp401) Convert to _info API")
+Signed-off-by: Alexey Simakov <bigalex934@gmail.com>
+Link: https://lore.kernel.org/r/20251211164342.6291-1-bigalex934@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/tmp401.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
+index 91f2314568cf..5cc932ef0460 100644
+--- a/drivers/hwmon/tmp401.c
++++ b/drivers/hwmon/tmp401.c
+@@ -408,7 +408,7 @@ static int tmp401_chip_read(struct device *dev, u32 attr, int channel, long *val
+ ret = regmap_read(data->regmap, TMP401_CONVERSION_RATE, ®val);
+ if (ret < 0)
+ return ret;
+- *val = (1 << (7 - regval)) * 125;
++ *val = (1 << (7 - min(regval, 7))) * 125;
+ break;
+ case hwmon_chip_temp_reset_history:
+ *val = 0;
+--
+2.51.0
+
--- /dev/null
+From 4d47a449facd97b5cce80bd52e1183ee89c5dc61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 18:36:18 +0800
+Subject: MIPS: Fix a reference leak bug in ip22_check_gio()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+[ Upstream commit 680ad315caaa2860df411cb378bf3614d96c7648 ]
+
+If gio_device_register fails, gio_dev_put() is required to
+drop the gio_dev device reference.
+
+Fixes: e84de0c61905 ("MIPS: GIO bus support for SGI IP22/28")
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/sgi-ip22/ip22-gio.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
+index 81c9f0a8880b..7026b464b02e 100644
+--- a/arch/mips/sgi-ip22/ip22-gio.c
++++ b/arch/mips/sgi-ip22/ip22-gio.c
+@@ -373,7 +373,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
+ gio_dev->resource.flags = IORESOURCE_MEM;
+ gio_dev->irq = irq;
+ dev_set_name(&gio_dev->dev, "%d", slotno);
+- gio_device_register(gio_dev);
++ if (gio_device_register(gio_dev))
++ gio_dev_put(gio_dev);
+ } else
+ printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
+ }
+--
+2.51.0
+
net-hns3-using-the-num_tqps-in-the-vf-driver-to-appl.patch
net-hns3-using-the-num_tqps-to-check-whether-tqp_ind.patch
net-hns3-add-vlan-id-validation-before-using.patch
+hwmon-ibmpex-fix-use-after-free-in-high-low-store.patch
+hwmon-tmp401-fix-overflow-caused-by-default-conversi.patch
+mips-fix-a-reference-leak-bug-in-ip22_check_gio.patch
+drm-panel-sony-td4353-jdi-enable-prepare_prev_first.patch
+x86-xen-move-xen-upcall-handler.patch
+x86-xen-fix-sparse-warning-in-enlighten_pv.c.patch
+spi-cadence-quadspi-fix-clock-disable-on-probe-failu.patch
+block-rnbd-clt-fix-leaked-id-in-init_dev.patch
--- /dev/null
+From 1183a8ee7cfc4659e19cb2ef119cfbedcca5d298 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 12:53:12 +0530
+Subject: spi: cadence-quadspi: Fix clock disable on probe failure path
+
+From: Anurag Dutta <a-dutta@ti.com>
+
+[ Upstream commit 1889dd2081975ce1f6275b06cdebaa8d154847a9 ]
+
+When cqspi_request_mmap_dma() returns -EPROBE_DEFER after runtime PM
+is enabled, the error path calls clk_disable_unprepare() on an already
+disabled clock, causing an imbalance.
+
+Use pm_runtime_get_sync() to increment the usage counter and resume the
+device. This prevents runtime_suspend() from being invoked and causing
+a double clock disable.
+
+Fixes: 140623410536 ("mtd: spi-nor: Add driver for Cadence Quad SPI Flash Controller")
+Signed-off-by: Anurag Dutta <a-dutta@ti.com>
+Tested-by: Nishanth Menon <nm@ti.com>
+Link: https://patch.msgid.link/20251212072312.2711806-3-a-dutta@ti.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 7b809644436e..eed88aba2cfe 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1885,7 +1885,9 @@ static int cqspi_probe(struct platform_device *pdev)
+ probe_reset_failed:
+ if (cqspi->is_jh7110)
+ cqspi_jh7110_disable_clk(pdev, cqspi);
+- clk_disable_unprepare(cqspi->clk);
++
++ if (pm_runtime_get_sync(&pdev->dev) >= 0)
++ clk_disable_unprepare(cqspi->clk);
+ probe_clk_failed:
+ pm_runtime_put_sync(dev);
+ probe_pm_failed:
+--
+2.51.0
+
--- /dev/null
+From b5fe44ed0ac65183fc60a3cd676908bec064b979 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 12:51:12 +0100
+Subject: x86/xen: Fix sparse warning in enlighten_pv.c
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit e5aff444e3a7bdeef5ea796a2099fc3c60a070fa ]
+
+The sparse tool issues a warning for arch/x86/xen/enlighten_pv.c:
+
+ arch/x86/xen/enlighten_pv.c:120:9: sparse: sparse: incorrect type
+ in initializer (different address spaces)
+ expected void const [noderef] __percpu *__vpp_verify
+ got bool *
+
+This is due to the percpu variable xen_in_preemptible_hcall being
+exported via EXPORT_SYMBOL_GPL() instead of EXPORT_PER_CPU_SYMBOL_GPL().
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202512140856.Ic6FetG6-lkp@intel.com/
+Fixes: fdfd811ddde3 ("x86/xen: allow privcmd hypercalls to be preempted")
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20251215115112.15072-1-jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/xen/enlighten_pv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 72b58fa4bc17..bfcd6ffc57df 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -110,7 +110,7 @@ static int xen_cpu_dead_pv(unsigned int cpu);
+ * calls.
+ */
+ DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
++EXPORT_PER_CPU_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+ /*
+ * In case of scheduling the flag must be cleared and restored after
+--
+2.51.0
+
--- /dev/null
+From 5ea610489cc67ff51151b751861ddf64d43063e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Mar 2025 11:12:14 -0400
+Subject: x86/xen: Move Xen upcall handler
+
+From: Brian Gerst <brgerst@gmail.com>
+
+[ Upstream commit 1ab7b5ed44ba9bce581e225f40219b793bc779d6 ]
+
+Move the upcall handler to Xen-specific files.
+
+No functional changes.
+
+Signed-off-by: Brian Gerst <brgerst@gmail.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lore.kernel.org/r/20250314151220.862768-2-brgerst@gmail.com
+Stable-dep-of: e5aff444e3a7 ("x86/xen: Fix sparse warning in enlighten_pv.c")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/common.c | 72 -------------------------------------
+ arch/x86/xen/enlighten_pv.c | 69 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 69 insertions(+), 72 deletions(-)
+
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index e72dac092245..3c88bdf96e7a 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -20,11 +20,6 @@
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+
+-#ifdef CONFIG_XEN_PV
+-#include <xen/xen-ops.h>
+-#include <xen/events.h>
+-#endif
+-
+ #include <asm/apic.h>
+ #include <asm/desc.h>
+ #include <asm/traps.h>
+@@ -346,70 +341,3 @@ SYSCALL_DEFINE0(ni_syscall)
+ {
+ return -ENOSYS;
+ }
+-
+-#ifdef CONFIG_XEN_PV
+-#ifndef CONFIG_PREEMPTION
+-/*
+- * Some hypercalls issued by the toolstack can take many 10s of
+- * seconds. Allow tasks running hypercalls via the privcmd driver to
+- * be voluntarily preempted even if full kernel preemption is
+- * disabled.
+- *
+- * Such preemptible hypercalls are bracketed by
+- * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+- * calls.
+- */
+-DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+-
+-/*
+- * In case of scheduling the flag must be cleared and restored after
+- * returning from schedule as the task might move to a different CPU.
+- */
+-static __always_inline bool get_and_clear_inhcall(void)
+-{
+- bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+-
+- __this_cpu_write(xen_in_preemptible_hcall, false);
+- return inhcall;
+-}
+-
+-static __always_inline void restore_inhcall(bool inhcall)
+-{
+- __this_cpu_write(xen_in_preemptible_hcall, inhcall);
+-}
+-#else
+-static __always_inline bool get_and_clear_inhcall(void) { return false; }
+-static __always_inline void restore_inhcall(bool inhcall) { }
+-#endif
+-
+-static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+-{
+- struct pt_regs *old_regs = set_irq_regs(regs);
+-
+- inc_irq_stat(irq_hv_callback_count);
+-
+- xen_evtchn_do_upcall();
+-
+- set_irq_regs(old_regs);
+-}
+-
+-__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+-{
+- irqentry_state_t state = irqentry_enter(regs);
+- bool inhcall;
+-
+- instrumentation_begin();
+- run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+-
+- inhcall = get_and_clear_inhcall();
+- if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
+- irqentry_exit_cond_resched();
+- instrumentation_end();
+- restore_inhcall(inhcall);
+- } else {
+- instrumentation_end();
+- irqentry_exit(regs, state);
+- }
+-}
+-#endif /* CONFIG_XEN_PV */
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 3df7c96e7388..72b58fa4bc17 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -72,6 +72,7 @@
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+ #include <asm/cpu.h>
++#include <asm/irq_stack.h>
+ #ifdef CONFIG_X86_IOPL_IOPERM
+ #include <asm/io_bitmap.h>
+ #endif
+@@ -97,6 +98,44 @@ void *xen_initial_gdt;
+ static int xen_cpu_up_prepare_pv(unsigned int cpu);
+ static int xen_cpu_dead_pv(unsigned int cpu);
+
++#ifndef CONFIG_PREEMPTION
++/*
++ * Some hypercalls issued by the toolstack can take many 10s of
++ * seconds. Allow tasks running hypercalls via the privcmd driver to
++ * be voluntarily preempted even if full kernel preemption is
++ * disabled.
++ *
++ * Such preemptible hypercalls are bracketed by
++ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
++ * calls.
++ */
++DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
++EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
++
++/*
++ * In case of scheduling the flag must be cleared and restored after
++ * returning from schedule as the task might move to a different CPU.
++ */
++static __always_inline bool get_and_clear_inhcall(void)
++{
++ bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
++
++ __this_cpu_write(xen_in_preemptible_hcall, false);
++ return inhcall;
++}
++
++static __always_inline void restore_inhcall(bool inhcall)
++{
++ __this_cpu_write(xen_in_preemptible_hcall, inhcall);
++}
++
++#else
++
++static __always_inline bool get_and_clear_inhcall(void) { return false; }
++static __always_inline void restore_inhcall(bool inhcall) { }
++
++#endif
++
+ struct tls_descs {
+ struct desc_struct desc[3];
+ };
+@@ -678,6 +717,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
+ }
+ #endif
+
++static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
++{
++ struct pt_regs *old_regs = set_irq_regs(regs);
++
++ inc_irq_stat(irq_hv_callback_count);
++
++ xen_evtchn_do_upcall();
++
++ set_irq_regs(old_regs);
++}
++
++__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
++{
++ irqentry_state_t state = irqentry_enter(regs);
++ bool inhcall;
++
++ instrumentation_begin();
++ run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
++
++ inhcall = get_and_clear_inhcall();
++ if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
++ irqentry_exit_cond_resched();
++ instrumentation_end();
++ restore_inhcall(inhcall);
++ } else {
++ instrumentation_end();
++ irqentry_exit(regs, state);
++ }
++}
++
+ struct trap_array_entry {
+ void (*orig)(void);
+ void (*xen)(void);
+--
+2.51.0
+