--- /dev/null
+From 3aa385a9c75c09b59dcab2ff76423439d23673ab Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Mon, 3 Nov 2025 10:36:18 +0100
+Subject: iio: accel: bmc150: Fix irq assumption regression
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 3aa385a9c75c09b59dcab2ff76423439d23673ab upstream.
+
+The code in bmc150-accel-core.c unconditionally calls
+bmc150_accel_set_interrupt() in the iio_buffer_setup_ops,
+such as on the runtime PM resume path giving a kernel
+splat like this if the device has no interrupts:
+
+Unable to handle kernel NULL pointer dereference at virtual
+ address 00000001 when read
+
+PC is at bmc150_accel_set_interrupt+0x98/0x194
+LR is at __pm_runtime_resume+0x5c/0x64
+(...)
+Call trace:
+bmc150_accel_set_interrupt from bmc150_accel_buffer_postenable+0x40/0x108
+bmc150_accel_buffer_postenable from __iio_update_buffers+0xbe0/0xcbc
+__iio_update_buffers from enable_store+0x84/0xc8
+enable_store from kernfs_fop_write_iter+0x154/0x1b4
+
+This bug seems to have been in the driver since the beginning,
+but it only manifests recently; I do not know why.
+
+Store the IRQ number in the state struct, as this is a common
+pattern in other drivers, then use this to determine if we have
+IRQ support or not.
+
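+In other words, data->irq stays 0 when probe is not handed a valid
+interrupt, so bmc150_accel_set_interrupt() now returns early instead of
+dereferencing interrupt state that, presumably, is only fully set up when
+an IRQ is actually requested.
+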
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/accel/bmc150-accel-core.c | 5 +++++
+ drivers/iio/accel/bmc150-accel.h | 1 +
+ 2 files changed, 6 insertions(+)
+
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -526,6 +526,10 @@ static int bmc150_accel_set_interrupt(st
+ const struct bmc150_accel_interrupt_info *info = intr->info;
+ int ret;
+
++ /* We do not always have an IRQ */
++ if (data->irq <= 0)
++ return 0;
++
+ if (state) {
+ if (atomic_inc_return(&intr->users) > 1)
+ return 0;
+@@ -1699,6 +1703,7 @@ int bmc150_accel_core_probe(struct devic
+ }
+
+ if (irq > 0) {
++ data->irq = irq;
+ ret = devm_request_threaded_irq(dev, irq,
+ bmc150_accel_irq_handler,
+ bmc150_accel_irq_thread_handler,
+--- a/drivers/iio/accel/bmc150-accel.h
++++ b/drivers/iio/accel/bmc150-accel.h
+@@ -58,6 +58,7 @@ enum bmc150_accel_trigger_id {
+
+ struct bmc150_accel_data {
+ struct regmap *regmap;
++ int irq;
+ struct regulator_bulk_data regulators[2];
+ struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
+ struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
--- /dev/null
+From c92c1bc408e9e11ae3c7011b062fdd74c09283a3 Mon Sep 17 00:00:00 2001
+From: Valek Andrej <andrej.v@skyrain.eu>
+Date: Tue, 14 Oct 2025 09:13:44 +0200
+Subject: iio: accel: fix ADXL355 startup race condition
+
+From: Valek Andrej <andrej.v@skyrain.eu>
+
+commit c92c1bc408e9e11ae3c7011b062fdd74c09283a3 upstream.
+
+There is a race condition where the device is not fully working after a SW
+reset. Therefore it is necessary to wait some time after the reset and
+verify the shadow register values by reading and comparing them before and
+after the reset. This mechanism is described in the datasheet at least from
+revision D.
+
+Fixes: 12ed27863ea3 ("iio: accel: Add driver support for ADXL355")
+Signed-off-by: Valek Andrej <andrej.v@skyrain.eu>
+Signed-off-by: Kessler Markus <markus.kessler@hilti.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/accel/adxl355_core.c | 44 ++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 39 insertions(+), 5 deletions(-)
+
+--- a/drivers/iio/accel/adxl355_core.c
++++ b/drivers/iio/accel/adxl355_core.c
+@@ -56,6 +56,8 @@
+ #define ADXL355_POWER_CTL_DRDY_MSK BIT(2)
+ #define ADXL355_SELF_TEST_REG 0x2E
+ #define ADXL355_RESET_REG 0x2F
++#define ADXL355_BASE_ADDR_SHADOW_REG 0x50
++#define ADXL355_SHADOW_REG_COUNT 5
+
+ #define ADXL355_DEVID_AD_VAL 0xAD
+ #define ADXL355_DEVID_MST_VAL 0x1D
+@@ -294,7 +296,12 @@ static void adxl355_fill_3db_frequency_t
+ static int adxl355_setup(struct adxl355_data *data)
+ {
+ unsigned int regval;
++ int retries = 5; /* the number is chosen based on empirical reasons */
+ int ret;
++ u8 *shadow_regs __free(kfree) = kzalloc(ADXL355_SHADOW_REG_COUNT, GFP_KERNEL);
++
++ if (!shadow_regs)
++ return -ENOMEM;
+
+ ret = regmap_read(data->regmap, ADXL355_DEVID_AD_REG, &regval);
+ if (ret)
+@@ -321,14 +328,41 @@ static int adxl355_setup(struct adxl355_
+ if (regval != ADXL355_PARTID_VAL)
+ dev_warn(data->dev, "Invalid DEV ID 0x%02x\n", regval);
+
+- /*
+- * Perform a software reset to make sure the device is in a consistent
+- * state after start-up.
+- */
+- ret = regmap_write(data->regmap, ADXL355_RESET_REG, ADXL355_RESET_CODE);
++ /* Read shadow registers to be compared after reset */
++ ret = regmap_bulk_read(data->regmap,
++ ADXL355_BASE_ADDR_SHADOW_REG,
++ shadow_regs, ADXL355_SHADOW_REG_COUNT);
+ if (ret)
+ return ret;
+
++ do {
++ if (--retries == 0) {
++ dev_err(data->dev, "Shadow registers mismatch\n");
++ return -EIO;
++ }
++
++ /*
++ * Perform a software reset to make sure the device is in a consistent
++ * state after start-up.
++ */
++ ret = regmap_write(data->regmap, ADXL355_RESET_REG,
++ ADXL355_RESET_CODE);
++ if (ret)
++ return ret;
++
++ /* Wait at least 5ms after software reset */
++ usleep_range(5000, 10000);
++
++ /* Read shadow registers for comparison */
++ ret = regmap_bulk_read(data->regmap,
++ ADXL355_BASE_ADDR_SHADOW_REG,
++ data->buffer.buf,
++ ADXL355_SHADOW_REG_COUNT);
++ if (ret)
++ return ret;
++ } while (memcmp(shadow_regs, data->buffer.buf,
++ ADXL355_SHADOW_REG_COUNT));
++
+ ret = regmap_update_bits(data->regmap, ADXL355_POWER_CTL_REG,
+ ADXL355_POWER_CTL_DRDY_MSK,
+ FIELD_PREP(ADXL355_POWER_CTL_DRDY_MSK, 1));
--- /dev/null
+From ffc74ad539136ae9e16f7b5f2e4582e88018cd49 Mon Sep 17 00:00:00 2001
+From: Marcelo Schmitt <marcelo.schmitt@analog.com>
+Date: Thu, 18 Sep 2025 14:37:27 -0300
+Subject: iio: adc: ad4030: Fix _scale value for common-mode channels
+
+From: Marcelo Schmitt <marcelo.schmitt@analog.com>
+
+commit ffc74ad539136ae9e16f7b5f2e4582e88018cd49 upstream.
+
+Previously, the driver always used the number of precision bits of the
+differential input channels to provide the scale to mV. However,
+differential and common-mode voltage channels have different numbers of
+precision bits, and the correct number of precision bits must be used to
+get a proper mV scale factor for each one. Use the channel-specific
+number of precision bits to provide the correct scale value for each
+channel.
+
+Fixes: de67f28abe58 ("iio: adc: ad4030: check scan_type for error")
+Fixes: 949abd1ca5a4 ("iio: adc: ad4030: add averaging support")
+Signed-off-by: Marcelo Schmitt <marcelo.schmitt@analog.com>
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/ad4030.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/adc/ad4030.c
++++ b/drivers/iio/adc/ad4030.c
+@@ -385,7 +385,7 @@ static int ad4030_get_chan_scale(struct
+ struct ad4030_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+
+- scan_type = iio_get_current_scan_type(indio_dev, st->chip->channels);
++ scan_type = iio_get_current_scan_type(indio_dev, chan);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
--- /dev/null
+From e2cc390a6629c76924a2740c54b144b9b28fca59 Mon Sep 17 00:00:00 2001
+From: David Lechner <dlechner@baylibre.com>
+Date: Fri, 10 Oct 2025 15:24:31 -0500
+Subject: iio: adc: ad7124: fix temperature channel
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Lechner <dlechner@baylibre.com>
+
+commit e2cc390a6629c76924a2740c54b144b9b28fca59 upstream.
+
+Fix the temperature channel not working due to gain and offset not being
+initialized. For channels other than the voltage ones calibration is
+skipped (which is OK). However, that results in the calibration register
+values tracked in st->channels[i].cfg all being zero. These zeros are
+later written to hardware before a measurement is made, which caused the
+raw temperature readings to always be 8388608 (0x800000).
+
+To fix it, we just make sure the gain and offset values are set to the
+default values and still return early without doing an internal
+calibration.
+
+While here, add a comment explaining why we don't bother calibrating
+the temperature channel.
+
+Fixes: 47036a03a303 ("iio: adc: ad7124: Implement internal calibration at probe time")
+Reviewed-by: Marcelo Schmitt <marcelo.schmitt@analog.com>
+Signed-off-by: David Lechner <dlechner@baylibre.com>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/ad7124.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -1196,10 +1196,6 @@ static int __ad7124_calibrate_all(struct
+ int ret, i;
+
+ for (i = 0; i < st->num_channels; i++) {
+-
+- if (indio_dev->channels[i].type != IIO_VOLTAGE)
+- continue;
+-
+ /*
+ * For calibration the OFFSET register should hold its reset default
+ * value. For the GAIN register there is no such requirement but
+@@ -1210,6 +1206,14 @@ static int __ad7124_calibrate_all(struct
+ st->channels[i].cfg.calibration_gain = st->gain_default;
+
+ /*
++ * Only the main voltage input channels are important enough
++ * to be automatically calibrated here. For everything else,
++ * just use the default values set above.
++ */
++ if (indio_dev->channels[i].type != IIO_VOLTAGE)
++ continue;
++
++ /*
+ * Full-scale calibration isn't supported at gain 1, so skip in
+ * that case. Note that untypically full-scale calibration has
+ * to happen before zero-scale calibration. This only applies to
--- /dev/null
+From bd886cdcbf9e746f61c74035a3acd42e9108e115 Mon Sep 17 00:00:00 2001
+From: David Lechner <dlechner@baylibre.com>
+Date: Fri, 10 Oct 2025 10:44:45 -0500
+Subject: iio: adc: ad7280a: fix ad7280_store_balance_timer()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Lechner <dlechner@baylibre.com>
+
+commit bd886cdcbf9e746f61c74035a3acd42e9108e115 upstream.
+
+Use the correct argument to iio_str_to_fixpoint() to parse 3 decimal places.
+
+iio_str_to_fixpoint() has a bit of an unintuitive API where the
+fract_mult parameter is the multiplier of the first decimal place as if
+it was already an integer. So to get 3 decimal places, fract_mult must
+be 100 rather than 1000.
+
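+As a rough worked example (input value chosen purely for illustration):
+parsing "2.5" with iio_str_to_fixpoint(buf, 100, &val, &val2) yields
+val = 2 and val2 = 500, so val2 spans 0..999, i.e. three decimal places,
+whereas a fract_mult of 1000 would make val2 span 0..9999, i.e. four.
+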
+Fixes: 96ccdbc07a74 ("staging:iio:adc:ad7280a: Standardize extended ABI naming")
+Signed-off-by: David Lechner <dlechner@baylibre.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/ad7280a.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/adc/ad7280a.c
++++ b/drivers/iio/adc/ad7280a.c
+@@ -541,7 +541,7 @@ static ssize_t ad7280_store_balance_time
+ int val, val2;
+ int ret;
+
+- ret = iio_str_to_fixpoint(buf, 1000, &val, &val2);
++ ret = iio_str_to_fixpoint(buf, 100, &val, &val2);
+ if (ret)
+ return ret;
+
--- /dev/null
+From 632757312d7eb320b66ca60e0cfe098ec53cee08 Mon Sep 17 00:00:00 2001
+From: David Lechner <dlechner@baylibre.com>
+Date: Fri, 19 Sep 2025 15:50:34 -0500
+Subject: iio: adc: ad7380: fix SPI offload trigger rate
+
+From: David Lechner <dlechner@baylibre.com>
+
+commit 632757312d7eb320b66ca60e0cfe098ec53cee08 upstream.
+
+Add a special case to double the SPI offload trigger rate when all
+channels of a single-ended chip are enabled in a buffered read.
+
+The single-ended chips in the AD738x family can only do simultaneous
+sampling of half their channels and have a multiplexer to allow reading
+the other half. To comply with the IIO definition of sampling_frequency,
+we need to trigger twice as often when the sequencer is enabled so that
+both banks can be read in a single sample period.
+
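+As an illustration (numbers chosen arbitrarily): with the sequencer
+enabled, a requested sampling_frequency of 10 kHz is programmed as a
+20 kHz offload trigger, so each 100 us sample period still produces one
+complete set of samples covering both multiplexer banks.
+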
+Fixes: bbeaec81a03e ("iio: ad7380: add support for SPI offload")
+Signed-off-by: David Lechner <dlechner@baylibre.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/ad7380.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/iio/adc/ad7380.c
++++ b/drivers/iio/adc/ad7380.c
+@@ -1227,6 +1227,14 @@ static int ad7380_offload_buffer_postena
+ if (ret)
+ return ret;
+
++ /*
++ * When the sequencer is required to read all channels, we need to
++ * trigger twice per sample period in order to read one complete set
++ * of samples.
++ */
++ if (st->seq)
++ config.periodic.frequency_hz *= 2;
++
+ ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
+ if (ret)
+ spi_unoptimize_message(&st->offload_msg);
--- /dev/null
+From 9b45744bf09fc2a3287e05287141d6e123c125a7 Mon Sep 17 00:00:00 2001
+From: ChiYuan Huang <cy_huang@richtek.com>
+Date: Thu, 18 Sep 2025 11:10:59 +0800
+Subject: iio: adc: rtq6056: Correct the sign bit index
+
+From: ChiYuan Huang <cy_huang@richtek.com>
+
+commit 9b45744bf09fc2a3287e05287141d6e123c125a7 upstream.
+
+The reported vshunt/current register value is a signed 16-bit integer.
+The sign bit index should be '15', not '16'.
+
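+For illustration, a raw register value of 0x8000 sign-extends to -32768
+with sign_extend32(regval, 15), while sign_extend32(regval, 16) treats the
+always-zero bit 16 as the sign bit and wrongly returns +32768.
+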
+Fixes: 4396f45d211b ("iio: adc: Add rtq6056 support")
+Reported-by: Andy Hsu <andy_ya_hsu@wiwynn.com>
+Signed-off-by: ChiYuan Huang <cy_huang@richtek.com>
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/rtq6056.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/adc/rtq6056.c
++++ b/drivers/iio/adc/rtq6056.c
+@@ -300,7 +300,7 @@ static int rtq6056_adc_read_channel(stru
+ return IIO_VAL_INT;
+ case RTQ6056_REG_SHUNTVOLT:
+ case RTQ6056_REG_CURRENT:
+- *val = sign_extend32(regval, 16);
++ *val = sign_extend32(regval, 15);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
--- /dev/null
+From 8a6b7989ff0cd0a95c93be1927f2af7ad10f28de Mon Sep 17 00:00:00 2001
+From: Olivier Moysan <olivier.moysan@foss.st.com>
+Date: Thu, 2 Oct 2025 13:22:49 +0200
+Subject: iio: adc: stm32-dfsdm: fix st,adc-alt-channel property handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Olivier Moysan <olivier.moysan@foss.st.com>
+
+commit 8a6b7989ff0cd0a95c93be1927f2af7ad10f28de upstream.
+
+Initially the st,adc-alt-channel property was defined as an enum in the DFSDM
+binding. The DFSDM binding has been changed to use the new IIO backend
+framework, along with the adoption of IIO generic channels.
+In this new binding st,adc-alt-channel is defined as a boolean property,
+but it is still handled as an enum in the DFSDM driver.
+Fix the st,adc-alt-channel property handling in the DFSDM driver.
+
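+For a valueless boolean property, fwnode_property_read_u32() is expected
+to return -ENODATA rather than -EINVAL (at least on DT), so the old check
+forced alt_si to 0 precisely when the property was present. Testing
+presence with fwnode_property_present() and setting alt_si to 1 matches
+the boolean binding.
+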
+Fixes: 3208fa0cd919 ("iio: adc: stm32-dfsdm: adopt generic channels bindings")
+Signed-off-by: Olivier Moysan <olivier.moysan@foss.st.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/stm32-dfsdm-adc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -725,9 +725,8 @@ static int stm32_dfsdm_generic_channel_p
+ }
+ df_ch->src = val;
+
+- ret = fwnode_property_read_u32(node, "st,adc-alt-channel", &df_ch->alt_si);
+- if (ret != -EINVAL)
+- df_ch->alt_si = 0;
++ if (fwnode_property_present(node, "st,adc-alt-channel"))
++ df_ch->alt_si = 1;
+
+ if (adc->dev_data->type == DFSDM_IIO) {
+ backend = devm_iio_backend_fwnode_get(&indio_dev->dev, NULL, node);
--- /dev/null
+From f9c198c3ccaf90a1a265fb2ffa8d4b093c3b0784 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Nuno=20S=C3=A1?= <nuno.sa@analog.com>
+Date: Tue, 7 Oct 2025 10:15:22 +0100
+Subject: iio: buffer-dma: support getting the DMA channel
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nuno Sá <nuno.sa@analog.com>
+
+commit f9c198c3ccaf90a1a265fb2ffa8d4b093c3b0784 upstream.
+
+Implement the .get_dma_dev() callback for DMA buffers by returning the
+device that owns the DMA channel. This allows the core DMABUF
+infrastructure to properly map DMA buffers using the correct device,
+avoiding the need for bounce buffers on systems where memory is mapped
+above the 32-bit range.
+
+The function returns the DMA queue's device, which is the actual device
+responsible for DMA operations in buffer-dma implementations.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/buffer/industrialio-buffer-dma.c | 6 ++++++
+ include/linux/iio/buffer-dma.h | 1 +
+ 2 files changed, 7 insertions(+)
+
+--- a/drivers/iio/buffer/industrialio-buffer-dma.c
++++ b/drivers/iio/buffer/industrialio-buffer-dma.c
+@@ -786,6 +786,12 @@ out_end_signalling:
+ }
+ EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, "IIO_DMA_BUFFER");
+
++struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer)
++{
++ return iio_buffer_to_queue(buffer)->dev;
++}
++EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_get_dma_dev, "IIO_DMA_BUFFER");
++
+ void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
+ {
+ struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+--- a/include/linux/iio/buffer-dma.h
++++ b/include/linux/iio/buffer-dma.h
+@@ -174,5 +174,6 @@ int iio_dma_buffer_enqueue_dmabuf(struct
+ size_t size, bool cyclic);
+ void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
+ void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
++struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer);
+
+ #endif
--- /dev/null
+From 3db847df994d475db7812dde90376f2848bcd30a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Nuno=20S=C3=A1?= <nuno.sa@analog.com>
+Date: Tue, 7 Oct 2025 10:15:23 +0100
+Subject: iio: buffer-dmaengine: enable .get_dma_dev()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nuno Sá <nuno.sa@analog.com>
+
+commit 3db847df994d475db7812dde90376f2848bcd30a upstream.
+
+Wire up the .get_dma_dev() callback to use the DMA buffer infrastructure's
+implementation. This ensures that DMABUF operations use the correct DMA
+device for mapping, which is essential for proper operation on systems
+where memory is mapped above the 32-bit range.
+
+Without this callback, the core would fall back to using the IIO device's
+parent, which may not have the appropriate DMA mask configuration for
+high memory access.
+
+Fixes: 7a86d469983a ("iio: buffer-dmaengine: Support new DMABUF based userspace API")
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/buffer/industrialio-buffer-dmaengine.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
++++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+@@ -177,6 +177,8 @@ static const struct iio_buffer_access_fu
+ .lock_queue = iio_dma_buffer_lock_queue,
+ .unlock_queue = iio_dma_buffer_unlock_queue,
+
++ .get_dma_dev = iio_dma_buffer_get_dma_dev,
++
+ .modes = INDIO_BUFFER_HARDWARE,
+ .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
+ };
--- /dev/null
+From a514bb109eada64f798f1c86c17182229cc20fe7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Nuno=20S=C3=A1?= <nuno.sa@analog.com>
+Date: Tue, 7 Oct 2025 10:15:21 +0100
+Subject: iio: buffer: support getting dma channel from the buffer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nuno Sá <nuno.sa@analog.com>
+
+commit a514bb109eada64f798f1c86c17182229cc20fe7 upstream.
+
+Add a new buffer accessor .get_dma_dev() in order to get the
+struct device responsible for actually providing the dma channel. We
+cannot assume that we can use the parent of the IIO device for mapping
+the DMA buffer. This becomes important on systems (like the Xilinx/AMD
+zynqMP Ultrascale) where memory (or part of it) is mapped above the
+32-bit range. On such systems, and given that a device by default has
+a DMA mask of 32 bits, we would then need to rely on bounce buffers
+(swiotlb) for mapping memory above the DMA mask limit.
+
+In the process, add an iio_buffer_get_dma_dev() helper function to get
+the proper DMA device.
+
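+For example (addresses purely illustrative): on a platform whose RAM sits
+above 4 GiB, attaching a DMABUF to the IIO device's parent, left with the
+default 32-bit DMA mask, would force every mapping through swiotlb bounce
+buffers, while attaching to the device that actually owns the DMA channel,
+whose mask covers the full address range, avoids the copies.
+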
+Cc: stable@vger.kernel.org
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/industrialio-buffer.c | 21 ++++++++++++++++-----
+ include/linux/iio/buffer_impl.h | 2 ++
+ 2 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -1623,19 +1623,28 @@ static int iio_dma_resv_lock(struct dma_
+ return 0;
+ }
+
++static struct device *iio_buffer_get_dma_dev(const struct iio_dev *indio_dev,
++ struct iio_buffer *buffer)
++{
++ if (buffer->access->get_dma_dev)
++ return buffer->access->get_dma_dev(buffer);
++
++ return indio_dev->dev.parent;
++}
++
+ static struct dma_buf_attachment *
+ iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
+ struct dma_buf *dmabuf, bool nonblock)
+ {
+- struct device *dev = ib->indio_dev->dev.parent;
+ struct iio_buffer *buffer = ib->buffer;
++ struct device *dma_dev = iio_buffer_get_dma_dev(ib->indio_dev, buffer);
+ struct dma_buf_attachment *attach = NULL;
+ struct iio_dmabuf_priv *priv;
+
+ guard(mutex)(&buffer->dmabufs_mutex);
+
+ list_for_each_entry(priv, &buffer->dmabufs, entry) {
+- if (priv->attach->dev == dev
++ if (priv->attach->dev == dma_dev
+ && priv->attach->dmabuf == dmabuf) {
+ attach = priv->attach;
+ break;
+@@ -1653,6 +1662,7 @@ static int iio_buffer_attach_dmabuf(stru
+ {
+ struct iio_dev *indio_dev = ib->indio_dev;
+ struct iio_buffer *buffer = ib->buffer;
++ struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
+ struct dma_buf_attachment *attach;
+ struct iio_dmabuf_priv *priv, *each;
+ struct dma_buf *dmabuf;
+@@ -1679,7 +1689,7 @@ static int iio_buffer_attach_dmabuf(stru
+ goto err_free_priv;
+ }
+
+- attach = dma_buf_attach(dmabuf, indio_dev->dev.parent);
++ attach = dma_buf_attach(dmabuf, dma_dev);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto err_dmabuf_put;
+@@ -1719,7 +1729,7 @@ static int iio_buffer_attach_dmabuf(stru
+ * combo. If we do, refuse to attach.
+ */
+ list_for_each_entry(each, &buffer->dmabufs, entry) {
+- if (each->attach->dev == indio_dev->dev.parent
++ if (each->attach->dev == dma_dev
+ && each->attach->dmabuf == dmabuf) {
+ /*
+ * We unlocked the reservation object, so going through
+@@ -1758,6 +1768,7 @@ static int iio_buffer_detach_dmabuf(stru
+ {
+ struct iio_buffer *buffer = ib->buffer;
+ struct iio_dev *indio_dev = ib->indio_dev;
++ struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
+ struct iio_dmabuf_priv *priv;
+ struct dma_buf *dmabuf;
+ int dmabuf_fd, ret = -EPERM;
+@@ -1772,7 +1783,7 @@ static int iio_buffer_detach_dmabuf(stru
+ guard(mutex)(&buffer->dmabufs_mutex);
+
+ list_for_each_entry(priv, &buffer->dmabufs, entry) {
+- if (priv->attach->dev == indio_dev->dev.parent
++ if (priv->attach->dev == dma_dev
+ && priv->attach->dmabuf == dmabuf) {
+ list_del(&priv->entry);
+
+--- a/include/linux/iio/buffer_impl.h
++++ b/include/linux/iio/buffer_impl.h
+@@ -50,6 +50,7 @@ struct sg_table;
+ * @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF
+ * object to this buffer. Requires a valid DMABUF fd, that
+ * was previouly attached to this buffer.
++ * @get_dma_dev: called to get the DMA channel associated with this buffer.
+ * @lock_queue: called when the core needs to lock the buffer queue;
+ * it is used when enqueueing DMABUF objects.
+ * @unlock_queue: used to unlock a previously locked buffer queue
+@@ -90,6 +91,7 @@ struct iio_buffer_access_funcs {
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence, struct sg_table *sgt,
+ size_t size, bool cyclic);
++ struct device * (*get_dma_dev)(struct iio_buffer *buffer);
+ void (*lock_queue)(struct iio_buffer *buffer);
+ void (*unlock_queue)(struct iio_buffer *buffer);
+
--- /dev/null
+From 21553258b94861a73d7f2cf15469d69240e1170d Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Fri, 10 Oct 2025 20:58:48 +0200
+Subject: iio:common:ssp_sensors: Fix an error handling path ssp_probe()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 21553258b94861a73d7f2cf15469d69240e1170d upstream.
+
+If an error occurs after a successful mfd_add_devices() call, it should be
+undone by a corresponding mfd_remove_devices() call, as already done in the
+remove function.
+
+Fixes: 50dd64d57eee ("iio: common: ssp_sensors: Add sensorhub driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/common/ssp_sensors/ssp_dev.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
++++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
+@@ -503,7 +503,7 @@ static int ssp_probe(struct spi_device *
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to setup spi\n");
+- return ret;
++ goto err_setup_spi;
+ }
+
+ data->fw_dl_state = SSP_FW_DL_STATE_NONE;
+@@ -568,6 +568,8 @@ err_read_reg:
+ err_setup_irq:
+ mutex_destroy(&data->pending_lock);
+ mutex_destroy(&data->comm_lock);
++err_setup_spi:
++ mfd_remove_devices(&spi->dev);
+
+ dev_err(&spi->dev, "Probe failed!\n");
+
--- /dev/null
+From 7b8dc11c0a830caa0d890c603d597161c6c26095 Mon Sep 17 00:00:00 2001
+From: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Date: Thu, 16 Oct 2025 07:20:38 +0200
+Subject: iio: humditiy: hdc3020: fix units for temperature and humidity measurement
+
+From: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+
+commit 7b8dc11c0a830caa0d890c603d597161c6c26095 upstream.
+
+According to the ABI, the units after application of scale and offset are
+milli degrees Celsius for temperature measurements and milli percent for
+relative humidity measurements. Currently the resulting units are degrees
+Celsius for temperature measurements and percent for relative humidity
+measurements. Change the scale factor to fix this issue.
+
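+With the corrected factors the reported scale becomes 175000/65536 for
+temperature and 100000/65536 for relative humidity, so a raw reading
+multiplied by the scale (plus offset) now lands in milli degrees Celsius
+and milli percent respectively, as the ABI expects.
+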
+Fixes: c9180b8e39be ("iio: humidity: Add driver for ti HDC302x humidity sensors")
+Reported-by: Chris Lesiak <chris.lesiak@licorbio.com>
+Suggested-by: Chris Lesiak <chris.lesiak@licorbio.com>
+Reviewed-by: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Signed-off-by: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/humidity/hdc3020.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/humidity/hdc3020.c
++++ b/drivers/iio/humidity/hdc3020.c
+@@ -301,9 +301,9 @@ static int hdc3020_read_raw(struct iio_d
+ case IIO_CHAN_INFO_SCALE:
+ *val2 = 65536;
+ if (chan->type == IIO_TEMP)
+- *val = 175;
++ *val = 175 * MILLI;
+ else
+- *val = 100;
++ *val = 100 * MILLI;
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_CHAN_INFO_OFFSET:
--- /dev/null
+From cb372b4f46d4285e5d2c07ba734374151b8e34e7 Mon Sep 17 00:00:00 2001
+From: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Date: Thu, 16 Oct 2025 07:20:39 +0200
+Subject: iio: humditiy: hdc3020: fix units for thresholds and hysteresis
+
+From: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+
+commit cb372b4f46d4285e5d2c07ba734374151b8e34e7 upstream.
+
+According to the ABI, the units after application of scale and offset are
+milli degrees Celsius for temperature thresholds and milli percent for
+relative humidity thresholds. Currently the resulting units are degrees
+Celsius for temperature thresholds and hysteresis, and percent for relative
+humidity thresholds and hysteresis. Change the scale factor to fix this issue.
+
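+The new HDC3020_THRESH_FRACTION (65535 / 5) scale is presumably what keeps
+the read path within 32-bit arithmetic: a worst-case humidity threshold of
+65535 * 100 multiplied by MILLI would be roughly 6.5e9 and overflow an int,
+while the same value computed against a 65535 / 5 fraction stays below
+INT_MAX.
+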
+Fixes: 3ad0e7e5f0cb ("iio: humidity: hdc3020: add threshold events support")
+Reported-by: Chris Lesiak <chris.lesiak@licorbio.com>
+Reviewed-by: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Signed-off-by: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/humidity/hdc3020.c | 69 ++++++++++++++++++++++++-----------------
+ 1 file changed, 41 insertions(+), 28 deletions(-)
+
+--- a/drivers/iio/humidity/hdc3020.c
++++ b/drivers/iio/humidity/hdc3020.c
+@@ -72,6 +72,9 @@
+ #define HDC3020_MAX_TEMP_HYST_MICRO 164748607
+ #define HDC3020_MAX_HUM_MICRO 99220264
+
++/* Divide 65535 from the datasheet by 5 to avoid overflows */
++#define HDC3020_THRESH_FRACTION (65535 / 5)
++
+ struct hdc3020_data {
+ struct i2c_client *client;
+ struct gpio_desc *reset_gpio;
+@@ -376,15 +379,18 @@ static int hdc3020_thresh_get_temp(u16 t
+ int temp;
+
+ /*
+- * Get the temperature threshold from 9 LSBs, shift them to get
+- * the truncated temperature threshold representation and
+- * calculate the threshold according to the formula in the
+- * datasheet. Result is degree celsius scaled by 65535.
++ * Get the temperature threshold from 9 LSBs, shift them to get the
++ * truncated temperature threshold representation and calculate the
++ * threshold according to the explicit formula in the datasheet:
++ * T(C) = -45 + (175 * temp) / 65535.
++ * Additionally scale by HDC3020_THRESH_FRACTION to avoid precision loss
++ * when calculating threshold and hysteresis values. Result is degree
++ * celsius scaled by HDC3020_THRESH_FRACTION.
+ */
+ temp = FIELD_GET(HDC3020_THRESH_TEMP_MASK, thresh) <<
+ HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+
+- return -2949075 + (175 * temp);
++ return -2949075 / 5 + (175 / 5 * temp);
+ }
+
+ static int hdc3020_thresh_get_hum(u16 thresh)
+@@ -394,13 +400,16 @@ static int hdc3020_thresh_get_hum(u16 th
+ /*
+ * Get the humidity threshold from 7 MSBs, shift them to get the
+ * truncated humidity threshold representation and calculate the
+- * threshold according to the formula in the datasheet. Result is
+- * percent scaled by 65535.
++ * threshold according to the explicit formula in the datasheet:
++ * RH(%) = 100 * hum / 65535.
++ * Additionally scale by HDC3020_THRESH_FRACTION to avoid precision loss
++ * when calculating threshold and hysteresis values. Result is percent
++ * scaled by HDC3020_THRESH_FRACTION.
+ */
+ hum = FIELD_GET(HDC3020_THRESH_HUM_MASK, thresh) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT;
+
+- return hum * 100;
++ return hum * 100 / 5;
+ }
+
+ static u16 hdc3020_thresh_set_temp(int s_temp, u16 curr_thresh)
+@@ -455,8 +464,8 @@ int hdc3020_thresh_clr(s64 s_thresh, s64
+ else
+ s_clr = s_thresh + s_hyst;
+
+- /* Divide by 65535 to get units of micro */
+- return div_s64(s_clr, 65535);
++ /* Divide by HDC3020_THRESH_FRACTION to get units of micro */
++ return div_s64(s_clr, HDC3020_THRESH_FRACTION);
+ }
+
+ static int _hdc3020_write_thresh(struct hdc3020_data *data, u16 reg, u16 val)
+@@ -507,7 +516,7 @@ static int hdc3020_write_thresh(struct i
+
+ clr = ret;
+ /* Scale value to include decimal part into calculations */
+- s_val = (val < 0) ? (val * 1000000 - val2) : (val * 1000000 + val2);
++ s_val = (val < 0) ? (val * 1000 - val2) : (val * 1000 + val2);
+ switch (chan->type) {
+ case IIO_TEMP:
+ switch (info) {
+@@ -523,7 +532,8 @@ static int hdc3020_write_thresh(struct i
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_temp(clr) * 1000000;
+- s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
++ s_hyst = div_s64(abs(s_thresh - s_clr),
++ HDC3020_THRESH_FRACTION);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Set old hysteresis */
+@@ -532,16 +542,17 @@ static int hdc3020_write_thresh(struct i
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_temp returns temperature
+- * in degree celsius scaled by 65535. Scale by 1000000
+- * to be able to subtract scaled hysteresis value.
++ * in degree celsius scaled by HDC3020_THRESH_FRACTION.
++ * Scale by 1000000 to be able to subtract scaled
++ * hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ /*
+ * Units of s_val are in micro degree celsius, scale by
+- * 65535 to get same units as s_thresh.
++ * HDC3020_THRESH_FRACTION to get same units as s_thresh.
+ */
+ s_val = min(abs(s_val), HDC3020_MAX_TEMP_HYST_MICRO);
+- s_hyst = (s64)s_val * 65535;
++ s_hyst = (s64)s_val * HDC3020_THRESH_FRACTION;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, HDC3020_MIN_TEMP_MICRO);
+ s_clr = min(s_clr, HDC3020_MAX_TEMP_MICRO);
+@@ -565,7 +576,8 @@ static int hdc3020_write_thresh(struct i
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_hum(clr) * 1000000;
+- s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
++ s_hyst = div_s64(abs(s_thresh - s_clr),
++ HDC3020_THRESH_FRACTION);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Try to set old hysteresis */
+@@ -574,15 +586,16 @@ static int hdc3020_write_thresh(struct i
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_hum returns relative
+- * humidity in percent scaled by 65535. Scale by 1000000
+- * to be able to subtract scaled hysteresis value.
++ * humidity in percent scaled by HDC3020_THRESH_FRACTION.
++ * Scale by 1000000 to be able to subtract scaled
++ * hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ /*
+- * Units of s_val are in micro percent, scale by 65535
+- * to get same units as s_thresh.
++ * Units of s_val are in micro percent, scale by
++ * HDC3020_THRESH_FRACTION to get same units as s_thresh.
+ */
+- s_hyst = (s64)s_val * 65535;
++ s_hyst = (s64)s_val * HDC3020_THRESH_FRACTION;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, 0);
+ s_clr = min(s_clr, HDC3020_MAX_HUM_MICRO);
+@@ -630,7 +643,7 @@ static int hdc3020_read_thresh(struct ii
+ thresh = hdc3020_thresh_get_temp(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+- *val = thresh;
++ *val = thresh * MILLI;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+@@ -638,18 +651,18 @@ static int hdc3020_read_thresh(struct ii
+ return ret;
+
+ clr = hdc3020_thresh_get_temp(ret);
+- *val = abs(thresh - clr);
++ *val = abs(thresh - clr) * MILLI;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+- *val2 = 65535;
++ *val2 = HDC3020_THRESH_FRACTION;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_HUMIDITYRELATIVE:
+ thresh = hdc3020_thresh_get_hum(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+- *val = thresh;
++ *val = thresh * MILLI;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+@@ -657,12 +670,12 @@ static int hdc3020_read_thresh(struct ii
+ return ret;
+
+ clr = hdc3020_thresh_get_hum(ret);
+- *val = abs(thresh - clr);
++ *val = abs(thresh - clr) * MILLI;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+- *val2 = 65535;
++ *val2 = HDC3020_THRESH_FRACTION;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EOPNOTSUPP;
--- /dev/null
+From 3af0c1fb1cdc351b64ff1a4bc06d491490c1f10a Mon Sep 17 00:00:00 2001
+From: Francesco Lavra <flavra@baylibre.com>
+Date: Fri, 17 Oct 2025 19:32:08 +0200
+Subject: iio: imu: st_lsm6dsx: fix array size for st_lsm6dsx_settings fields
+
+From: Francesco Lavra <flavra@baylibre.com>
+
+commit 3af0c1fb1cdc351b64ff1a4bc06d491490c1f10a upstream.
+
+The `decimator` and `batch` fields of struct st_lsm6dsx_settings
+are arrays indexed by sensor type, not by sensor hardware
+identifier; moreover, the `batch` field is only used for the
+accelerometer and gyroscope.
+Change the array size for `decimator` from ST_LSM6DSX_MAX_ID to
+ST_LSM6DSX_ID_MAX, and change the array size for `batch` from
+ST_LSM6DSX_MAX_ID to 2; move the enum st_lsm6dsx_sensor_id
+definition so that the ST_LSM6DSX_ID_MAX value is usable within
+the struct st_lsm6dsx_settings definition.
+
+Fixes: 801a6e0af0c6c ("iio: imu: st_lsm6dsx: add support to LSM6DSO")
+Signed-off-by: Francesco Lavra <flavra@baylibre.com>
+Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+@@ -270,6 +270,15 @@ struct st_lsm6dsx_event_settings {
+ u8 wakeup_src_x_mask;
+ };
+
++enum st_lsm6dsx_sensor_id {
++ ST_LSM6DSX_ID_GYRO,
++ ST_LSM6DSX_ID_ACC,
++ ST_LSM6DSX_ID_EXT0,
++ ST_LSM6DSX_ID_EXT1,
++ ST_LSM6DSX_ID_EXT2,
++ ST_LSM6DSX_ID_MAX
++};
++
+ enum st_lsm6dsx_ext_sensor_id {
+ ST_LSM6DSX_ID_MAGN,
+ };
+@@ -355,23 +364,14 @@ struct st_lsm6dsx_settings {
+ struct st_lsm6dsx_odr_table_entry odr_table[2];
+ struct st_lsm6dsx_samples_to_discard samples_to_discard[2];
+ struct st_lsm6dsx_fs_table_entry fs_table[2];
+- struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
+- struct st_lsm6dsx_reg batch[ST_LSM6DSX_MAX_ID];
++ struct st_lsm6dsx_reg decimator[ST_LSM6DSX_ID_MAX];
++ struct st_lsm6dsx_reg batch[2];
+ struct st_lsm6dsx_fifo_ops fifo_ops;
+ struct st_lsm6dsx_hw_ts_settings ts_settings;
+ struct st_lsm6dsx_shub_settings shub_settings;
+ struct st_lsm6dsx_event_settings event_settings;
+ };
+
+-enum st_lsm6dsx_sensor_id {
+- ST_LSM6DSX_ID_GYRO,
+- ST_LSM6DSX_ID_ACC,
+- ST_LSM6DSX_ID_EXT0,
+- ST_LSM6DSX_ID_EXT1,
+- ST_LSM6DSX_ID_EXT2,
+- ST_LSM6DSX_ID_MAX,
+-};
+-
+ enum st_lsm6dsx_fifo_mode {
+ ST_LSM6DSX_FIFO_BYPASS = 0x0,
+ ST_LSM6DSX_FIFO_CONT = 0x6,
--- /dev/null
+From 0bf1bfde53b30da7fd7f4a6c3db5b8e77888958d Mon Sep 17 00:00:00 2001
+From: Achim Gratz <Achim.Gratz@Stromeko.DE>
+Date: Sun, 28 Sep 2025 19:26:28 +0200
+Subject: iio: pressure: bmp280: correct meas_time_us calculation
+
+From: Achim Gratz <Achim.Gratz@Stromeko.DE>
+
+commit 0bf1bfde53b30da7fd7f4a6c3db5b8e77888958d upstream.
+
+Correction of meas_time_us initialization based on an observation and
+partial patch by David Lechner.
+
+The constant part of the measurement time (as described in the
+datasheet and implemented in the BM(P/E)2 Sensor API) was apparently
+forgotten (it was already correctly applied for the BMP380) and is now
+used.
+
+There was also another thinko in bmp280_wait_conv:
+data->oversampling_humid can actually have a value of 0 (for an
+oversampling_ratio of 1), so it cannot be used to detect the presence
+of the humidity measurement capability. Use
+data->chip_info->oversampling_humid_avail instead, which is NULL for
+chips that cannot measure humidity and therefore must skip that part
+of the calculation.
+
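+Schematically, after this change the wait is computed as (osrs_* stand in
+for the driver's oversampling fields; the humidity term is only added on
+chips that can measure humidity, remaining terms abridged):
+
+  meas_time_us = BMP280_MEAS_OFFSET
+               + BMP280_PRESS_HUMID_MEAS_OFFSET + 2^osrs_h * BMP280_MEAS_DUR
+               + BMP280_PRESS_HUMID_MEAS_OFFSET + 2^osrs_p * BMP280_MEAS_DUR
+               + ...
+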
+Closes: https://lore.kernel.org/linux-iio/875xgfg0wz.fsf@Gerda.invalid/
+Fixes: 26ccfaa9ddaa ("iio: pressure: bmp280: Use sleep and forced mode for oneshot captures")
+Suggested-by: David Lechner <dlechner@baylibre.com>
+Tested-by: Achim Gratz <Achim.Gratz@Stromeko.DE>
+Signed-off-by: Achim Gratz <Achim.Gratz@Stromeko.DE>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/pressure/bmp280-core.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -1042,13 +1042,16 @@ static int bmp280_wait_conv(struct bmp28
+ unsigned int reg, meas_time_us;
+ int ret;
+
+- /* Check if we are using a BME280 device */
+- if (data->oversampling_humid)
+- meas_time_us = BMP280_PRESS_HUMID_MEAS_OFFSET +
+- BIT(data->oversampling_humid) * BMP280_MEAS_DUR;
++ /* Constant part of the measurement time */
++ meas_time_us = BMP280_MEAS_OFFSET;
+
+- else
+- meas_time_us = 0;
++ /*
++ * Check if we are using a BME280 device,
++ * Humidity measurement time
++ */
++ if (data->chip_info->oversampling_humid_avail)
++ meas_time_us += BMP280_PRESS_HUMID_MEAS_OFFSET +
++ BIT(data->oversampling_humid) * BMP280_MEAS_DUR;
+
+ /* Pressure measurement time */
+ meas_time_us += BMP280_PRESS_HUMID_MEAS_OFFSET +
--- /dev/null
+From 841ecc979b18d3227fad5e2d6a1e6f92688776b5 Mon Sep 17 00:00:00 2001
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Date: Fri, 28 Nov 2025 16:53:46 +0000
+Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream.
+
+Owing to the Config4.MMUSizeExt and VTLB/FTLB MMU features, later MIPSr2+
+cores can have more than 64 TLB entries. Therefore allocate an array
+for uniquification instead of placing a too-small array on the stack.
+
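+Concretely, the previous on-stack array held only 1 << MIPS_CONF1_TLBS_SIZE
+(64) VPNs, so a core reporting, say, a 16-entry VTLB plus a 512-entry FTLB
+(figures illustrative) would have overrun it; sizing the allocation by
+current_cpu_data.tlbsize avoids both the overrun and a large on-stack
+buffer.
+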
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Co-developed-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+Cc: stable@vger.kernel.org # v6.17+
+Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -12,6 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/smp.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -522,17 +523,26 @@ static int r4k_vpn_cmp(const void *a, co
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+-static void r4k_tlb_uniquify(void)
++static void __ref r4k_tlb_uniquify(void)
+ {
+- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
+ int start = num_wired_entries();
++ phys_addr_t tlb_vpn_size;
++ unsigned long *tlb_vpns;
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
+ htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+@@ -585,6 +595,10 @@ static void r4k_tlb_uniquify(void)
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
++ if (use_slab)
++ kfree(tlb_vpns);
++ else
++ memblock_free(tlb_vpns, tlb_vpn_size);
+ }
+
+ /*
--- /dev/null
+From 9f048fa487409e364cf866c957cf0b0d782ca5a3 Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@orcam.me.uk>
+Date: Thu, 13 Nov 2025 05:21:10 +0000
+Subject: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 9f048fa487409e364cf866c957cf0b0d782ca5a3 upstream.
+
+Depending on the particular CPU implementation a TLB shutdown may occur
+if multiple matching entries are detected upon the execution of a TLBP
+or the TLBWI/TLBWR instructions. Given that we don't know what entries
+we have been handed we need to be very careful with the initial TLB
+setup and avoid all these instructions.
+
+Therefore read all the TLB entries one by one with the TLBR instruction,
+bypassing the content addressing logic, and truncate any large pages in
+place so as to avoid a case in the second step where an incoming entry
+for a large page at a lower address overlaps with a replacement entry
+chosen at another index. Then preinitialize the TLB using addresses
+outside our usual unique range and avoiding clashes with any entries
+received, before making the usual call to local_flush_tlb_all().
+
+This fixes (at least) R4x00 cores if TLBP hits multiple matching TLB
+entries (the SGI IP22 PROM, for example, sets up all TLBs to the same
+virtual address).
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com> # Boston I6400, M5150 sim
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/tlb-r4k.c | 102 ++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 64 insertions(+), 38 deletions(-)
+
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -15,6 +15,7 @@
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
++#include <linux/sort.h>
+
+ #include <asm/cpu.h>
+ #include <asm/cpu-type.h>
+@@ -508,55 +509,79 @@ static int __init set_ntlb(char *str)
+
+ __setup("ntlb=", set_ntlb);
+
+-/* Initialise all TLB entries with unique values */
++
++/* Comparison function for EntryHi VPN fields. */
++static int r4k_vpn_cmp(const void *a, const void *b)
++{
++ long v = *(unsigned long *)a - *(unsigned long *)b;
++ int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
++ return s ? (v != 0) | v >> s : v;
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
+ static void r4k_tlb_uniquify(void)
+ {
+- int entry = num_wired_entries();
++ unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
++ int tlbsize = current_cpu_data.tlbsize;
++ int start = num_wired_entries();
++ unsigned long vpn_mask;
++ int cnt, ent, idx, i;
++
++ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
++ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
+ htw_stop();
+- write_c0_entrylo0(0);
+- write_c0_entrylo1(0);
+
+- while (entry < current_cpu_data.tlbsize) {
+- unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+- unsigned long asid = 0;
+- int idx;
++ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
++ unsigned long vpn;
+
+- /* Skip wired MMID to make ginvt_mmid work */
+- if (cpu_has_mmid)
+- asid = MMID_KERNEL_WIRED + 1;
++ write_c0_index(i);
++ mtc0_tlbr_hazard();
++ tlb_read();
++ tlb_read_hazard();
++ vpn = read_c0_entryhi();
++ vpn &= vpn_mask & PAGE_MASK;
++ tlb_vpns[cnt] = vpn;
+
+- /* Check for match before using UNIQUE_ENTRYHI */
+- do {
+- if (cpu_has_mmid) {
+- write_c0_memorymapid(asid);
+- write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+- } else {
+- write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
+- }
+- mtc0_tlbw_hazard();
+- tlb_probe();
+- tlb_probe_hazard();
+- idx = read_c0_index();
+- /* No match or match is on current entry */
+- if (idx < 0 || idx == entry)
+- break;
+- /*
+- * If we hit a match, we need to try again with
+- * a different ASID.
+- */
+- asid++;
+- } while (asid < asid_mask);
+-
+- if (idx >= 0 && idx != entry)
+- panic("Unable to uniquify TLB entry %d", idx);
+-
+- write_c0_index(entry);
++ /* Prevent any large pages from overlapping regular ones. */
++ write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+- entry++;
++ tlbw_use_hazard();
+ }
+
++ sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++ write_c0_entrylo0(0);
++ write_c0_entrylo1(0);
++
++ idx = 0;
++ ent = tlbsize;
++ for (i = start; i < tlbsize; i++)
++ while (1) {
++ unsigned long entryhi, vpn;
++
++ entryhi = UNIQUE_ENTRYHI(ent);
++ vpn = entryhi & vpn_mask & PAGE_MASK;
++
++ if (idx >= cnt || vpn < tlb_vpns[idx]) {
++ write_c0_entryhi(entryhi);
++ write_c0_index(i);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++ ent++;
++ break;
++ } else if (vpn == tlb_vpns[idx]) {
++ ent++;
++ } else {
++ idx++;
++ }
++ }
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+@@ -602,6 +627,7 @@ static void r4k_tlb_configure(void)
+
+ /* From this point on the ARC firmware is dead. */
+ r4k_tlb_uniquify();
++ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+ }
--- /dev/null
+From 3126c9ccb4373d8758733c6699ba5ab93dbe5c9d Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 25 Nov 2025 09:08:45 -0500
+Subject: Revert "drm/amd/display: Move setup_stream_attribute"
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 3126c9ccb4373d8758733c6699ba5ab93dbe5c9d upstream.
+
+This reverts commit 2681bf4ae8d24df950138b8c9ea9c271cd62e414.
+
+This results in a blank screen on the HDMI port on some systems.
+Revert for now so as not to regress 6.18; this can be addressed
+in 6.19 once the issue is root-caused.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4652
+Cc: Sunpeng.Li@amd.com
+Cc: ivan.lipski@amd.com
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit d0e9de7a81503cdde37fb2d37f1d102f9e0f38fb)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 1 -
+ drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 2 --
+ drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 2 --
+ drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c | 7 -------
+ 5 files changed, 3 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+@@ -671,7 +671,6 @@ void dce110_enable_stream(struct pipe_ct
+ uint32_t early_control = 0;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+- link_hwss->setup_stream_attribute(pipe_ctx);
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
+ dc->hwss.update_info_frame(pipe_ctx);
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -3060,8 +3060,6 @@ void dcn20_enable_stream(struct pipe_ctx
+ link_enc->transmitter - TRANSMITTER_UNIPHY_A);
+ }
+
+- link_hwss->setup_stream_attribute(pipe_ctx);
+-
+ if (dc->res_pool->dccg->funcs->set_pixel_rate_div)
+ dc->res_pool->dccg->funcs->set_pixel_rate_div(
+ dc->res_pool->dccg,
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+@@ -968,8 +968,6 @@ void dcn401_enable_stream(struct pipe_ct
+ }
+ }
+
+- link_hwss->setup_stream_attribute(pipe_ctx);
+-
+ if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+ dc->res_pool->dccg->funcs->set_pixel_rate_div(
+ dc->res_pool->dccg,
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -2458,6 +2458,7 @@ void link_set_dpms_on(
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
+ enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
++ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ bool apply_edp_fast_boot_optimization =
+ pipe_ctx->stream->apply_edp_fast_boot_optimization;
+
+@@ -2501,6 +2502,8 @@ void link_set_dpms_on(
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
+ }
+
++ link_hwss->setup_stream_attribute(pipe_ctx);
++
+ pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+
+ // Enable VPG before building infoframe
+--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+@@ -44,11 +44,6 @@ static void virtual_stream_encoder_dvi_s
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link) {}
+
+-static void virtual_stream_encoder_lvds_set_stream_attribute(
+- struct stream_encoder *enc,
+- struct dc_crtc_timing *crtc_timing)
+-{}
+-
+ static void virtual_stream_encoder_set_throttled_vcp_size(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp)
+@@ -120,8 +115,6 @@ static const struct stream_encoder_funcs
+ virtual_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ virtual_stream_encoder_dvi_set_stream_attribute,
+- .lvds_set_stream_attribute =
+- virtual_stream_encoder_lvds_set_stream_attribute,
+ .set_throttled_vcp_size =
+ virtual_stream_encoder_set_throttled_vcp_size,
+ .update_hdmi_info_packets =
--- /dev/null
+From 6d08340d1e354787d6c65a8c3cdd4d41ffb8a5ed Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Tue, 4 Nov 2025 22:54:02 +0100
+Subject: Revert "perf/x86: Always store regs->ip in perf_callchain_kernel()"
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 6d08340d1e354787d6c65a8c3cdd4d41ffb8a5ed upstream.
+
+This reverts commit 83f44ae0f8afcc9da659799db8693f74847e66b3.
+
+Currently we store the initial stacktrace entry twice for non-HW pt_regs,
+i.e. for callers that fail the perf_hw_regs(regs) check in
+perf_callchain_kernel().
+
+It's easy to reproduce with this bpftrace:
+
+ # bpftrace -e 'tracepoint:sched:sched_process_exec { print(kstack()); }'
+ Attaching 1 probe...
+
+ bprm_execve+1767
+ bprm_execve+1767
+ do_execveat_common.isra.0+425
+ __x64_sys_execve+56
+ do_syscall_64+133
+ entry_SYSCALL_64_after_hwframe+118
+
+When perf_callchain_kernel calls unwind_start with first_frame, AFAICS
+we do not skip regs->ip, but it's added as part of the unwind process.
+Hence reverting the extra perf_callchain_store for non-hw regs leg.
+
+I was not able to bisect this, so I'm not really sure why this was needed
+in v5.2 and why it's not working anymore, but I could see double entries
+as far back as v5.10.
+
+I tested both ORC and frame-pointer unwinding with and without this fix,
+and except for the initial entry the stacktraces are the same.
+
+Acked-by: Song Liu <song@kernel.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20251104215405.168643-2-jolsa@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/core.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2787,13 +2787,13 @@ perf_callchain_kernel(struct perf_callch
+ return;
+ }
+
+- if (perf_callchain_store(entry, regs->ip))
+- return;
+-
+- if (perf_hw_regs(regs))
++ if (perf_hw_regs(regs)) {
++ if (perf_callchain_store(entry, regs->ip))
++ return;
+ unwind_start(&state, current, regs, NULL);
+- else
++ } else {
+ unwind_start(&state, current, NULL, (void *)regs->sp);
++ }
+
+ for (; !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
spi-nxp-fspi-propagate-fwnode-in-acpi-case-as-well.patch
spi-bcm63xx-fix-premature-cs-deassertion-on-rx-only-.patch
afs-fix-uninit-var-in-afs_alloc_anon_key.patch
+timekeeping-fix-error-code-in-tk_aux_sysfs_init.patch
+revert-drm-amd-display-move-setup_stream_attribute.patch
+revert-perf-x86-always-store-regs-ip-in-perf_callchain_kernel.patch
+iio-buffer-dma-support-getting-the-dma-channel.patch
+iio-buffer-dmaengine-enable-.get_dma_dev.patch
+iio-buffer-support-getting-dma-channel-from-the-buffer.patch
+iio-humditiy-hdc3020-fix-units-for-temperature-and-humidity-measurement.patch
+iio-humditiy-hdc3020-fix-units-for-thresholds-and-hysteresis.patch
+iio-imu-st_lsm6dsx-fix-array-size-for-st_lsm6dsx_settings-fields.patch
+iio-pressure-bmp280-correct-meas_time_us-calculation.patch
+iio-common-ssp_sensors-fix-an-error-handling-path-ssp_probe.patch
+iio-adc-stm32-dfsdm-fix-st-adc-alt-channel-property-handling.patch
+iio-accel-bmc150-fix-irq-assumption-regression.patch
+iio-accel-fix-adxl355-startup-race-condition.patch
+iio-adc-ad4030-fix-_scale-value-for-common-mode-channels.patch
+iio-adc-ad7124-fix-temperature-channel.patch
+iio-adc-ad7280a-fix-ad7280_store_balance_timer.patch
+iio-adc-ad7380-fix-spi-offload-trigger-rate.patch
+iio-adc-rtq6056-correct-the-sign-bit-index.patch
+mips-mm-prevent-a-tlb-shutdown-on-initial-uniquification.patch
+mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch
+virtio-net-avoid-unnecessary-checksum-calculation-on-guest-rx.patch
+vhost-rewind-next_avail_head-while-discarding-descriptors.patch
+tracing-fix-warn_on-in-tracing_buffers_mmap_close-for-split-vmas.patch
--- /dev/null
+From c7418164b463056bf4327b6a2abe638b78250f13 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Tue, 25 Nov 2025 16:55:19 +0300
+Subject: timekeeping: Fix error code in tk_aux_sysfs_init()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit c7418164b463056bf4327b6a2abe638b78250f13 upstream.
+
+If kobject_create_and_add() fails on the first iteration, then the error
+code is set to -ENOMEM, which is correct. But if it fails on a subsequent
+iteration then "ret" is zero, which means success, when it should be
+-ENOMEM.
+
+Set the error code to -ENOMEM correctly.
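+
+A minimal userspace model of the loop's error handling (stand-in helpers
+with hypothetical names; cleanup of already-created objects is omitted):
+
+	#include <errno.h>
+	#include <stdio.h>
+	#include <stdlib.h>
+
+	/* fail on the third pass to mimic a late allocation failure */
+	static void *create_obj(int i) { return i == 2 ? NULL : malloc(1); }
+	static int create_group(void *obj) { (void)obj; return 0; }
+
+	int main(void)
+	{
+		int ret = 0;
+
+		for (int i = 0; i < 8; i++) {
+			void *obj = create_obj(i);
+
+			if (!obj) {
+				ret = -ENOMEM;	/* must be set on every pass */
+				goto err_clean;
+			}
+			ret = create_group(obj);  /* success resets ret to 0 */
+			if (ret)
+				goto err_clean;
+		}
+		return 0;
+	err_clean:
+		printf("ret = %d\n", ret);  /* -12; it was 0 before the fix */
+		return 1;
+	}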
+
+Fixes: 7b5ab04f035f ("timekeeping: Fix resource leak in tk_aux_sysfs_init() error paths")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Malaya Kumar Rout <mrout@redhat.com>
+Link: https://patch.msgid.link/aSW1R8q5zoY_DgQE@stanley.mountain
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/timekeeping.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 08e0943b54da..4790da895203 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -3073,8 +3073,10 @@ static int __init tk_aux_sysfs_init(void)
+ char id[2] = { [0] = '0' + i, };
+ struct kobject *clk = kobject_create_and_add(id, auxo);
+
+- if (!clk)
++ if (!clk) {
++ ret = -ENOMEM;
+ goto err_clean;
++ }
+
+ ret = sysfs_create_group(clk, &aux_clock_enable_attr_group);
+ if (ret)
+--
+2.52.0
+
--- /dev/null
+From b042fdf18e89a347177a49e795d8e5184778b5b6 Mon Sep 17 00:00:00 2001
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+Date: Wed, 19 Nov 2025 12:10:19 +0530
+Subject: tracing: Fix WARN_ON in tracing_buffers_mmap_close for split VMAs
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+commit b042fdf18e89a347177a49e795d8e5184778b5b6 upstream.
+
+When a VMA is split (e.g., by partial munmap or MAP_FIXED), the kernel
+calls vm_ops->close on each portion. For trace buffer mappings, this
+results in ring_buffer_unmap() being called multiple times while
+ring_buffer_map() was only called once.
+
+This causes ring_buffer_unmap() to return -ENODEV on subsequent calls
+because user_mapped is already 0, triggering a WARN_ON.
+
+Trace buffer mappings cannot support partial mappings because the ring
+buffer structure requires the complete buffer including the meta page.
+
+Fix this by adding a may_split callback that returns -EINVAL to prevent
+VMA splits entirely.
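+
+For reference, this is the kind of userspace operation that splits a VMA,
+sketched against an anonymous mapping for brevity (the real trigger is a
+partial munmap() or MAP_FIXED over the trace_pipe_raw mapping); with
+.may_split in place, such a partial unmap of a trace buffer mapping fails
+with -EINVAL instead of splitting and calling ->close on each portion:
+
+	#include <stdio.h>
+	#include <sys/mman.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		long page = sysconf(_SC_PAGESIZE);
+		void *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
+				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+		if (buf == MAP_FAILED)
+			return 1;
+		/* unmapping the middle page splits one VMA into two */
+		if (munmap((char *)buf + page, page))
+			perror("munmap");
+		return 0;
+	}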
+
+Cc: stable@vger.kernel.org
+Fixes: cf9f0f7c4c5bb ("tracing: Allow user-space mapping of the ring-buffer")
+Link: https://patch.msgid.link/20251119064019.25904-1-kartikey406@gmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=a72c325b042aae6403c7
+Tested-by: syzbot+a72c325b042aae6403c7@syzkaller.appspotmail.com
+Reported-by: syzbot+a72c325b042aae6403c7@syzkaller.appspotmail.com
+Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -8781,8 +8781,18 @@ static void tracing_buffers_mmap_close(s
+ put_snapshot_map(iter->tr);
+ }
+
++static int tracing_buffers_may_split(struct vm_area_struct *vma, unsigned long addr)
++{
++ /*
++ * Trace buffer mappings require the complete buffer including
++ * the meta page. Partial mappings are not supported.
++ */
++ return -EINVAL;
++}
++
+ static const struct vm_operations_struct tracing_buffers_vmops = {
+ .close = tracing_buffers_mmap_close,
++ .may_split = tracing_buffers_may_split,
+ };
+
+ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
--- /dev/null
+From 779bcdd4b9ae6566f309043c53c946e8ac0015fd Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Thu, 20 Nov 2025 10:29:50 +0800
+Subject: vhost: rewind next_avail_head while discarding descriptors
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit 779bcdd4b9ae6566f309043c53c946e8ac0015fd upstream.
+
+When discarding descriptors with IN_ORDER, we should rewind
+next_avail_head, otherwise it runs out of sync with
+last_avail_idx. This causes the driver to report
+"id X is not a head".
+
+Fix this by returning the number of descriptors used for each buffer
+via vhost_get_vq_desc_n() so the caller can use the value while
+discarding descriptors.
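+
+A toy model (plain C with hypothetical names, not the vhost code) of why
+both cursors must be rewound together: last_avail_idx advances by one per
+buffer, while next_avail_head advances by the descriptor-chain length, so
+the rollback needs both counts:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	struct vq_model {
+		uint16_t last_avail_idx;	/* +1 per buffer */
+		uint16_t next_avail_head;	/* +N per buffer, N = chain length */
+	};
+
+	static void get_buf(struct vq_model *vq, unsigned int ndesc)
+	{
+		vq->last_avail_idx += 1;
+		vq->next_avail_head += ndesc;
+	}
+
+	static void discard(struct vq_model *vq, int nbufs, unsigned int ndesc)
+	{
+		vq->next_avail_head -= ndesc;	/* the rewind this patch adds */
+		vq->last_avail_idx -= nbufs;
+	}
+
+	int main(void)
+	{
+		struct vq_model vq = { 0, 0 };
+
+		get_buf(&vq, 3);	/* one buffer built from 3 descriptors */
+		discard(&vq, 1, 3);	/* error path: both cursors back to 0 */
+		printf("%u %u\n", vq.last_avail_idx, vq.next_avail_head);
+		return 0;
+	}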
+
+Fixes: 67a873df0c41 ("vhost: basic in order support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://patch.msgid.link/20251120022950.10117-1-jasowang@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c | 53 ++++++++++++++++++------------
+ drivers/vhost/vhost.c | 76 +++++++++++++++++++++++++++++++++++--------
+ drivers/vhost/vhost.h | 10 +++++-
+ 3 files changed, 103 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 35ded4330431..8f7f50acb6d6 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -592,14 +592,15 @@ static void vhost_net_busy_poll(struct vhost_net *net,
+ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
+ struct vhost_net_virtqueue *tnvq,
+ unsigned int *out_num, unsigned int *in_num,
+- struct msghdr *msghdr, bool *busyloop_intr)
++ struct msghdr *msghdr, bool *busyloop_intr,
++ unsigned int *ndesc)
+ {
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
+
+- int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+- out_num, in_num, NULL, NULL);
++ int r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
++ out_num, in_num, NULL, NULL, ndesc);
+
+ if (r == tvq->num && tvq->busyloop_timeout) {
+ /* Flush batched packets first */
+@@ -610,8 +611,8 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
+
+ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
+
+- r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+- out_num, in_num, NULL, NULL);
++ r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
++ out_num, in_num, NULL, NULL, ndesc);
+ }
+
+ return r;
+@@ -642,12 +643,14 @@ static int get_tx_bufs(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct msghdr *msg,
+ unsigned int *out, unsigned int *in,
+- size_t *len, bool *busyloop_intr)
++ size_t *len, bool *busyloop_intr,
++ unsigned int *ndesc)
+ {
+ struct vhost_virtqueue *vq = &nvq->vq;
+ int ret;
+
+- ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
++ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg,
++ busyloop_intr, ndesc);
+
+ if (ret < 0 || ret == vq->num)
+ return ret;
+@@ -766,6 +769,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
+ int sent_pkts = 0;
+ bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
++ unsigned int ndesc = 0;
+
+ do {
+ bool busyloop_intr = false;
+@@ -774,7 +778,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
+ vhost_tx_batch(net, nvq, sock, &msg);
+
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+- &busyloop_intr);
++ &busyloop_intr, &ndesc);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+@@ -806,7 +810,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
+ goto done;
+ } else if (unlikely(err != -ENOSPC)) {
+ vhost_tx_batch(net, nvq, sock, &msg);
+- vhost_discard_vq_desc(vq, 1);
++ vhost_discard_vq_desc(vq, 1, ndesc);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+@@ -829,7 +833,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
+ err = sock->ops->sendmsg(sock, &msg, len);
+ if (unlikely(err < 0)) {
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+- vhost_discard_vq_desc(vq, 1);
++ vhost_discard_vq_desc(vq, 1, ndesc);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+@@ -868,6 +872,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+ int err;
+ struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info_msgzc *ubuf;
++ unsigned int ndesc = 0;
+ bool zcopy_used;
+ int sent_pkts = 0;
+
+@@ -879,7 +884,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+
+ busyloop_intr = false;
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+- &busyloop_intr);
++ &busyloop_intr, &ndesc);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+@@ -941,7 +946,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+ vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
+ }
+ if (retry) {
+- vhost_discard_vq_desc(vq, 1);
++ vhost_discard_vq_desc(vq, 1, ndesc);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+@@ -1045,11 +1050,12 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
+ unsigned *iovcount,
+ struct vhost_log *log,
+ unsigned *log_num,
+- unsigned int quota)
++ unsigned int quota,
++ unsigned int *ndesc)
+ {
+ struct vhost_virtqueue *vq = &nvq->vq;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+- unsigned int out, in;
++ unsigned int out, in, desc_num, n = 0;
+ int seg = 0;
+ int headcount = 0;
+ unsigned d;
+@@ -1064,9 +1070,9 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
+ r = -ENOBUFS;
+ goto err;
+ }
+- r = vhost_get_vq_desc(vq, vq->iov + seg,
+- ARRAY_SIZE(vq->iov) - seg, &out,
+- &in, log, log_num);
++ r = vhost_get_vq_desc_n(vq, vq->iov + seg,
++ ARRAY_SIZE(vq->iov) - seg, &out,
++ &in, log, log_num, &desc_num);
+ if (unlikely(r < 0))
+ goto err;
+
+@@ -1093,6 +1099,7 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
+ ++headcount;
+ datalen -= len;
+ seg += in;
++ n += desc_num;
+ }
+
+ *iovcount = seg;
+@@ -1113,9 +1120,11 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
+ nheads[0] = headcount;
+ }
+
++ *ndesc = n;
++
+ return headcount;
+ err:
+- vhost_discard_vq_desc(vq, headcount);
++ vhost_discard_vq_desc(vq, headcount, n);
+ return r;
+ }
+
+@@ -1151,6 +1160,7 @@ static void handle_rx(struct vhost_net *net)
+ struct iov_iter fixup;
+ __virtio16 num_buffers;
+ int recv_pkts = 0;
++ unsigned int ndesc;
+
+ mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
+ sock = vhost_vq_get_backend(vq);
+@@ -1182,7 +1192,8 @@ static void handle_rx(struct vhost_net *net)
+ headcount = get_rx_bufs(nvq, vq->heads + count,
+ vq->nheads + count,
+ vhost_len, &in, vq_log, &log,
+- likely(mergeable) ? UIO_MAXIOV : 1);
++ likely(mergeable) ? UIO_MAXIOV : 1,
++ &ndesc);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(headcount < 0))
+ goto out;
+@@ -1228,7 +1239,7 @@ static void handle_rx(struct vhost_net *net)
+ if (unlikely(err != sock_len)) {
+ pr_debug("Discarded rx packet: "
+ " len %d, expected %zd\n", err, sock_len);
+- vhost_discard_vq_desc(vq, headcount);
++ vhost_discard_vq_desc(vq, headcount, ndesc);
+ continue;
+ }
+ /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
+@@ -1252,7 +1263,7 @@ static void handle_rx(struct vhost_net *net)
+ copy_to_iter(&num_buffers, sizeof num_buffers,
+ &fixup) != sizeof num_buffers) {
+ vq_err(vq, "Failed num_buffers write");
+- vhost_discard_vq_desc(vq, headcount);
++ vhost_discard_vq_desc(vq, headcount, ndesc);
+ goto out;
+ }
+ nvq->done_idx += headcount;
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 8570fdf2e14a..a78226b37739 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2792,18 +2792,34 @@ static int get_indirect(struct vhost_virtqueue *vq,
+ return 0;
+ }
+
+-/* This looks in the virtqueue and for the first available buffer, and converts
+- * it to an iovec for convenient access. Since descriptors consist of some
+- * number of output then some number of input descriptors, it's actually two
+- * iovecs, but we pack them into one and note how many of each there were.
++/**
++ * vhost_get_vq_desc_n - Fetch the next available descriptor chain and build iovecs
++ * @vq: target virtqueue
++ * @iov: array that receives the scatter/gather segments
++ * @iov_size: capacity of @iov in elements
++ * @out_num: the number of output segments
++ * @in_num: the number of input segments
++ * @log: optional array to record addr/len for each writable segment; NULL if unused
++ * @log_num: optional output; number of entries written to @log when provided
++ * @ndesc: optional output; number of descriptors consumed from the available ring
++ * (useful for rollback via vhost_discard_vq_desc)
+ *
+- * This function returns the descriptor number found, or vq->num (which is
+- * never a valid descriptor number) if none was found. A negative code is
+- * returned on error. */
+-int vhost_get_vq_desc(struct vhost_virtqueue *vq,
+- struct iovec iov[], unsigned int iov_size,
+- unsigned int *out_num, unsigned int *in_num,
+- struct vhost_log *log, unsigned int *log_num)
++ * Extracts one available descriptor chain from @vq and translates guest addresses
++ * into host iovecs.
++ *
++ * On success, advances @vq->last_avail_idx by 1 and @vq->next_avail_head by the
++ * number of descriptors consumed (also stored via @ndesc when non-NULL).
++ *
++ * Return:
++ * - head index in [0, @vq->num) on success;
++ * - @vq->num if no descriptor is currently available;
++ * - negative errno on failure
++ */
++int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
++ struct iovec iov[], unsigned int iov_size,
++ unsigned int *out_num, unsigned int *in_num,
++ struct vhost_log *log, unsigned int *log_num,
++ unsigned int *ndesc)
+ {
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ struct vring_desc desc;
+@@ -2921,17 +2937,49 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
+ vq->last_avail_idx++;
+ vq->next_avail_head += c;
+
++ if (ndesc)
++ *ndesc = c;
++
+ /* Assume notifications from guest are disabled at this point,
+ * if they aren't we would need to update avail_event index. */
+ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
+ return head;
+ }
++EXPORT_SYMBOL_GPL(vhost_get_vq_desc_n);
++
++/* This looks in the virtqueue and for the first available buffer, and converts
++ * it to an iovec for convenient access. Since descriptors consist of some
++ * number of output then some number of input descriptors, it's actually two
++ * iovecs, but we pack them into one and note how many of each there were.
++ *
++ * This function returns the descriptor number found, or vq->num (which is
++ * never a valid descriptor number) if none was found. A negative code is
++ * returned on error.
++ */
++int vhost_get_vq_desc(struct vhost_virtqueue *vq,
++ struct iovec iov[], unsigned int iov_size,
++ unsigned int *out_num, unsigned int *in_num,
++ struct vhost_log *log, unsigned int *log_num)
++{
++ return vhost_get_vq_desc_n(vq, iov, iov_size, out_num, in_num,
++ log, log_num, NULL);
++}
+ EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
+
+-/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
+-void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
++/**
++ * vhost_discard_vq_desc - Reverse the effect of vhost_get_vq_desc_n()
++ * @vq: target virtqueue
++ * @nbufs: number of buffers to roll back
++ * @ndesc: number of descriptors to roll back
++ *
++ * Rewinds the internal consumer cursors after a failed attempt to use buffers
++ * returned by vhost_get_vq_desc_n().
++ */
++void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int nbufs,
++ unsigned int ndesc)
+ {
+- vq->last_avail_idx -= n;
++ vq->next_avail_head -= ndesc;
++ vq->last_avail_idx -= nbufs;
+ }
+ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
+
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index 621a6d9a8791..b49f08e4a1b4 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -230,7 +230,15 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num);
+-void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
++
++int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
++ struct iovec iov[], unsigned int iov_size,
++ unsigned int *out_num, unsigned int *in_num,
++ struct vhost_log *log, unsigned int *log_num,
++ unsigned int *ndesc);
++
++void vhost_discard_vq_desc(struct vhost_virtqueue *, int nbuf,
++ unsigned int ndesc);
+
+ bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
+ bool vhost_vq_has_work(struct vhost_virtqueue *vq);
+--
+2.52.0
+
--- /dev/null
+From 1cd1c472343b06d6d32038636ce51bfa2251e3cf Mon Sep 17 00:00:00 2001
+From: Jon Kohler <jon@nutanix.com>
+Date: Tue, 25 Nov 2025 15:27:53 -0700
+Subject: virtio-net: avoid unnecessary checksum calculation on guest RX
+
+From: Jon Kohler <jon@nutanix.com>
+
+commit 1cd1c472343b06d6d32038636ce51bfa2251e3cf upstream.
+
+Commit a2fb4bc4e2a6 ("net: implement virtio helpers to handle UDP
+GSO tunneling.") inadvertently altered checksum offload behavior
+for guests not using UDP GSO tunneling.
+
+Before, tun_put_user called tun_vnet_hdr_from_skb, which passed
+has_data_valid = true to virtio_net_hdr_from_skb.
+
+After, tun_put_user began calling tun_vnet_hdr_tnl_from_skb instead,
+which passes has_data_valid = false into both call sites.
+
+This caused virtio hdr flags to not include VIRTIO_NET_HDR_F_DATA_VALID
+for SKBs where skb->ip_summed == CHECKSUM_UNNECESSARY. As a result,
+guests are forced to recalculate checksums unnecessarily.
+
+Restore the previous behavior by ensuring has_data_valid = true is
+passed in the !tnl_gso_type case, but only from the tun side, as
+virtio_net_hdr_tnl_from_skb() is also used by the virtio_net driver,
+which in turn must not use VIRTIO_NET_HDR_F_DATA_VALID on tx.
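+
+A toy model of the has_data_valid decision at the two call sites
+(stand-in constants and a hypothetical helper, not the kernel API):
+
+	#include <stdio.h>
+
+	/* stand-ins for the real skbuff / virtio_net UAPI values */
+	#define CHECKSUM_UNNECESSARY		1
+	#define VIRTIO_NET_HDR_F_DATA_VALID	2
+
+	static int hdr_flags(int ip_summed, int has_data_valid)
+	{
+		int flags = 0;
+
+		if (has_data_valid && ip_summed == CHECKSUM_UNNECESSARY)
+			flags |= VIRTIO_NET_HDR_F_DATA_VALID;
+		return flags;
+	}
+
+	int main(void)
+	{
+		/* tun -> guest RX: flag set, guest can skip the recalculation */
+		printf("tun: %d\n", hdr_flags(CHECKSUM_UNNECESSARY, 1));
+		/* virtio-net TX: has_data_valid stays false, flag stays clear */
+		printf("virtio-net: %d\n", hdr_flags(CHECKSUM_UNNECESSARY, 0));
+		return 0;
+	}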
+
+cc: stable@vger.kernel.org
+Fixes: a2fb4bc4e2a6 ("net: implement virtio helpers to handle UDP GSO tunneling.")
+Signed-off-by: Jon Kohler <jon@nutanix.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Link: https://patch.msgid.link/20251125222754.1737443-1-jon@nutanix.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/tun_vnet.h | 2 +-
+ drivers/net/virtio_net.c | 3 ++-
+ include/linux/virtio_net.h | 7 ++++---
+ 3 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/tun_vnet.h
++++ b/drivers/net/tun_vnet.h
+@@ -244,7 +244,7 @@ tun_vnet_hdr_tnl_from_skb(unsigned int f
+
+ if (virtio_net_hdr_tnl_from_skb(skb, tnl_hdr, has_tnl_offload,
+ tun_vnet_is_little_endian(flags),
+- vlan_hlen)) {
++ vlan_hlen, true)) {
+ struct virtio_net_hdr_v1 *hdr = &tnl_hdr->hash_hdr.hdr;
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3340,7 +3340,8 @@ static int xmit_skb(struct send_queue *s
+ hdr = &skb_vnet_common_hdr(skb)->tnl_hdr;
+
+ if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
+- virtio_is_little_endian(vi->vdev), 0))
++ virtio_is_little_endian(vi->vdev), 0,
++ false))
+ return -EPROTO;
+
+ if (vi->mergeable_rx_bufs)
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -384,7 +384,8 @@ virtio_net_hdr_tnl_from_skb(const struct
+ struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+ bool tnl_hdr_negotiated,
+ bool little_endian,
+- int vlan_hlen)
++ int vlan_hlen,
++ bool has_data_valid)
+ {
+ struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
+ unsigned int inner_nh, outer_th;
+@@ -394,8 +395,8 @@ virtio_net_hdr_tnl_from_skb(const struct
+ tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM);
+ if (!tnl_gso_type)
+- return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
+- vlan_hlen);
++ return virtio_net_hdr_from_skb(skb, hdr, little_endian,
++ has_data_valid, vlan_hlen);
+
+ /* Tunnel support not negotiated but skb ask for it. */
+ if (!tnl_hdr_negotiated)