--- /dev/null
+From stable+bounces-204828-greg=kroah.com@vger.kernel.org Mon Jan 5 16:15:00 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 10:10:08 -0500
+Subject: ASoC: qcom: sdw: fix memory leak for sdw_stream_runtime
+To: stable@vger.kernel.org
+Cc: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>, Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, Stable@vger.kernel.org, Steev Klimaszewski <threeway@gmail.com>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105151008.2624877-2-sashal@kernel.org>
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+[ Upstream commit bcba17279327c6e85dee6a97014dc642e2dc93cc ]
+
+For some reason we ended up allocating a sdw_stream_runtime for every
+cpu dai, which has two issues:
+1. We never call snd_soc_dai_set_stream() for non-SoundWire dais, which
+   means there is no way to free the runtime, resulting in a memory
+   leak.
+2. The startup and shutdown callbacks can be called without the
+   hw_params callback having been called. This combination also results
+   in a memory leak, because the machine driver's sruntime array pointer
+   is only set in the hw_params callback.
+
+Fix this by:
+1. Adding a helper function to get the sdw_stream_runtime for a
+   substream, which the shutdown callback can use to get hold of the
+   sruntime to free.
+2. Only allocating the sdw_stream_runtime for SoundWire dais.
+
+Fixes: d32bac9cb09c ("ASoC: qcom: Add helper for allocating Soundwire stream runtime")
+Cc: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Cc: Stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Tested-by: Steev Klimaszewski <threeway@gmail.com> # Thinkpad X13s
+Link: https://patch.msgid.link/20251022143349.1081513-2-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/sc7280.c | 2 +-
+ sound/soc/qcom/sc8280xp.c | 2 +-
+ sound/soc/qcom/sdw.c | 105 +++++++++++++++++++++++++---------------------
+ sound/soc/qcom/sdw.h | 1 +
+ sound/soc/qcom/sm8250.c | 2 +-
+ sound/soc/qcom/x1e80100.c | 2 +-
+ 6 files changed, 64 insertions(+), 50 deletions(-)
+
+--- a/sound/soc/qcom/sc7280.c
++++ b/sound/soc/qcom/sc7280.c
+@@ -317,7 +317,7 @@ static void sc7280_snd_shutdown(struct s
+ struct snd_soc_card *card = rtd->card;
+ struct sc7280_snd_data *data = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+- struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
++ struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
+
+ switch (cpu_dai->id) {
+ case MI2S_PRIMARY:
+--- a/sound/soc/qcom/sc8280xp.c
++++ b/sound/soc/qcom/sc8280xp.c
+@@ -69,7 +69,7 @@ static void sc8280xp_snd_shutdown(struct
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sc8280xp_snd_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+- struct sdw_stream_runtime *sruntime = pdata->sruntime[cpu_dai->id];
++ struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
+
+ pdata->sruntime[cpu_dai->id] = NULL;
+ sdw_release_stream(sruntime);
+--- a/sound/soc/qcom/sdw.c
++++ b/sound/soc/qcom/sdw.c
+@@ -7,6 +7,37 @@
+ #include <sound/soc.h>
+ #include "sdw.h"
+
++static bool qcom_snd_is_sdw_dai(int id)
++{
++ switch (id) {
++ case WSA_CODEC_DMA_RX_0:
++ case WSA_CODEC_DMA_TX_0:
++ case WSA_CODEC_DMA_RX_1:
++ case WSA_CODEC_DMA_TX_1:
++ case WSA_CODEC_DMA_TX_2:
++ case RX_CODEC_DMA_RX_0:
++ case TX_CODEC_DMA_TX_0:
++ case RX_CODEC_DMA_RX_1:
++ case TX_CODEC_DMA_TX_1:
++ case RX_CODEC_DMA_RX_2:
++ case TX_CODEC_DMA_TX_2:
++ case RX_CODEC_DMA_RX_3:
++ case TX_CODEC_DMA_TX_3:
++ case RX_CODEC_DMA_RX_4:
++ case TX_CODEC_DMA_TX_4:
++ case RX_CODEC_DMA_RX_5:
++ case TX_CODEC_DMA_TX_5:
++ case RX_CODEC_DMA_RX_6:
++ case RX_CODEC_DMA_RX_7:
++ case SLIMBUS_0_RX...SLIMBUS_6_TX:
++ return true;
++ default:
++ break;
++ }
++
++ return false;
++}
++
+ /**
+ * qcom_snd_sdw_startup() - Helper to start Soundwire stream for SoC audio card
+ * @substream: The PCM substream from audio, as passed to snd_soc_ops->startup()
+@@ -27,6 +58,9 @@ int qcom_snd_sdw_startup(struct snd_pcm_
+ struct snd_soc_dai *codec_dai;
+ int ret, i;
+
++ if (!qcom_snd_is_sdw_dai(cpu_dai->id))
++ return 0;
++
+ sruntime = sdw_alloc_stream(cpu_dai->name, SDW_STREAM_PCM);
+ if (!sruntime)
+ return -ENOMEM;
+@@ -61,19 +95,8 @@ int qcom_snd_sdw_prepare(struct snd_pcm_
+ if (!sruntime)
+ return 0;
+
+- switch (cpu_dai->id) {
+- case WSA_CODEC_DMA_RX_0:
+- case WSA_CODEC_DMA_RX_1:
+- case RX_CODEC_DMA_RX_0:
+- case RX_CODEC_DMA_RX_1:
+- case TX_CODEC_DMA_TX_0:
+- case TX_CODEC_DMA_TX_1:
+- case TX_CODEC_DMA_TX_2:
+- case TX_CODEC_DMA_TX_3:
+- break;
+- default:
++ if (!qcom_snd_is_sdw_dai(cpu_dai->id))
+ return 0;
+- }
+
+ if (*stream_prepared)
+ return 0;
+@@ -101,9 +124,7 @@ int qcom_snd_sdw_prepare(struct snd_pcm_
+ }
+ EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
+
+-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+- struct snd_pcm_hw_params *params,
+- struct sdw_stream_runtime **psruntime)
++struct sdw_stream_runtime *qcom_snd_sdw_get_stream(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+@@ -111,21 +132,23 @@ int qcom_snd_sdw_hw_params(struct snd_pc
+ struct sdw_stream_runtime *sruntime;
+ int i;
+
+- switch (cpu_dai->id) {
+- case WSA_CODEC_DMA_RX_0:
+- case RX_CODEC_DMA_RX_0:
+- case RX_CODEC_DMA_RX_1:
+- case TX_CODEC_DMA_TX_0:
+- case TX_CODEC_DMA_TX_1:
+- case TX_CODEC_DMA_TX_2:
+- case TX_CODEC_DMA_TX_3:
+- for_each_rtd_codec_dais(rtd, i, codec_dai) {
+- sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
+- if (sruntime != ERR_PTR(-ENOTSUPP))
+- *psruntime = sruntime;
+- }
+- break;
++ if (!qcom_snd_is_sdw_dai(cpu_dai->id))
++ return NULL;
++
++ for_each_rtd_codec_dais(rtd, i, codec_dai) {
++ sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
++ if (sruntime != ERR_PTR(-ENOTSUPP))
++ return sruntime;
+ }
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(qcom_snd_sdw_get_stream);
++
++int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params,
++ struct sdw_stream_runtime **psruntime)
++{
++ *psruntime = qcom_snd_sdw_get_stream(substream);
+
+ return 0;
+
+@@ -138,23 +161,13 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+- switch (cpu_dai->id) {
+- case WSA_CODEC_DMA_RX_0:
+- case WSA_CODEC_DMA_RX_1:
+- case RX_CODEC_DMA_RX_0:
+- case RX_CODEC_DMA_RX_1:
+- case TX_CODEC_DMA_TX_0:
+- case TX_CODEC_DMA_TX_1:
+- case TX_CODEC_DMA_TX_2:
+- case TX_CODEC_DMA_TX_3:
+- if (sruntime && *stream_prepared) {
+- sdw_disable_stream(sruntime);
+- sdw_deprepare_stream(sruntime);
+- *stream_prepared = false;
+- }
+- break;
+- default:
+- break;
++ if (!qcom_snd_is_sdw_dai(cpu_dai->id))
++ return 0;
++
++ if (sruntime && *stream_prepared) {
++ sdw_disable_stream(sruntime);
++ sdw_deprepare_stream(sruntime);
++ *stream_prepared = false;
+ }
+
+ return 0;
+--- a/sound/soc/qcom/sdw.h
++++ b/sound/soc/qcom/sdw.h
+@@ -10,6 +10,7 @@ int qcom_snd_sdw_startup(struct snd_pcm_
+ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+ struct sdw_stream_runtime *runtime,
+ bool *stream_prepared);
++struct sdw_stream_runtime *qcom_snd_sdw_get_stream(struct snd_pcm_substream *stream);
+ int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sdw_stream_runtime **psruntime);
+--- a/sound/soc/qcom/sm8250.c
++++ b/sound/soc/qcom/sm8250.c
+@@ -86,7 +86,7 @@ static void sm2450_snd_shutdown(struct s
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sm8250_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+- struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
++ struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
+
+ data->sruntime[cpu_dai->id] = NULL;
+ sdw_release_stream(sruntime);
+--- a/sound/soc/qcom/x1e80100.c
++++ b/sound/soc/qcom/x1e80100.c
+@@ -55,7 +55,7 @@ static void x1e80100_snd_shutdown(struct
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct x1e80100_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+- struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
++ struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
+
+ data->sruntime[cpu_dai->id] = NULL;
+ sdw_release_stream(sruntime);
--- /dev/null
+From stable+bounces-204839-greg=kroah.com@vger.kernel.org Mon Jan 5 17:15:20 2026
+From: Biju <biju.das.au@gmail.com>
+Date: Mon, 5 Jan 2026 15:33:04 +0000
+Subject: ASoC: renesas: rz-ssi: Fix channel swap issue in full duplex mode
+To: stable@vger.kernel.org
+Cc: Biju Das <biju.das.jz@bp.renesas.com>, stable@kernel.org, Tony Tang <tony.tang.ks@renesas.com>, Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>, Mark Brown <broonie@kernel.org>
+Message-ID: <20260105153304.252300-1-biju.das.jz@bp.renesas.com>
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 52a525011cb8e293799a085436f026f2958403f9 ]
+
+Full duplex audio starts in half duplex mode and then switches to full
+duplex mode (with another FIFO reset) once both the playback and
+capture streams are available, which leads to a random left/right
+channel swap. Fix this channel swap issue by detecting the full duplex
+condition via the struct dup state populated in the startup() callback,
+and by synchronizing the start of playback and capture in
+rz_ssi_start() so both are enabled at the same time.
+
+Cc: stable@kernel.org
+Fixes: 4f8cd05a4305 ("ASoC: sh: rz-ssi: Add full duplex support")
+Co-developed-by: Tony Tang <tony.tang.ks@renesas.com>
+Signed-off-by: Tony Tang <tony.tang.ks@renesas.com>
+Reviewed-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Link: https://patch.msgid.link/20251114073709.4376-2-biju.das.jz@bp.renesas.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sh/rz-ssi.c | 51 ++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 43 insertions(+), 8 deletions(-)
+
+--- a/sound/soc/sh/rz-ssi.c
++++ b/sound/soc/sh/rz-ssi.c
+@@ -132,6 +132,12 @@ struct rz_ssi_priv {
+ bool bckp_rise; /* Bit clock polarity (SSICR.BCKP) */
+ bool dma_rt;
+
++ struct {
++ bool tx_active;
++ bool rx_active;
++ bool one_stream_triggered;
++ } dup;
++
+ /* Full duplex communication support */
+ struct {
+ unsigned int rate;
+@@ -352,13 +358,12 @@ static int rz_ssi_start(struct rz_ssi_pr
+ bool is_full_duplex;
+ u32 ssicr, ssifcr;
+
+- is_full_duplex = rz_ssi_is_stream_running(&ssi->playback) ||
+- rz_ssi_is_stream_running(&ssi->capture);
++ is_full_duplex = ssi->dup.tx_active && ssi->dup.rx_active;
+ ssicr = rz_ssi_reg_readl(ssi, SSICR);
+ ssifcr = rz_ssi_reg_readl(ssi, SSIFCR);
+ if (!is_full_duplex) {
+ ssifcr &= ~0xF;
+- } else {
++ } else if (ssi->dup.one_stream_triggered) {
+ rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0);
+ rz_ssi_set_idle(ssi);
+ ssifcr &= ~SSIFCR_FIFO_RST;
+@@ -394,12 +399,16 @@ static int rz_ssi_start(struct rz_ssi_pr
+ SSISR_RUIRQ), 0);
+
+ strm->running = 1;
+- if (is_full_duplex)
+- ssicr |= SSICR_TEN | SSICR_REN;
+- else
++ if (!is_full_duplex) {
+ ssicr |= is_play ? SSICR_TEN : SSICR_REN;
+-
+- rz_ssi_reg_writel(ssi, SSICR, ssicr);
++ rz_ssi_reg_writel(ssi, SSICR, ssicr);
++ } else if (ssi->dup.one_stream_triggered) {
++ ssicr |= SSICR_TEN | SSICR_REN;
++ rz_ssi_reg_writel(ssi, SSICR, ssicr);
++ ssi->dup.one_stream_triggered = false;
++ } else {
++ ssi->dup.one_stream_triggered = true;
++ }
+
+ return 0;
+ }
+@@ -897,6 +906,30 @@ static int rz_ssi_dai_set_fmt(struct snd
+ return 0;
+ }
+
++static int rz_ssi_startup(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ ssi->dup.tx_active = true;
++ else
++ ssi->dup.rx_active = true;
++
++ return 0;
++}
++
++static void rz_ssi_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ ssi->dup.tx_active = false;
++ else
++ ssi->dup.rx_active = false;
++}
++
+ static bool rz_ssi_is_valid_hw_params(struct rz_ssi_priv *ssi, unsigned int rate,
+ unsigned int channels,
+ unsigned int sample_width,
+@@ -962,6 +995,8 @@ static int rz_ssi_dai_hw_params(struct s
+ }
+
+ static const struct snd_soc_dai_ops rz_ssi_dai_ops = {
++ .startup = rz_ssi_startup,
++ .shutdown = rz_ssi_shutdown,
+ .trigger = rz_ssi_dai_trigger,
+ .set_fmt = rz_ssi_dai_set_fmt,
+ .hw_params = rz_ssi_dai_hw_params,
--- /dev/null
+From stable+bounces-204816-greg=kroah.com@vger.kernel.org Mon Jan 5 15:54:31 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 09:47:43 -0500
+Subject: ASoC: renesas: rz-ssi: Fix rz_ssi_priv::hw_params_cache::sample_width
+To: stable@vger.kernel.org
+Cc: Biju Das <biju.das.jz@bp.renesas.com>, stable@kernel.org, Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105144743.2610088-1-sashal@kernel.org>
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 2bae7beda19f3b2dc6ab2062c94df19c27923712 ]
+
+strm->sample_width is not filled in during rz_ssi_dai_hw_params(), so a
+wrong value is used when caching sample_width in struct hw_params_cache.
+Fix this by replacing 'strm->sample_width' with 'params_width(params)'
+in rz_ssi_dai_hw_params(). After this, drop the sample_width member from
+struct rz_ssi_stream as it is now unused.
+
+Cc: stable@kernel.org
+Fixes: 4f8cd05a4305 ("ASoC: sh: rz-ssi: Add full duplex support")
+Reviewed-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Link: https://patch.msgid.link/20251114073709.4376-3-biju.das.jz@bp.renesas.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sh/rz-ssi.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/sound/soc/sh/rz-ssi.c
++++ b/sound/soc/sh/rz-ssi.c
+@@ -12,6 +12,7 @@
+ #include <linux/module.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
++#include <sound/pcm_params.h>
+ #include <sound/soc.h>
+
+ /* REGISTER OFFSET */
+@@ -85,7 +86,6 @@ struct rz_ssi_stream {
+ int fifo_sample_size; /* sample capacity of SSI FIFO */
+ int dma_buffer_pos; /* The address for the next DMA descriptor */
+ int period_counter; /* for keeping track of periods transferred */
+- int sample_width;
+ int buffer_pos; /* current frame position in the buffer */
+ int running; /* 0=stopped, 1=running */
+
+@@ -231,10 +231,7 @@ static inline bool rz_ssi_is_stream_runn
+ static void rz_ssi_stream_init(struct rz_ssi_stream *strm,
+ struct snd_pcm_substream *substream)
+ {
+- struct snd_pcm_runtime *runtime = substream->runtime;
+-
+ rz_ssi_set_substream(strm, substream);
+- strm->sample_width = samples_to_bytes(runtime, 1);
+ strm->dma_buffer_pos = 0;
+ strm->period_counter = 0;
+ strm->buffer_pos = 0;
+@@ -960,9 +957,9 @@ static int rz_ssi_dai_hw_params(struct s
+ struct snd_soc_dai *dai)
+ {
+ struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
+- struct rz_ssi_stream *strm = rz_ssi_stream_get(ssi, substream);
+ unsigned int sample_bits = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min;
++ unsigned int sample_width = params_width(params);
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+
+@@ -980,16 +977,14 @@ static int rz_ssi_dai_hw_params(struct s
+
+ if (rz_ssi_is_stream_running(&ssi->playback) ||
+ rz_ssi_is_stream_running(&ssi->capture)) {
+- if (rz_ssi_is_valid_hw_params(ssi, rate, channels,
+- strm->sample_width, sample_bits))
++ if (rz_ssi_is_valid_hw_params(ssi, rate, channels, sample_width, sample_bits))
+ return 0;
+
+ dev_err(ssi->dev, "Full duplex needs same HW params\n");
+ return -EINVAL;
+ }
+
+- rz_ssi_cache_hw_params(ssi, rate, channels, strm->sample_width,
+- sample_bits);
++ rz_ssi_cache_hw_params(ssi, rate, channels, sample_width, sample_bits);
+
+ return rz_ssi_clk_setup(ssi, rate, channels);
+ }
--- /dev/null
+From stable+bounces-204835-greg=kroah.com@vger.kernel.org Mon Jan 5 16:17:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 10:11:02 -0500
+Subject: block: handle zone management operations completions
+To: stable@vger.kernel.org
+Cc: Damien Le Moal <dlemoal@kernel.org>, Christoph Hellwig <hch@lst.de>, Johannes Thumshirn <johannes.thumshirn@wdc.com>, Chaitanya Kulkarni <kch@nvidia.com>, Hannes Reinecke <hare@suse.de>, "Martin K. Petersen" <martin.petersen@oracle.com>, Jens Axboe <axboe@kernel.dk>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105151102.2626035-1-sashal@kernel.org>
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit efae226c2ef19528ffd81d29ba0eecf1b0896ca2 ]
+
+The functions blk_zone_wplug_handle_reset_or_finish() and
+blk_zone_wplug_handle_reset_all() both modify the zone write pointer
+offset of zone write plugs that are the target of a reset, reset all or
+finish zone management operation. However, these functions do this
+modification before the BIO is executed. So if the zone operation fails,
+the modified zone write pointer offsets become invalid.
+
+Avoid this by modifying the zone write pointer offset of a zone write
+plug that is the target of a zone management operation when the
+operation completes. To do so, modify blk_zone_bio_endio() to call the
+new function blk_zone_mgmt_bio_endio() which in turn calls the functions
+blk_zone_reset_all_bio_endio(), blk_zone_reset_bio_endio() or
+blk_zone_finish_bio_endio() depending on the operation of the completed
+BIO, to modify a zone write plug write pointer offset accordingly.
+These functions are called only if the BIO execution was successful.
+
+Fixes: dd291d77cc90 ("block: Introduce zone write plugging")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ adapted bdev_zone_is_seq() check to disk_zone_is_conv() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-zoned.c | 141 +++++++++++++++++++++++++++++++++++-------------------
+ block/blk.h | 14 +++++
+ 2 files changed, 106 insertions(+), 49 deletions(-)
+
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -73,6 +73,11 @@ struct blk_zone_wplug {
+ struct gendisk *disk;
+ };
+
++static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
++{
++ return 1U << disk->zone_wplugs_hash_bits;
++}
++
+ /*
+ * Zone write plug flags bits:
+ * - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
+@@ -712,71 +717,91 @@ static int disk_zone_sync_wp_offset(stru
+ disk_report_zones_cb, &args);
+ }
+
+-static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
+- unsigned int wp_offset)
++static void blk_zone_reset_bio_endio(struct bio *bio)
+ {
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+- sector_t sector = bio->bi_iter.bi_sector;
+ struct blk_zone_wplug *zwplug;
+- unsigned long flags;
+-
+- /* Conventional zones cannot be reset nor finished. */
+- if (disk_zone_is_conv(disk, sector)) {
+- bio_io_error(bio);
+- return true;
+- }
+-
+- /*
+- * No-wait reset or finish BIOs do not make much sense as the callers
+- * issue these as blocking operations in most cases. To avoid issues
+- * the BIO execution potentially failing with BLK_STS_AGAIN, warn about
+- * REQ_NOWAIT being set and ignore that flag.
+- */
+- if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
+- bio->bi_opf &= ~REQ_NOWAIT;
+
+ /*
+- * If we have a zone write plug, set its write pointer offset to 0
+- * (reset case) or to the zone size (finish case). This will abort all
+- * BIOs plugged for the target zone. It is fine as resetting or
+- * finishing zones while writes are still in-flight will result in the
++ * If we have a zone write plug, set its write pointer offset to 0.
++ * This will abort all BIOs plugged for the target zone. It is fine as
++ * resetting zones while writes are still in-flight will result in the
+ * writes failing anyway.
+ */
+- zwplug = disk_get_zone_wplug(disk, sector);
++ zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
+ if (zwplug) {
++ unsigned long flags;
++
+ spin_lock_irqsave(&zwplug->lock, flags);
+- disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
++ disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ disk_put_zone_wplug(zwplug);
+ }
+-
+- return false;
+ }
+
+-static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
++static void blk_zone_reset_all_bio_endio(struct bio *bio)
+ {
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct blk_zone_wplug *zwplug;
+ unsigned long flags;
+- sector_t sector;
++ unsigned int i;
+
+- /*
+- * Set the write pointer offset of all zone write plugs to 0. This will
+- * abort all plugged BIOs. It is fine as resetting zones while writes
+- * are still in-flight will result in the writes failing anyway.
+- */
+- for (sector = 0; sector < get_capacity(disk);
+- sector += disk->queue->limits.chunk_sectors) {
+- zwplug = disk_get_zone_wplug(disk, sector);
+- if (zwplug) {
++ /* Update the condition of all zone write plugs. */
++ rcu_read_lock();
++ for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
++ hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[i],
++ node) {
+ spin_lock_irqsave(&zwplug->lock, flags);
+ disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+- disk_put_zone_wplug(zwplug);
+ }
+ }
++ rcu_read_unlock();
++}
+
+- return false;
++static void blk_zone_finish_bio_endio(struct bio *bio)
++{
++ struct block_device *bdev = bio->bi_bdev;
++ struct gendisk *disk = bdev->bd_disk;
++ struct blk_zone_wplug *zwplug;
++
++ /*
++ * If we have a zone write plug, set its write pointer offset to the
++ * zone size. This will abort all BIOs plugged for the target zone. It
++ * is fine as resetting zones while writes are still in-flight will
++ * result in the writes failing anyway.
++ */
++ zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
++ if (zwplug) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&zwplug->lock, flags);
++ disk_zone_wplug_set_wp_offset(disk, zwplug,
++ bdev_zone_sectors(bdev));
++ spin_unlock_irqrestore(&zwplug->lock, flags);
++ disk_put_zone_wplug(zwplug);
++ }
++}
++
++void blk_zone_mgmt_bio_endio(struct bio *bio)
++{
++ /* If the BIO failed, we have nothing to do. */
++ if (bio->bi_status != BLK_STS_OK)
++ return;
++
++ switch (bio_op(bio)) {
++ case REQ_OP_ZONE_RESET:
++ blk_zone_reset_bio_endio(bio);
++ return;
++ case REQ_OP_ZONE_RESET_ALL:
++ blk_zone_reset_all_bio_endio(bio);
++ return;
++ case REQ_OP_ZONE_FINISH:
++ blk_zone_finish_bio_endio(bio);
++ return;
++ default:
++ return;
++ }
+ }
+
+ static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
+@@ -1119,6 +1144,32 @@ static void blk_zone_wplug_handle_native
+ disk_put_zone_wplug(zwplug);
+ }
+
++static bool blk_zone_wplug_handle_zone_mgmt(struct bio *bio)
++{
++ struct gendisk *disk = bio->bi_bdev->bd_disk;
++
++ if (bio_op(bio) != REQ_OP_ZONE_RESET_ALL &&
++ disk_zone_is_conv(disk, bio->bi_iter.bi_sector)) {
++ /*
++ * Zone reset and zone finish operations do not apply to
++ * conventional zones.
++ */
++ bio_io_error(bio);
++ return true;
++ }
++
++ /*
++ * No-wait zone management BIOs do not make much sense as the callers
++ * issue these as blocking operations in most cases. To avoid issues
++ * with the BIO execution potentially failing with BLK_STS_AGAIN, warn
++ * about REQ_NOWAIT being set and ignore that flag.
++ */
++ if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
++ bio->bi_opf &= ~REQ_NOWAIT;
++
++ return false;
++}
++
+ /**
+ * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
+ * @bio: The BIO being submitted
+@@ -1166,12 +1217,9 @@ bool blk_zone_plug_bio(struct bio *bio,
+ case REQ_OP_WRITE_ZEROES:
+ return blk_zone_wplug_handle_write(bio, nr_segs);
+ case REQ_OP_ZONE_RESET:
+- return blk_zone_wplug_handle_reset_or_finish(bio, 0);
+ case REQ_OP_ZONE_FINISH:
+- return blk_zone_wplug_handle_reset_or_finish(bio,
+- bdev_zone_sectors(bdev));
+ case REQ_OP_ZONE_RESET_ALL:
+- return blk_zone_wplug_handle_reset_all(bio);
++ return blk_zone_wplug_handle_zone_mgmt(bio);
+ default:
+ return false;
+ }
+@@ -1328,11 +1376,6 @@ put_zwplug:
+ disk_put_zone_wplug(zwplug);
+ }
+
+-static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
+-{
+- return 1U << disk->zone_wplugs_hash_bits;
+-}
+-
+ void disk_init_zone_resources(struct gendisk *disk)
+ {
+ spin_lock_init(&disk->zone_wplugs_lock);
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -486,10 +486,24 @@ static inline void blk_zone_update_reque
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
+ bio->bi_iter.bi_sector = rq->__sector;
+ }
++void blk_zone_mgmt_bio_endio(struct bio *bio);
+ void blk_zone_write_plug_bio_endio(struct bio *bio);
+ static inline void blk_zone_bio_endio(struct bio *bio)
+ {
+ /*
++ * Zone management BIOs may impact zone write plugs (e.g. a zone reset
++ * changes a zone write plug zone write pointer offset), but these
++ * operation do not go through zone write plugging as they may operate
++ * on zones that do not have a zone write
++ * plug. blk_zone_mgmt_bio_endio() handles the potential changes to zone
++ * write plugs that are present.
++ */
++ if (op_is_zone_mgmt(bio_op(bio))) {
++ blk_zone_mgmt_bio_endio(bio);
++ return;
++ }
++
++ /*
+ * For write BIOs to zoned devices, signal the completion of the BIO so
+ * that the next write BIO can be submitted by zone write plugging.
+ */
--- /dev/null
+From 3d970eda003441f66551a91fda16478ac0711617 Mon Sep 17 00:00:00 2001
+From: Ankit Garg <nktgrg@google.com>
+Date: Fri, 19 Dec 2025 10:29:45 +0000
+Subject: gve: defer interrupt enabling until NAPI registration
+
+From: Ankit Garg <nktgrg@google.com>
+
+commit 3d970eda003441f66551a91fda16478ac0711617 upstream.
+
+Currently, interrupts are automatically enabled immediately upon
+request. This allows an interrupt to fire before the associated NAPI
+context is fully initialized and causes failures like the one below:
+
+[ 0.946369] Call Trace:
+[ 0.946369] <IRQ>
+[ 0.946369] __napi_poll+0x2a/0x1e0
+[ 0.946369] net_rx_action+0x2f9/0x3f0
+[ 0.946369] handle_softirqs+0xd6/0x2c0
+[ 0.946369] ? handle_edge_irq+0xc1/0x1b0
+[ 0.946369] __irq_exit_rcu+0xc3/0xe0
+[ 0.946369] common_interrupt+0x81/0xa0
+[ 0.946369] </IRQ>
+[ 0.946369] <TASK>
+[ 0.946369] asm_common_interrupt+0x22/0x40
+[ 0.946369] RIP: 0010:pv_native_safe_halt+0xb/0x10
+
+Use the `IRQF_NO_AUTOEN` flag when requesting interrupts to prevent
+automatic enablement, and explicitly enable the interrupt in the NAPI
+initialization path (and disable it during NAPI teardown).
+
+This ensures that the interrupt lifecycle is strictly coupled with the
+readiness of the NAPI context.
+
+Cc: stable@vger.kernel.org
+Fixes: 1dfc2e46117e ("gve: Refactor napi add and remove functions")
+Signed-off-by: Ankit Garg <nktgrg@google.com>
+Reviewed-by: Jordan Rhee <jordanrhee@google.com>
+Reviewed-by: Joshua Washington <joshwash@google.com>
+Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
+Link: https://patch.msgid.link/20251219102945.2193617-1-hramamurthy@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 2 +-
+ drivers/net/ethernet/google/gve/gve_utils.c | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -500,7 +500,7 @@ static int gve_alloc_notify_blocks(struc
+ block->priv = priv;
+ err = request_irq(priv->msix_vectors[msix_idx].vector,
+ gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
+- 0, block->name, block);
++ IRQF_NO_AUTOEN, block->name, block);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to receive msix vector %d\n", i);
+--- a/drivers/net/ethernet/google/gve/gve_utils.c
++++ b/drivers/net/ethernet/google/gve/gve_utils.c
+@@ -111,11 +111,13 @@ void gve_add_napi(struct gve_priv *priv,
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
++ enable_irq(block->irq);
+ }
+
+ void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+ {
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
++ disable_irq(block->irq);
+ netif_napi_del(&block->napi);
+ }
--- /dev/null
+From stable+bounces-204925-greg=kroah.com@vger.kernel.org Mon Jan 5 22:05:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:05:30 -0500
+Subject: media: amphion: Add a frame flush mode for decoder
+To: stable@vger.kernel.org
+Cc: Ming Qian <ming.qian@oss.nxp.com>, Nicolas Dufresne <nicolas.dufresne@collabora.com>, Sebastian Fricke <sebastian.fricke@collabora.com>, Hans Verkuil <hverkuil@xs4all.nl>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105210532.2800255-1-sashal@kernel.org>
+
+From: Ming Qian <ming.qian@oss.nxp.com>
+
+[ Upstream commit 9ea16ba6eaf93f25f61855751f71e2e701709ddf ]
+
+By default the amphion decoder will pre-parse 3 frames before starting
+to decode the first frame. Alternatively, a block of flush padding data
+can be appended to the frame, which will ensure that the decoder can
+start decoding immediately after parsing the flush padding data, thus
+potentially reducing decoding latency.
+
+This mode was previously only enabled when the display delay was set to
+0. Allow the user to manually toggle the use of that mode via a module
+parameter called low_latency, which enables the mode without changing
+the display order.
+
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Sebastian Fricke <sebastian.fricke@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
+Stable-dep-of: 634c2cd17bd0 ("media: amphion: Remove vpu_vb_is_codecconfig")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/amphion/vpu_malone.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -25,6 +25,10 @@
+ #include "vpu_imx8q.h"
+ #include "vpu_malone.h"
+
++static bool low_latency;
++module_param(low_latency, bool, 0644);
++MODULE_PARM_DESC(low_latency, "Set low latency frame flush mode: 0 (disable) or 1 (enable)");
++
+ #define CMD_SIZE 25600
+ #define MSG_SIZE 25600
+ #define CODEC_SIZE 0x1000
+@@ -1562,7 +1566,15 @@ static int vpu_malone_input_frame_data(s
+
+ vpu_malone_update_wptr(str_buf, wptr);
+
+- if (disp_imm && !vpu_vb_is_codecconfig(vbuf)) {
++ /*
++ * Enable the low latency flush mode if display delay is set to 0
++ * or the low latency frame flush mode if it is set to 1.
++ * The low latency flush mode requires some padding data to be appended to each frame,
++ * but there must not be any padding data between the sequence header and the frame.
++ * This module is currently only supported for the H264 and HEVC formats,
++ * for other formats, vpu_malone_add_scode() will return 0.
++ */
++ if ((disp_imm || low_latency) && !vpu_vb_is_codecconfig(vbuf)) {
+ ret = vpu_malone_add_scode(inst->core->iface,
+ inst->id,
+ &inst->stream_buffer,
--- /dev/null
+From stable+bounces-204926-greg=kroah.com@vger.kernel.org Mon Jan 5 22:05:39 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:05:31 -0500
+Subject: media: amphion: Make some vpu_v4l2 functions static
+To: stable@vger.kernel.org
+Cc: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>, Ming Qian <ming.qian@oss.nxp.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105210532.2800255-2-sashal@kernel.org>
+
+From: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+[ Upstream commit 5d1e54bb4dc6741284a3ed587e994308ddee2f16 ]
+
+Some functions defined in vpu_v4l2.c are never used outside of that
+compilation unit. Make them static.
+
+Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Reviewed-by: Ming Qian <ming.qian@oss.nxp.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Stable-dep-of: 634c2cd17bd0 ("media: amphion: Remove vpu_vb_is_codecconfig")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/amphion/vpu_v4l2.c | 12 +++++++++---
+ drivers/media/platform/amphion/vpu_v4l2.h | 8 --------
+ 2 files changed, 9 insertions(+), 11 deletions(-)
+
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -24,6 +24,11 @@
+ #include "vpu_msgs.h"
+ #include "vpu_helpers.h"
+
++static char *vpu_type_name(u32 type)
++{
++ return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
++}
++
+ void vpu_inst_lock(struct vpu_inst *inst)
+ {
+ mutex_lock(&inst->lock);
+@@ -42,7 +47,7 @@ dma_addr_t vpu_get_vb_phy_addr(struct vb
+ vb->planes[plane_no].data_offset;
+ }
+
+-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
++static unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
+ {
+ if (plane_no >= vb->num_planes)
+ return 0;
+@@ -81,7 +86,7 @@ void vpu_v4l2_set_error(struct vpu_inst
+ vpu_inst_unlock(inst);
+ }
+
+-int vpu_notify_eos(struct vpu_inst *inst)
++static int vpu_notify_eos(struct vpu_inst *inst)
+ {
+ static const struct v4l2_event ev = {
+ .id = 0,
+@@ -562,7 +567,8 @@ static void vpu_vb2_buf_finish(struct vb
+ call_void_vop(inst, on_queue_empty, q->type);
+ }
+
+-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
++static void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type,
++ enum vb2_buffer_state state)
+ {
+ struct vb2_v4l2_buffer *buf;
+
+--- a/drivers/media/platform/amphion/vpu_v4l2.h
++++ b/drivers/media/platform/amphion/vpu_v4l2.h
+@@ -26,15 +26,12 @@ void vpu_skip_frame(struct vpu_inst *ins
+ struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence);
+ struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx);
+ void vpu_v4l2_set_error(struct vpu_inst *inst);
+-int vpu_notify_eos(struct vpu_inst *inst);
+ int vpu_notify_source_change(struct vpu_inst *inst);
+ int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos);
+-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state);
+ int vpu_get_num_buffers(struct vpu_inst *inst, u32 type);
+ bool vpu_is_source_empty(struct vpu_inst *inst);
+
+ dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no);
+-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no);
+ static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
+ {
+ if (V4L2_TYPE_IS_OUTPUT(type))
+@@ -43,11 +40,6 @@ static inline struct vpu_format *vpu_get
+ return &inst->cap_format;
+ }
+
+-static inline char *vpu_type_name(u32 type)
+-{
+- return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
+-}
+-
+ static inline int vpu_vb_is_codecconfig(struct vb2_v4l2_buffer *vbuf)
+ {
+ #ifdef V4L2_BUF_FLAG_CODECCONFIG
--- /dev/null
+From stable+bounces-204927-greg=kroah.com@vger.kernel.org Mon Jan 5 22:05:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:05:32 -0500
+Subject: media: amphion: Remove vpu_vb_is_codecconfig
+To: stable@vger.kernel.org
+Cc: Ming Qian <ming.qian@oss.nxp.com>, Nicolas Dufresne <nicolas.dufresne@collabora.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105210532.2800255-3-sashal@kernel.org>
+
+From: Ming Qian <ming.qian@oss.nxp.com>
+
+[ Upstream commit 634c2cd17bd021487c57b95973bddb14be8002ff ]
+
+Currently the function vpu_vb_is_codecconfig() always returns 0.
+Delete it and its related code.
+
+Fixes: 3cd084519c6f ("media: amphion: add vpu v4l2 m2m support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/amphion/vpu_malone.c | 23 +++--------------------
+ drivers/media/platform/amphion/vpu_v4l2.c | 10 ----------
+ drivers/media/platform/amphion/vpu_v4l2.h | 10 ----------
+ 3 files changed, 3 insertions(+), 40 deletions(-)
+
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -1315,22 +1315,18 @@ static int vpu_malone_insert_scode_vc1_g
+ {
+ if (!scode->inst->total_input_count)
+ return 0;
+- if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
+- scode->need_data = 0;
+ return 0;
+ }
+
+ static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode)
+ {
+- struct vb2_v4l2_buffer *vbuf;
+ u8 nal_hdr[MALONE_VC1_NAL_HEADER_LEN];
+ u32 *data = NULL;
+ int ret;
+
+- vbuf = to_vb2_v4l2_buffer(scode->vb);
+ data = vb2_plane_vaddr(scode->vb, 0);
+
+- if (scode->inst->total_input_count == 0 || vpu_vb_is_codecconfig(vbuf))
++ if (scode->inst->total_input_count == 0)
+ return 0;
+ if (MALONE_VC1_CONTAIN_NAL(*data))
+ return 0;
+@@ -1351,8 +1347,6 @@ static int vpu_malone_insert_scode_vc1_l
+ int size = 0;
+ u8 rcv_seqhdr[MALONE_VC1_RCV_SEQ_HEADER_LEN];
+
+- if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
+- scode->need_data = 0;
+ if (scode->inst->total_input_count)
+ return 0;
+ scode->need_data = 0;
+@@ -1538,7 +1532,7 @@ static int vpu_malone_input_frame_data(s
+ scode.vb = vb;
+ scode.wptr = wptr;
+ scode.need_data = 1;
+- if (vbuf->sequence == 0 || vpu_vb_is_codecconfig(vbuf))
++ if (vbuf->sequence == 0)
+ ret = vpu_malone_insert_scode(&scode, SCODE_SEQUENCE);
+
+ if (ret < 0)
+@@ -1574,7 +1568,7 @@ static int vpu_malone_input_frame_data(s
+ * This module is currently only supported for the H264 and HEVC formats,
+ * for other formats, vpu_malone_add_scode() will return 0.
+ */
+- if ((disp_imm || low_latency) && !vpu_vb_is_codecconfig(vbuf)) {
++ if (disp_imm || low_latency) {
+ ret = vpu_malone_add_scode(inst->core->iface,
+ inst->id,
+ &inst->stream_buffer,
+@@ -1621,7 +1615,6 @@ int vpu_malone_input_frame(struct vpu_sh
+ struct vpu_inst *inst, struct vb2_buffer *vb)
+ {
+ struct vpu_dec_ctrl *hc = shared->priv;
+- struct vb2_v4l2_buffer *vbuf;
+ struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[inst->id];
+ u32 disp_imm = hc->codec_param[inst->id].disp_imm;
+ u32 size;
+@@ -1635,16 +1628,6 @@ int vpu_malone_input_frame(struct vpu_sh
+ return ret;
+ size = ret;
+
+- /*
+- * if buffer only contain codec data, and the timestamp is invalid,
+- * don't put the invalid timestamp to resync
+- * merge the data to next frame
+- */
+- vbuf = to_vb2_v4l2_buffer(vb);
+- if (vpu_vb_is_codecconfig(vbuf)) {
+- inst->extra_size += size;
+- return 0;
+- }
+ if (inst->extra_size) {
+ size += inst->extra_size;
+ inst->extra_size = 0;
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -349,16 +349,6 @@ struct vb2_v4l2_buffer *vpu_next_src_buf
+ if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
+ return NULL;
+
+- while (vpu_vb_is_codecconfig(src_buf)) {
+- v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
+- vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
+- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+-
+- src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
+- if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
+- return NULL;
+- }
+-
+ return src_buf;
+ }
+
+--- a/drivers/media/platform/amphion/vpu_v4l2.h
++++ b/drivers/media/platform/amphion/vpu_v4l2.h
+@@ -39,14 +39,4 @@ static inline struct vpu_format *vpu_get
+ else
+ return &inst->cap_format;
+ }
+-
+-static inline int vpu_vb_is_codecconfig(struct vb2_v4l2_buffer *vbuf)
+-{
+-#ifdef V4L2_BUF_FLAG_CODECCONFIG
+- return (vbuf->flags & V4L2_BUF_FLAG_CODECCONFIG) ? 1 : 0;
+-#else
+- return 0;
+-#endif
+-}
+-
+ #endif
--- /dev/null
+From stable+bounces-204957-greg=kroah.com@vger.kernel.org Tue Jan 6 00:33:25 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 18:31:58 -0500
+Subject: media: mediatek: vcodec: Use spinlock for context list protection lock
+To: stable@vger.kernel.org
+Cc: Chen-Yu Tsai <wenst@chromium.org>, Yunfei Dong <yunfei.dong@mediatek.com>, Fei Shao <fshao@chromium.org>, Tomasz Figa <tfiga@chromium.org>, Nicolas Dufresne <nicolas.dufresne@collabora.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105233158.2846053-1-sashal@kernel.org>
+
+From: Chen-Yu Tsai <wenst@chromium.org>
+
+[ Upstream commit a5844227e0f030d2af2d85d4aed10c5eca6ca176 ]
+
+Previously, a mutex was added to protect the encoder and decoder context
+lists from unexpected changes originating from the SCP IP block, which
+could cause the context pointer to become invalid and result in a NULL
+pointer dereference in the IPI handler.
+
+It turns out that on the MT8173 the VPU IPI handler is called from hard
+IRQ context, so taking the mutex there triggers a big warning from the
+scheduler. This was first
+reported downstream on the ChromeOS kernels, but is also reproducible
+on mainline using Fluster with the FFmpeg v4l2m2m decoders. Even though
+the actual capture format is not supported, the affected code paths
+are triggered.
+
+Since this lock just protects the context list and operations on it are
+very fast, it should be OK to switch to a spinlock.
+
+Fixes: 6467cda18c9f ("media: mediatek: vcodec: adding lock to protect decoder context list")
+Fixes: afaaf3a0f647 ("media: mediatek: vcodec: adding lock to protect encoder context list")
+Cc: Yunfei Dong <yunfei.dong@mediatek.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Chen-Yu Tsai <wenst@chromium.org>
+Reviewed-by: Fei Shao <fshao@chromium.org>
+Reviewed-by: Tomasz Figa <tfiga@chromium.org>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ adapted file_to_dec_ctx() and file_to_enc_ctx() helper calls to equivalent fh_to_dec_ctx(file->private_data) and fh_to_enc_ctx(file->private_data) pattern ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c | 10 +++++---
+ drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c | 12 +++++-----
+ drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h | 2 -
+ drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c | 5 ++--
+ drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c | 12 +++++-----
+ drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h | 2 -
+ drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c | 5 ++--
+ 7 files changed, 28 insertions(+), 20 deletions(-)
+
+--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
++++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+@@ -47,30 +47,32 @@ static void mtk_vcodec_vpu_reset_dec_han
+ {
+ struct mtk_vcodec_dec_dev *dev = priv;
+ struct mtk_vcodec_dec_ctx *ctx;
++ unsigned long flags;
+
+ dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
+
+- mutex_lock(&dev->dev_ctx_lock);
++ spin_lock_irqsave(&dev->dev_ctx_lock, flags);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
+ }
+- mutex_unlock(&dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
+ }
+
+ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
+ {
+ struct mtk_vcodec_enc_dev *dev = priv;
+ struct mtk_vcodec_enc_ctx *ctx;
++ unsigned long flags;
+
+ dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
+
+- mutex_lock(&dev->dev_ctx_lock);
++ spin_lock_irqsave(&dev->dev_ctx_lock, flags);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
+ }
+- mutex_unlock(&dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
+ }
+
+ static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
+--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+@@ -198,6 +198,7 @@ static int fops_vcodec_open(struct file
+ struct mtk_vcodec_dec_ctx *ctx = NULL;
+ int ret = 0, i, hw_count;
+ struct vb2_queue *src_vq;
++ unsigned long flags;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+@@ -268,9 +269,9 @@ static int fops_vcodec_open(struct file
+
+ ctx->dev->vdec_pdata->init_vdec_params(ctx);
+
+- mutex_lock(&dev->dev_ctx_lock);
++ spin_lock_irqsave(&dev->dev_ctx_lock, flags);
+ list_add(&ctx->list, &dev->ctx_list);
+- mutex_unlock(&dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
+ mtk_vcodec_dbgfs_create(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+@@ -295,6 +296,7 @@ static int fops_vcodec_release(struct fi
+ {
+ struct mtk_vcodec_dec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(file->private_data);
++ unsigned long flags;
+
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] decoder", ctx->id);
+ mutex_lock(&dev->dev_mutex);
+@@ -313,9 +315,9 @@ static int fops_vcodec_release(struct fi
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ mtk_vcodec_dbgfs_remove(dev, ctx->id);
+- mutex_lock(&dev->dev_ctx_lock);
++ spin_lock_irqsave(&dev->dev_ctx_lock, flags);
+ list_del_init(&ctx->list);
+- mutex_unlock(&dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+@@ -408,7 +410,7 @@ static int mtk_vcodec_probe(struct platf
+ for (i = 0; i < MTK_VDEC_HW_MAX; i++)
+ mutex_init(&dev->dec_mutex[i]);
+ mutex_init(&dev->dev_mutex);
+- mutex_init(&dev->dev_ctx_lock);
++ spin_lock_init(&dev->dev_ctx_lock);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
++++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+@@ -283,7 +283,7 @@ struct mtk_vcodec_dec_dev {
+ /* decoder hardware mutex lock */
+ struct mutex dec_mutex[MTK_VDEC_HW_MAX];
+ struct mutex dev_mutex;
+- struct mutex dev_ctx_lock;
++ spinlock_t dev_ctx_lock;
+ struct workqueue_struct *decode_workqueue;
+
+ spinlock_t irqlock;
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+@@ -75,16 +75,17 @@ static void handle_get_param_msg_ack(con
+ static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vdec_vpu_inst *vpu)
+ {
+ struct mtk_vcodec_dec_ctx *ctx;
++ unsigned long flags;
+ int ret = false;
+
+- mutex_lock(&dec_dev->dev_ctx_lock);
++ spin_lock_irqsave(&dec_dev->dev_ctx_lock, flags);
+ list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
+ if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
+ ret = true;
+ break;
+ }
+ }
+- mutex_unlock(&dec_dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&dec_dev->dev_ctx_lock, flags);
+
+ return ret;
+ }
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+@@ -117,6 +117,7 @@ static int fops_vcodec_open(struct file
+ struct mtk_vcodec_enc_ctx *ctx = NULL;
+ int ret = 0;
+ struct vb2_queue *src_vq;
++ unsigned long flags;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+@@ -177,9 +178,9 @@ static int fops_vcodec_open(struct file
+ mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
+ ctx->id, ctx, ctx->m2m_ctx);
+
+- mutex_lock(&dev->dev_ctx_lock);
++ spin_lock_irqsave(&dev->dev_ctx_lock, flags);
+ list_add(&ctx->list, &dev->ctx_list);
+- mutex_unlock(&dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
+
+ mutex_unlock(&dev->dev_mutex);
+ mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
+@@ -204,6 +205,7 @@ static int fops_vcodec_release(struct fi
+ {
+ struct mtk_vcodec_enc_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(file->private_data);
++ unsigned long flags;
+
+ mtk_v4l2_venc_dbg(1, ctx, "[%d] encoder", ctx->id);
+ mutex_lock(&dev->dev_mutex);
+@@ -214,9 +216,9 @@ static int fops_vcodec_release(struct fi
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+- mutex_lock(&dev->dev_ctx_lock);
++ spin_lock_irqsave(&dev->dev_ctx_lock, flags);
+ list_del_init(&ctx->list);
+- mutex_unlock(&dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+@@ -298,7 +300,7 @@ static int mtk_vcodec_probe(struct platf
+
+ mutex_init(&dev->enc_mutex);
+ mutex_init(&dev->dev_mutex);
+- mutex_init(&dev->dev_ctx_lock);
++ spin_lock_init(&dev->dev_ctx_lock);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+@@ -206,7 +206,7 @@ struct mtk_vcodec_enc_dev {
+ /* encoder hardware mutex lock */
+ struct mutex enc_mutex;
+ struct mutex dev_mutex;
+- struct mutex dev_ctx_lock;
++ spinlock_t dev_ctx_lock;
+ struct workqueue_struct *encode_workqueue;
+
+ int enc_irq;
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+@@ -45,16 +45,17 @@ static void handle_enc_encode_msg(struct
+ static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct venc_vpu_inst *vpu)
+ {
+ struct mtk_vcodec_enc_ctx *ctx;
++ unsigned long flags;
+ int ret = false;
+
+- mutex_lock(&enc_dev->dev_ctx_lock);
++ spin_lock_irqsave(&enc_dev->dev_ctx_lock, flags);
+ list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
+ if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
+ ret = true;
+ break;
+ }
+ }
+- mutex_unlock(&enc_dev->dev_ctx_lock);
++ spin_unlock_irqrestore(&enc_dev->dev_ctx_lock, flags);
+
+ return ret;
+ }
--- /dev/null
+From sashal@kernel.org Mon Jan 5 18:42:17 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 12:42:04 -0500
+Subject: mm/balloon_compaction: convert balloon_page_delete() to balloon_page_finalize()
+To: stable@vger.kernel.org
+Cc: "David Hildenbrand" <david@redhat.com>, "Lorenzo Stoakes" <lorenzo.stoakes@oracle.com>, "Alistair Popple" <apopple@nvidia.com>, "Al Viro" <viro@zeniv.linux.org.uk>, "Arnd Bergmann" <arnd@arndb.de>, "Brendan Jackman" <jackmanb@google.com>, "Byungchul Park" <byungchul@sk.com>, "Chengming Zhou" <chengming.zhou@linux.dev>, "Christian Brauner" <brauner@kernel.org>, "Christophe Leroy" <christophe.leroy@csgroup.eu>, "Eugenio Pé rez" <eperezma@redhat.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Gregory Price" <gourry@gourry.net>, "Harry Yoo" <harry.yoo@oracle.com>, "Huang, Ying" <ying.huang@linux.alibaba.com>, "Jan Kara" <jack@suse.cz>, "Jason Gunthorpe" <jgg@ziepe.ca>, "Jason Wang" <jasowang@redhat.com>, "Jerrin Shaji George" <jerrin.shaji-george@broadcom.com>, "Johannes Weiner" <hannes@cmpxchg.org>, "John Hubbard" <jhubbard@nvidia.com>, "Jonathan Corbet" <corbet@lwn.net>, "Joshua Hahn" <joshua.hahnjy@gmail.com>, "Liam Howlett" <liam.howlett@oracle.com>, "Madhavan Srinivasan" <maddy@linux.ibm.com>, "Mathew Brost" <matthew.brost@intel.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, "Miaohe Lin" <linmiaohe@huawei.com>, "Michael Ellerman" <mpe@ellerman.id.au>, "Michael S. Tsirkin" <mst@redhat.com>, "Michal Hocko" <mhocko@suse.com>, "Mike Rapoport" <rppt@kernel.org>, "Minchan Kim" <minchan@kernel.org>, "Naoya Horiguchi" <nao.horiguchi@gmail.com>, "Nicholas Piggin" <npiggin@gmail.com>, "Oscar Salvador" <osalvador@suse.de>, "Peter Xu" <peterx@redhat.com>, "Qi Zheng" <zhengqi.arch@bytedance.com>, "Rakie Kim" <rakie.kim@sk.com>, "Rik van Riel" <riel@surriel.com>, "Sergey Senozhatsky" <senozhatsky@chromium.org>, "Shakeel Butt" <shakeel.butt@linux.dev>, "Suren Baghdasaryan" <surenb@google.com>, "Vlastimil Babka" <vbabka@suse.cz>, "Xuan Zhuo" <xuanzhuo@linux.alibaba.com>, "xu xin" <xu.xin16@zte.com.cn>, "Zi Yan" <ziy@nvidia.com>, "Andrew Morton" <akpm@linux-foundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260105174205.2697666-2-sashal@kernel.org>
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit 15504b1163007bbfbd9a63460d5c14737c16e96d ]
+
+Let's move the removal of the page from the balloon list into the single
+caller, to remove the dependency on the PG_isolated flag and clarify
+locking requirements.
+
+Note that for now, balloon_page_delete() was used on two paths:
+
+(1) Removing a page from the balloon for deflation through
+ balloon_page_list_dequeue()
+(2) Removing an isolated page from the balloon for migration in the
+ per-driver migration handlers. Isolated pages were already removed from
+ the balloon list during isolation.
+
+So instead of relying on the flag, we can just distinguish both cases
+directly and handle it accordingly in the caller.
+
+We'll shuffle the operations a bit such that they logically make more
+sense (e.g., remove from the list before clearing flags).
+
+In balloon migration functions we can now move the balloon_page_finalize()
+out of the balloon lock and perform the finalization just before dropping
+the balloon reference.
+
+Document that the page lock is currently required when modifying the
+movability aspects of a page; hopefully we can soon decouple this from the
+page lock.
+
+Link: https://lkml.kernel.org/r/20250704102524.326966-3-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Brendan Jackman <jackmanb@google.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Chengming Zhou <chengming.zhou@linux.dev>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Eugenio Pé rez <eperezma@redhat.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: Harry Yoo <harry.yoo@oracle.com>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jason Wang <jasowang@redhat.com>
+Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
+Cc: Mathew Brost <matthew.brost@intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: "Michael S. Tsirkin" <mst@redhat.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Qi Zheng <zhengqi.arch@bytedance.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Cc: xu xin <xu.xin16@zte.com.cn>
+Cc: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 0da2ba35c0d5 ("powerpc/pseries/cmm: adjust BALLOON_MIGRATE when migrating pages")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/cmm.c | 2 -
+ drivers/misc/vmw_balloon.c | 3 --
+ drivers/virtio/virtio_balloon.c | 4 ---
+ include/linux/balloon_compaction.h | 43 +++++++++++++----------------------
+ mm/balloon_compaction.c | 3 +-
+ 5 files changed, 21 insertions(+), 34 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/cmm.c
++++ b/arch/powerpc/platforms/pseries/cmm.c
+@@ -532,7 +532,6 @@ static int cmm_migratepage(struct balloo
+
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ balloon_page_insert(b_dev_info, newpage);
+- balloon_page_delete(page);
+ b_dev_info->isolated_pages--;
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+@@ -542,6 +541,7 @@ static int cmm_migratepage(struct balloo
+ */
+ plpar_page_set_active(page);
+
++ balloon_page_finalize(page);
+ /* balloon page list reference */
+ put_page(page);
+
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -1778,8 +1778,7 @@ static int vmballoon_migratepage(struct
+ * @pages_lock . We keep holding @comm_lock since we will need it in a
+ * second.
+ */
+- balloon_page_delete(page);
+-
++ balloon_page_finalize(page);
+ put_page(page);
+
+ /* Inflate */
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -866,15 +866,13 @@ static int virtballoon_migratepage(struc
+ tell_host(vb, vb->inflate_vq);
+
+ /* balloon's page migration 2nd step -- deflate "page" */
+- spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
+- balloon_page_delete(page);
+- spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
+ vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
+ set_page_pfns(vb, vb->pfns, page);
+ tell_host(vb, vb->deflate_vq);
+
+ mutex_unlock(&vb->balloon_lock);
+
++ balloon_page_finalize(page);
+ put_page(page); /* balloon reference */
+
+ return MIGRATEPAGE_SUCCESS;
+--- a/include/linux/balloon_compaction.h
++++ b/include/linux/balloon_compaction.h
+@@ -98,27 +98,6 @@ static inline void balloon_page_insert(s
+ }
+
+ /*
+- * balloon_page_delete - delete a page from balloon's page list and clear
+- * the page->private assignement accordingly.
+- * @page : page to be released from balloon's page list
+- *
+- * Caller must ensure the page is locked and the spin_lock protecting balloon
+- * pages list is held before deleting a page from the balloon device.
+- */
+-static inline void balloon_page_delete(struct page *page)
+-{
+- __ClearPageOffline(page);
+- __ClearPageMovable(page);
+- set_page_private(page, 0);
+- /*
+- * No touch page.lru field once @page has been isolated
+- * because VM is using the field.
+- */
+- if (!PageIsolated(page))
+- list_del(&page->lru);
+-}
+-
+-/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+@@ -141,12 +120,6 @@ static inline void balloon_page_insert(s
+ list_add(&page->lru, &balloon->pages);
+ }
+
+-static inline void balloon_page_delete(struct page *page)
+-{
+- __ClearPageOffline(page);
+- list_del(&page->lru);
+-}
+-
+ static inline gfp_t balloon_mapping_gfp_mask(void)
+ {
+ return GFP_HIGHUSER;
+@@ -155,6 +128,22 @@ static inline gfp_t balloon_mapping_gfp_
+ #endif /* CONFIG_BALLOON_COMPACTION */
+
+ /*
++ * balloon_page_finalize - prepare a balloon page that was removed from the
++ * balloon list for release to the page allocator
++ * @page: page to be released to the page allocator
++ *
++ * Caller must ensure that the page is locked.
++ */
++static inline void balloon_page_finalize(struct page *page)
++{
++ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
++ __ClearPageMovable(page);
++ set_page_private(page, 0);
++ }
++ __ClearPageOffline(page);
++}
++
++/*
+ * balloon_page_push - insert a page into a page list.
+ * @head : pointer to list
+ * @page : page to be added
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -93,7 +93,8 @@ size_t balloon_page_list_dequeue(struct
+ if (!trylock_page(page))
+ continue;
+
+- balloon_page_delete(page);
++ list_del(&page->lru);
++ balloon_page_finalize(page);
+ __count_vm_event(BALLOON_DEFLATE);
+ list_add(&page->lru, pages);
+ unlock_page(page);
--- /dev/null
+From stable+bounces-204876-greg=kroah.com@vger.kernel.org Mon Jan 5 18:42:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 12:42:03 -0500
+Subject: mm/balloon_compaction: we cannot have isolated pages in the balloon list
+To: stable@vger.kernel.org
+Cc: "David Hildenbrand" <david@redhat.com>, "Zi Yan" <ziy@nvidia.com>, "Lorenzo Stoakes" <lorenzo.stoakes@oracle.com>, "Alistair Popple" <apopple@nvidia.com>, "Al Viro" <viro@zeniv.linux.org.uk>, "Arnd Bergmann" <arnd@arndb.de>, "Brendan Jackman" <jackmanb@google.com>, "Byungchul Park" <byungchul@sk.com>, "Chengming Zhou" <chengming.zhou@linux.dev>, "Christian Brauner" <brauner@kernel.org>, "Christophe Leroy" <christophe.leroy@csgroup.eu>, "Eugenio Pé rez" <eperezma@redhat.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Gregory Price" <gourry@gourry.net>, "Huang, Ying" <ying.huang@linux.alibaba.com>, "Jan Kara" <jack@suse.cz>, "Jason Gunthorpe" <jgg@ziepe.ca>, "Jason Wang" <jasowang@redhat.com>, "Jerrin Shaji George" <jerrin.shaji-george@broadcom.com>, "Johannes Weiner" <hannes@cmpxchg.org>, "John Hubbard" <jhubbard@nvidia.com>, "Jonathan Corbet" <corbet@lwn.net>, "Joshua Hahn" <joshua.hahnjy@gmail.com>, "Liam Howlett" <liam.howlett@oracle.com>, "Madhavan Srinivasan" <maddy@linux.ibm.com>, "Mathew Brost" <matthew.brost@intel.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, "Miaohe Lin" <linmiaohe@huawei.com>, "Michael Ellerman" <mpe@ellerman.id.au>, "Michael S. Tsirkin" <mst@redhat.com>, "Michal Hocko" <mhocko@suse.com>, "Mike Rapoport" <rppt@kernel.org>, "Minchan Kim" <minchan@kernel.org>, "Naoya Horiguchi" <nao.horiguchi@gmail.com>, "Nicholas Piggin" <npiggin@gmail.com>, "Oscar Salvador" <osalvador@suse.de>, "Peter Xu" <peterx@redhat.com>, "Qi Zheng" <zhengqi.arch@bytedance.com>, "Rakie Kim" <rakie.kim@sk.com>, "Rik van Riel" <riel@surriel.com>, "Sergey Senozhatsky" <senozhatsky@chromium.org>, "Shakeel Butt" <shakeel.butt@linux.dev>, "Suren Baghdasaryan" <surenb@google.com>, "Vlastimil Babka" <vbabka@suse.cz>, "Xuan Zhuo" <xuanzhuo@linux.alibaba.com>, "xu xin" <xu.xin16@zte.com.cn>, "Harry Yoo" <harry.yoo@oracle.com>, "Andrew Morton" <akpm@linux-foundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260105174205.2697666-1-sashal@kernel.org>
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit fb05f992b6bbb4702307d96f00703ee637b24dbf ]
+
+Patch series "mm/migration: rework movable_ops page migration (part 1)",
+v2.
+
+In the future, as we decouple "struct page" from "struct folio", pages
+that support "non-lru page migration" -- movable_ops page migration such
+as memory balloons and zsmalloc -- will no longer be folios. They will
+not have ->mapping, ->lru, and likely no refcount and no page lock. But
+they will have a type and flags 🙂
+
+This is the first part (other parts not written yet) of decoupling
+movable_ops page migration from folio migration.
+
+In this series, we get rid of the ->mapping usage, and start cleaning up
+the code + separating it from folio migration.
+
+Migration core will have to be further reworked to not treat movable_ops
+pages like folios. This is the first step in that direction.
+
+This patch (of 29):
+
+The core will set PG_isolated only after mops->isolate_page() has been
+called. In the case of the balloon, that is where we remove the page
+from the balloon list. So we cannot have isolated pages in the balloon
+list.
+
+Let's drop this unnecessary check.
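+
+For reference, a simplified sketch of the balloon side of isolation
+(loosely following the upstream balloon_page_isolate() flow, not the
+exact code): the page leaves the balloon list in this callback, and the
+migration core sets PG_isolated only afterwards, so a page that is
+still on the list can never carry the flag.
+
+    static bool example_isolate_page(struct page *page, isolate_mode_t mode)
+    {
+            struct balloon_dev_info *b_dev_info = balloon_page_device(page);
+            unsigned long flags;
+
+            spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+            list_del(&page->lru);           /* off the balloon list ... */
+            b_dev_info->isolated_pages++;
+            spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+            /* ... only now will the core mark the page PG_isolated */
+            return true;
+    }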
+
+Link: https://lkml.kernel.org/r/20250704102524.326966-2-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Brendan Jackman <jackmanb@google.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Chengming Zhou <chengming.zhou@linux.dev>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Eugenio Pérez <eperezma@redhat.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jason Wang <jasowang@redhat.com>
+Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
+Cc: Mathew Brost <matthew.brost@intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: "Michael S. Tsirkin" <mst@redhat.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Qi Zheng <zhengqi.arch@bytedance.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Cc: xu xin <xu.xin16@zte.com.cn>
+Cc: Harry Yoo <harry.yoo@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 0da2ba35c0d5 ("powerpc/pseries/cmm: adjust BALLOON_MIGRATE when migrating pages")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/balloon_compaction.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -93,12 +93,6 @@ size_t balloon_page_list_dequeue(struct
+ if (!trylock_page(page))
+ continue;
+
+- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
+- PageIsolated(page)) {
+- /* raced with isolation */
+- unlock_page(page);
+- continue;
+- }
+ balloon_page_delete(page);
+ __count_vm_event(BALLOON_DEFLATE);
+ list_add(&page->lru, pages);
--- /dev/null
+From stable+bounces-204872-greg=kroah.com@vger.kernel.org Mon Jan 5 18:42:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 12:34:20 -0500
+Subject: PCI: brcmstb: Fix disabling L0s capability
+To: stable@vger.kernel.org
+Cc: Jim Quinlan <james.quinlan@broadcom.com>, Bjorn Helgaas <bhelgaas@google.com>, Manivannan Sadhasivam <mani@kernel.org>, Florian Fainelli <florian.fainelli@broadcom.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105173420.2692565-3-sashal@kernel.org>
+
+From: Jim Quinlan <james.quinlan@broadcom.com>
+
+[ Upstream commit 9583f9d22991d2cfb5cc59a2552040c4ae98d998 ]
+
+caab002d5069 ("PCI: brcmstb: Disable L0s component of ASPM if requested")
+set PCI_EXP_LNKCAP_ASPM_L1 and (optionally) PCI_EXP_LNKCAP_ASPM_L0S in
+PCI_EXP_LNKCAP (aka PCIE_RC_CFG_PRIV1_LINK_CAPABILITY in brcmstb).
+
+But instead of using PCI_EXP_LNKCAP_ASPM_L1 and PCI_EXP_LNKCAP_ASPM_L0S
+directly, it used PCIE_LINK_STATE_L1 and PCIE_LINK_STATE_L0S, which are
+Linux-created values that only coincidentally matched the PCIe spec.
+b478e162f227 ("PCI/ASPM: Consolidate link state defines") later changed
+them so they no longer matched the PCIe spec, so the bits ended up in the
+wrong place in PCI_EXP_LNKCAP.
+
+Use PCI_EXP_LNKCAP_ASPM_L0S to clear L0s support when there's an
+'aspm-no-l0s' property. Rely on brcmstb hardware to advertise L0s and/or
+L1 support otherwise.
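+
+For illustration, a condensed sketch of the corrected logic (a fragment
+of brcm_pcie_setup(), mirroring the hunk below; 'base' and 'pcie' are
+the driver's existing locals):
+
+    tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+    /* Clear the spec-defined capability bit, not a PCIE_LINK_STATE_* value. */
+    if (of_property_read_bool(pcie->np, "aspm-no-l0s"))
+            tmp &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+
+    writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);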
+
+Fixes: caab002d5069 ("PCI: brcmstb: Disable L0s component of ASPM if requested")
+Reported-by: Bjorn Helgaas <bhelgaas@google.com>
+Closes: https://lore.kernel.org/linux-pci/20250925194424.GA2197200@bhelgaas
+Signed-off-by: Jim Quinlan <james.quinlan@broadcom.com>
+[mani: reworded subject and description, added closes tag and CCed stable]
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251003170436.1446030-1-james.quinlan@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pcie-brcmstb.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -47,7 +47,6 @@
+
+ #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
+ #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0
+-#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+
+ #define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8
+ #define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8
+@@ -1029,7 +1028,7 @@ static int brcm_pcie_setup(struct brcm_p
+ void __iomem *base = pcie->base;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+- u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap;
++ u32 tmp, burst, num_lanes, num_lanes_cap;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
+@@ -1129,12 +1128,9 @@ static int brcm_pcie_setup(struct brcm_p
+
+
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+- aspm_support = PCIE_LINK_STATE_L1;
+- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+- aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+- u32p_replace_bits(&tmp, aspm_support,
+- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
++ if (of_property_read_bool(pcie->np, "aspm-no-l0s"))
++ tmp &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+ /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
--- /dev/null
+From stable+bounces-204870-greg=kroah.com@vger.kernel.org Mon Jan 5 18:41:47 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 12:34:18 -0500
+Subject: PCI: brcmstb: Reuse pcie_cfg_data structure
+To: stable@vger.kernel.org
+Cc: "Stanimir Varbanov" <svarbanov@suse.de>, "Florian Fainelil" <florian.fainelli@broadcom.com>, "Jim Quinlan" <james.quinlan@broadcom.com>, "Ivan T. Ivanov" <iivanov@suse.de>, "Krzysztof Wilczyński" <kwilczynski@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260105173420.2692565-1-sashal@kernel.org>
+
+From: Stanimir Varbanov <svarbanov@suse.de>
+
+[ Upstream commit 10dbedad3c8188ce8b68559d43b7aaee7dafba25 ]
+
+Instead of copying fields from the pcie_cfg_data structure to
+brcm_pcie, reference it directly.
+
+Signed-off-by: Stanimir Varbanov <svarbanov@suse.de>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Jim Quinlan <james.quinlan@broadcom.com>
+Tested-by: Ivan T. Ivanov <iivanov@suse.de>
+Link: https://lore.kernel.org/r/20250224083559.47645-6-svarbanov@suse.de
+[kwilczynski: commit log]
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Stable-dep-of: 9583f9d22991 ("PCI: brcmstb: Fix disabling L0s capability")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pcie-brcmstb.c | 72 +++++++++++++++-------------------
+ 1 file changed, 32 insertions(+), 40 deletions(-)
+
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -191,11 +191,11 @@
+ #define SSC_STATUS_PLL_LOCK_MASK 0x800
+ #define PCIE_BRCM_MAX_MEMC 3
+
+-#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX])
+-#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA])
+-#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1])
+-#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG])
+-#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE])
++#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX])
++#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA])
++#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1])
++#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
++#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
+
+ /* Rescal registers */
+ #define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
+@@ -276,8 +276,6 @@ struct brcm_pcie {
+ int gen;
+ u64 msi_target_addr;
+ struct brcm_msi *msi;
+- const int *reg_offsets;
+- enum pcie_soc_base soc_base;
+ struct reset_control *rescal;
+ struct reset_control *perst_reset;
+ struct reset_control *bridge_reset;
+@@ -285,17 +283,14 @@ struct brcm_pcie {
+ int num_memc;
+ u64 memc_size[PCIE_BRCM_MAX_MEMC];
+ u32 hw_rev;
+- int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+- int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
+- bool has_phy;
+- u8 num_inbound_wins;
++ const struct pcie_cfg_data *cfg;
+ };
+
+ static inline bool is_bmips(const struct brcm_pcie *pcie)
+ {
+- return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425;
++ return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
+ }
+
+ /*
+@@ -855,7 +850,7 @@ static int brcm_pcie_get_inbound_wins(st
+ * security considerations, and is not implemented in our modern
+ * SoCs.
+ */
+- if (pcie->soc_base != BCM7712)
++ if (pcie->cfg->soc_base != BCM7712)
+ add_inbound_win(b++, &n, 0, 0, 0);
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+@@ -872,10 +867,10 @@ static int brcm_pcie_get_inbound_wins(st
+ * That being said, each BARs size must still be a power of
+ * two.
+ */
+- if (pcie->soc_base == BCM7712)
++ if (pcie->cfg->soc_base == BCM7712)
+ add_inbound_win(b++, &n, size, cpu_start, pcie_start);
+
+- if (n > pcie->num_inbound_wins)
++ if (n > pcie->cfg->num_inbound_wins)
+ break;
+ }
+
+@@ -889,7 +884,7 @@ static int brcm_pcie_get_inbound_wins(st
+ * that enables multiple memory controllers. As such, it can return
+ * now w/o doing special configuration.
+ */
+- if (pcie->soc_base == BCM7712)
++ if (pcie->cfg->soc_base == BCM7712)
+ return n;
+
+ ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
+@@ -1012,7 +1007,7 @@ static void set_inbound_win_registers(st
+ * 7712:
+ * All of their BARs need to be set.
+ */
+- if (pcie->soc_base == BCM7712) {
++ if (pcie->cfg->soc_base == BCM7712) {
+ /* BUS remap register settings */
+ reg_offset = brcm_ubus_reg_offset(i);
+ tmp = lower_32_bits(cpu_addr) & ~0xfff;
+@@ -1036,15 +1031,15 @@ static int brcm_pcie_setup(struct brcm_p
+ int memc, ret;
+
+ /* Reset the bridge */
+- ret = pcie->bridge_sw_init_set(pcie, 1);
++ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
+ if (ret)
+ return ret;
+
+ /* Ensure that PERST# is asserted; some bootloaders may deassert it. */
+- if (pcie->soc_base == BCM2711) {
+- ret = pcie->perst_set(pcie, 1);
++ if (pcie->cfg->soc_base == BCM2711) {
++ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret) {
+- pcie->bridge_sw_init_set(pcie, 0);
++ pcie->cfg->bridge_sw_init_set(pcie, 0);
+ return ret;
+ }
+ }
+@@ -1052,7 +1047,7 @@ static int brcm_pcie_setup(struct brcm_p
+ usleep_range(100, 200);
+
+ /* Take the bridge out of reset */
+- ret = pcie->bridge_sw_init_set(pcie, 0);
++ ret = pcie->cfg->bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return ret;
+
+@@ -1072,9 +1067,9 @@ static int brcm_pcie_setup(struct brcm_p
+ */
+ if (is_bmips(pcie))
+ burst = 0x1; /* 256 bytes */
+- else if (pcie->soc_base == BCM2711)
++ else if (pcie->cfg->soc_base == BCM2711)
+ burst = 0x0; /* 128 bytes */
+- else if (pcie->soc_base == BCM7278)
++ else if (pcie->cfg->soc_base == BCM7278)
+ burst = 0x3; /* 512 bytes */
+ else
+ burst = 0x2; /* 512 bytes */
+@@ -1199,7 +1194,7 @@ static void brcm_extend_rbus_timeout(str
+ u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+
+ /* 7712 does not have this (RGR1) timer */
+- if (pcie->soc_base == BCM7712)
++ if (pcie->cfg->soc_base == BCM7712)
+ return;
+
+ /* Each unit in timeout register is 1/216,000,000 seconds */
+@@ -1281,7 +1276,7 @@ static int brcm_pcie_start_link(struct b
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
+ /* Unassert the fundamental reset */
+- ret = pcie->perst_set(pcie, 0);
++ ret = pcie->cfg->perst_set(pcie, 0);
+ if (ret)
+ return ret;
+
+@@ -1465,12 +1460,12 @@ static int brcm_phy_cntl(struct brcm_pci
+
+ static inline int brcm_phy_start(struct brcm_pcie *pcie)
+ {
+- return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
++ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
+ }
+
+ static inline int brcm_phy_stop(struct brcm_pcie *pcie)
+ {
+- return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
++ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
+ }
+
+ static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
+@@ -1481,7 +1476,7 @@ static int brcm_pcie_turn_off(struct brc
+ if (brcm_pcie_link_up(pcie))
+ brcm_pcie_enter_l23(pcie);
+ /* Assert fundamental reset */
+- ret = pcie->perst_set(pcie, 1);
++ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret)
+ return ret;
+
+@@ -1496,7 +1491,7 @@ static int brcm_pcie_turn_off(struct brc
+ writel(tmp, base + HARD_DEBUG(pcie));
+
+ /* Shutdown PCIe bridge */
+- ret = pcie->bridge_sw_init_set(pcie, 1);
++ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
+
+ return ret;
+ }
+@@ -1584,7 +1579,7 @@ static int brcm_pcie_resume_noirq(struct
+ goto err_reset;
+
+ /* Take bridge out of reset so we can access the SERDES reg */
+- pcie->bridge_sw_init_set(pcie, 0);
++ pcie->cfg->bridge_sw_init_set(pcie, 0);
+
+ /* SERDES_IDDQ = 0 */
+ tmp = readl(base + HARD_DEBUG(pcie));
+@@ -1805,12 +1800,7 @@ static int brcm_pcie_probe(struct platfo
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->dev = &pdev->dev;
+ pcie->np = np;
+- pcie->reg_offsets = data->offsets;
+- pcie->soc_base = data->soc_base;
+- pcie->perst_set = data->perst_set;
+- pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+- pcie->has_phy = data->has_phy;
+- pcie->num_inbound_wins = data->num_inbound_wins;
++ pcie->cfg = data;
+
+ pcie->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pcie->base))
+@@ -1845,7 +1835,7 @@ static int brcm_pcie_probe(struct platfo
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
+
+- pcie->bridge_sw_init_set(pcie, 0);
++ pcie->cfg->bridge_sw_init_set(pcie, 0);
+
+ if (pcie->swinit_reset) {
+ ret = reset_control_assert(pcie->swinit_reset);
+@@ -1884,7 +1874,8 @@ static int brcm_pcie_probe(struct platfo
+ goto fail;
+
+ pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
+- if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
++ if (pcie->cfg->soc_base == BCM4908 &&
++ pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
+ ret = -ENODEV;
+ goto fail;
+@@ -1904,7 +1895,8 @@ static int brcm_pcie_probe(struct platfo
+ }
+ }
+
+- bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
++ bridge->ops = pcie->cfg->soc_base == BCM7425 ?
++ &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->sysdata = pcie;
+
+ platform_set_drvdata(pdev, pcie);
--- /dev/null
+From stable+bounces-204871-greg=kroah.com@vger.kernel.org Mon Jan 5 18:34:28 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 12:34:19 -0500
+Subject: PCI: brcmstb: Set MLW based on "num-lanes" DT property if present
+To: stable@vger.kernel.org
+Cc: Jim Quinlan <james.quinlan@broadcom.com>, Manivannan Sadhasivam <mani@kernel.org>, Florian Fainelli <florian.fainelli@broadcom.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105173420.2692565-2-sashal@kernel.org>
+
+From: Jim Quinlan <james.quinlan@broadcom.com>
+
+[ Upstream commit a364d10ffe361fb34c3838d33604da493045de1e ]
+
+By default, the driver relies on the hardware-defined default value for
+the Max Link Width (MLW) capability. But if the "num-lanes" DT property
+is present, assume that the chip's default capability information is
+incorrect or undesired, and use the specified value instead.
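+
+A condensed sketch of the override (following the hunk below; 'tmp'
+already holds PCIE_RC_CFG_PRIV1_LINK_CAPABILITY, and the companion
+PHY_CTL_1 write from the actual patch is left out here):
+
+    num_lanes_cap = u32_get_bits(tmp,
+                    PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+    num_lanes = 0;
+
+    /* Only override when the DT value is present, sane, and different. */
+    if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
+        num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
+            u32p_replace_bits(&tmp, num_lanes,
+                              PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+            writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+    }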
+
+Signed-off-by: Jim Quinlan <james.quinlan@broadcom.com>
+[mani: reworded the description and comments]
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250530224035.41886-3-james.quinlan@broadcom.com
+Stable-dep-of: 9583f9d22991 ("PCI: brcmstb: Fix disabling L0s capability")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pcie-brcmstb.c | 27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -46,6 +46,7 @@
+ #define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
+
+ #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
++#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0
+ #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+
+ #define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8
+@@ -55,6 +56,9 @@
+ #define PCIE_RC_DL_MDIO_WR_DATA 0x1104
+ #define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+
++#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804
++#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8
++
+ #define PCIE_MISC_MISC_CTRL 0x4008
+ #define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
+ #define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
+@@ -1025,7 +1029,7 @@ static int brcm_pcie_setup(struct brcm_p
+ void __iomem *base = pcie->base;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+- u32 tmp, burst, aspm_support;
++ u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
+@@ -1133,6 +1137,27 @@ static int brcm_pcie_setup(struct brcm_p
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
++ /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
++ num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
++ num_lanes = 0;
++
++ /*
++ * Use hardware negotiated Max Link Width value by default. If the
++ * "num-lanes" DT property is present, assume that the chip's default
++ * link width capability information is incorrect/undesired and use the
++ * specified value instead.
++ */
++ if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
++ num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
++ u32p_replace_bits(&tmp, num_lanes,
++ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
++ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
++ tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1);
++ u32p_replace_bits(&tmp, 1,
++ PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK);
++ writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1);
++ }
++
+ /*
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
--- /dev/null
+From stable+bounces-204878-greg=kroah.com@vger.kernel.org Mon Jan 5 18:47:39 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 12:42:05 -0500
+Subject: powerpc/pseries/cmm: adjust BALLOON_MIGRATE when migrating pages
+To: stable@vger.kernel.org
+Cc: David Hildenbrand <david@redhat.com>, "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>, Christophe Leroy <christophe.leroy@csgroup.eu>, Madhavan Srinivasan <maddy@linux.ibm.com>, Michael Ellerman <mpe@ellerman.id.au>, Nicholas Piggin <npiggin@gmail.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260105174205.2697666-3-sashal@kernel.org>
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit 0da2ba35c0d532ca0fe7af698b17d74c4d084b9a ]
+
+Let's properly adjust BALLOON_MIGRATE like the other drivers.
+
+Note that the INFLATE/DEFLATE events are triggered from the core when
+enqueueing/dequeueing pages.
+
+This was found by code inspection.
+
+Link: https://lkml.kernel.org/r/20251021100606.148294-3-david@redhat.com
+Fixes: fe030c9b85e6 ("powerpc/pseries/cmm: Implement balloon compaction")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/cmm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/platforms/pseries/cmm.c
++++ b/arch/powerpc/platforms/pseries/cmm.c
+@@ -532,6 +532,7 @@ static int cmm_migratepage(struct balloo
+
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ balloon_page_insert(b_dev_info, newpage);
++ __count_vm_event(BALLOON_MIGRATE);
+ b_dev_info->isolated_pages--;
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
idpf-stop-tx-if-there-are-insufficient-buffer-resources.patch
idpf-remove-obsolete-stashing-code.patch
hrtimers-make-hrtimer_update_function-less-expensive.patch
+gve-defer-interrupt-enabling-until-napi-registration.patch
+asoc-renesas-rz-ssi-fix-channel-swap-issue-in-full-duplex-mode.patch
+block-handle-zone-management-operations-completions.patch
+soundwire-stream-extend-sdw_alloc_stream-to-take-type-parameter.patch
+asoc-qcom-sdw-fix-memory-leak-for-sdw_stream_runtime.patch
+asoc-renesas-rz-ssi-fix-rz_ssi_priv-hw_params_cache-sample_width.patch
+pci-brcmstb-reuse-pcie_cfg_data-structure.patch
+pci-brcmstb-set-mlw-based-on-num-lanes-dt-property-if-present.patch
+pci-brcmstb-fix-disabling-l0s-capability.patch
+mm-balloon_compaction-we-cannot-have-isolated-pages-in-the-balloon-list.patch
+mm-balloon_compaction-convert-balloon_page_delete-to-balloon_page_finalize.patch
+powerpc-pseries-cmm-adjust-balloon_migrate-when-migrating-pages.patch
+media-mediatek-vcodec-use-spinlock-for-context-list-protection-lock.patch
+media-amphion-add-a-frame-flush-mode-for-decoder.patch
+media-amphion-make-some-vpu_v4l2-functions-static.patch
+media-amphion-remove-vpu_vb_is_codecconfig.patch
+vfio-pci-disable-qword-access-to-the-pci-rom-bar.patch
--- /dev/null
+From stable+bounces-204827-greg=kroah.com@vger.kernel.org Mon Jan 5 16:14:58 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 10:10:07 -0500
+Subject: soundwire: stream: extend sdw_alloc_stream() to take 'type' parameter
+To: stable@vger.kernel.org
+Cc: "Pierre-Louis Bossart" <pierre-louis.bossart@linux.dev>, "Bard Liao" <yung-chuan.liao@linux.intel.com>, "Péter Ujfalusi" <peter.ujfalusi@linux.intel.com>, "Liam Girdwood" <liam.r.girdwood@intel.com>, "Ranjani Sridharan" <ranjani.sridharan@linux.intel.com>, shumingf@realtek.com, "Vinod Koul" <vkoul@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260105151008.2624877-1-sashal@kernel.org>
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
+
+[ Upstream commit dc90bbefa792031d89fe2af9ad4a6febd6be96a9 ]
+
+In the existing definition of sdw_stream_runtime, the 'type' member is
+never set and defaults to PCM. To prepare for BPT/BRA support, we need
+to special-case streams and make use of the 'type'.
+
+No functional change for now; the implicit PCM type is now explicit.
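+
+A minimal usage sketch of the extended API (error handling trimmed; the
+surrounding machine-driver context is hypothetical, the call itself
+matches the qcom hunk below):
+
+    struct sdw_stream_runtime *sruntime;
+
+    /* The stream type is now passed explicitly; PCM is what callers
+     * implicitly got before this change. */
+    sruntime = sdw_alloc_stream(cpu_dai->name, SDW_STREAM_PCM);
+    if (!sruntime)
+            return -ENOMEM;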
+
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
+Signed-off-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Liam Girdwood <liam.r.girdwood@intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Tested-by: shumingf@realtek.com
+Link: https://lore.kernel.org/r/20250227140615.8147-5-yung-chuan.liao@linux.intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: bcba17279327 ("ASoC: qcom: sdw: fix memory leak for sdw_stream_runtime")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/driver-api/soundwire/stream.rst | 2 +-
+ drivers/soundwire/stream.c | 6 ++++--
+ include/linux/soundwire/sdw.h | 2 +-
+ sound/soc/qcom/sdw.c | 2 +-
+ 4 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/Documentation/driver-api/soundwire/stream.rst
++++ b/Documentation/driver-api/soundwire/stream.rst
+@@ -291,7 +291,7 @@ per stream. From ASoC DPCM framework, th
+
+ .. code-block:: c
+
+- int sdw_alloc_stream(char * stream_name);
++ int sdw_alloc_stream(char * stream_name, enum sdw_stream_type type);
+
+ The SoundWire core provides a sdw_startup_stream() helper function,
+ typically called during a dailink .startup() callback, which performs
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1744,12 +1744,13 @@ static int set_stream(struct snd_pcm_sub
+ * sdw_alloc_stream() - Allocate and return stream runtime
+ *
+ * @stream_name: SoundWire stream name
++ * @type: stream type (could be PCM ,PDM or BPT)
+ *
+ * Allocates a SoundWire stream runtime instance.
+ * sdw_alloc_stream should be called only once per stream. Typically
+ * invoked from ALSA/ASoC machine/platform driver.
+ */
+-struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name)
++struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type)
+ {
+ struct sdw_stream_runtime *stream;
+
+@@ -1761,6 +1762,7 @@ struct sdw_stream_runtime *sdw_alloc_str
+ INIT_LIST_HEAD(&stream->master_list);
+ stream->state = SDW_STREAM_ALLOCATED;
+ stream->m_rt_count = 0;
++ stream->type = type;
+
+ return stream;
+ }
+@@ -1789,7 +1791,7 @@ int sdw_startup_stream(void *sdw_substre
+ if (!name)
+ return -ENOMEM;
+
+- sdw_stream = sdw_alloc_stream(name);
++ sdw_stream = sdw_alloc_stream(name, SDW_STREAM_PCM);
+ if (!sdw_stream) {
+ dev_err(rtd->dev, "alloc stream failed for substream DAI %s\n", substream->name);
+ ret = -ENOMEM;
+--- a/include/linux/soundwire/sdw.h
++++ b/include/linux/soundwire/sdw.h
+@@ -1024,7 +1024,7 @@ struct sdw_stream_runtime {
+ int m_rt_count;
+ };
+
+-struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
++struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type);
+ void sdw_release_stream(struct sdw_stream_runtime *stream);
+
+ int sdw_compute_params(struct sdw_bus *bus);
+--- a/sound/soc/qcom/sdw.c
++++ b/sound/soc/qcom/sdw.c
+@@ -27,7 +27,7 @@ int qcom_snd_sdw_startup(struct snd_pcm_
+ struct snd_soc_dai *codec_dai;
+ int ret, i;
+
+- sruntime = sdw_alloc_stream(cpu_dai->name);
++ sruntime = sdw_alloc_stream(cpu_dai->name, SDW_STREAM_PCM);
+ if (!sruntime)
+ return -ENOMEM;
+
--- /dev/null
+From stable+bounces-205024-greg=kroah.com@vger.kernel.org Tue Jan 6 03:41:16 2026
+From: Kevin Tian <kevin.tian@intel.com>
+Date: Tue, 6 Jan 2026 02:44:28 +0000
+Subject: vfio/pci: Disable qword access to the PCI ROM bar
+To: stable@vger.kernel.org
+Cc: Kevin Tian <kevin.tian@intel.com>, Farrah Chen <farrah.chen@intel.com>, Alex Williamson <alex@shazbot.org>
+Message-ID: <20260106024428.1334406-1-kevin.tian@intel.com>
+
+From: Kevin Tian <kevin.tian@intel.com>
+
+[ Upstream commit dc85a46928c41423ad89869baf05a589e2975575 ]
+
+Commit 2b938e3db335 ("vfio/pci: Enable iowrite64 and ioread64 for vfio
+pci") enabled qword access to the PCI BAR resources. However, certain
+devices (e.g. Intel X710) have been observed to misbehave on qword
+accesses to the ROM BAR, e.g. by triggering PCI AER errors.
+
+This is triggered by QEMU, which caches the ROM content by simply doing
+a pread() of the remaining size until it gets the full contents. The
+other BARs only see operations at the same access width as used by the
+guest drivers.
+
+Instead of trying to identify all broken devices, universally disable
+qword access to the ROM BAR, i.e. go back to the old behavior, which
+worked reliably for years.
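+
+The shape of the fix, as a condensed sketch (based on the
+vfio_pci_bar_rw() hunk below; the ROM-BAR check is assumed to be the
+existing bar == PCI_ROM_RESOURCE branch):
+
+    enum vfio_pci_io_width max_width = VFIO_PCI_IO_WIDTH_8;
+
+    if (bar == PCI_ROM_RESOURCE)
+            max_width = VFIO_PCI_IO_WIDTH_4; /* never issue qword MMIO to the ROM */
+
+    done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf,
+                                  pos, count, x_start, x_end, iswrite,
+                                  max_width);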
+
+Reported-by: Farrah Chen <farrah.chen@intel.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220740
+Fixes: 2b938e3db335 ("vfio/pci: Enable iowrite64 and ioread64 for vfio pci")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kevin Tian <kevin.tian@intel.com>
+Tested-by: Farrah Chen <farrah.chen@intel.com>
+Link: https://lore.kernel.org/r/20251218081650.555015-2-kevin.tian@intel.com
+Signed-off-by: Alex Williamson <alex@shazbot.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/pci/nvgrace-gpu/main.c | 4 ++--
+ drivers/vfio/pci/vfio_pci_rdwr.c | 24 ++++++++++++++++++------
+ include/linux/vfio_pci_core.h | 10 +++++++++-
+ 3 files changed, 29 insertions(+), 9 deletions(-)
+
+--- a/drivers/vfio/pci/nvgrace-gpu/main.c
++++ b/drivers/vfio/pci/nvgrace-gpu/main.c
+@@ -482,7 +482,7 @@ nvgrace_gpu_map_and_read(struct nvgrace_
+ ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
+ nvdev->resmem.ioaddr,
+ buf, offset, mem_count,
+- 0, 0, false);
++ 0, 0, false, VFIO_PCI_IO_WIDTH_8);
+ }
+
+ return ret;
+@@ -600,7 +600,7 @@ nvgrace_gpu_map_and_write(struct nvgrace
+ ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
+ nvdev->resmem.ioaddr,
+ (char __user *)buf, pos, mem_count,
+- 0, 0, true);
++ 0, 0, true, VFIO_PCI_IO_WIDTH_8);
+ }
+
+ return ret;
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -141,7 +141,8 @@ VFIO_IORDWR(64)
+ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
+ void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+- size_t x_end, bool iswrite)
++ size_t x_end, bool iswrite,
++ enum vfio_pci_io_width max_width)
+ {
+ ssize_t done = 0;
+ int ret;
+@@ -157,7 +158,7 @@ ssize_t vfio_pci_core_do_io_rw(struct vf
+ fillable = 0;
+
+ #if defined(ioread64) && defined(iowrite64)
+- if (fillable >= 8 && !(off % 8)) {
++ if (fillable >= 8 && !(off % 8) && max_width >= 8) {
+ ret = vfio_pci_iordwr64(vdev, iswrite, test_mem,
+ io, buf, off, &filled);
+ if (ret)
+@@ -165,13 +166,13 @@ ssize_t vfio_pci_core_do_io_rw(struct vf
+
+ } else
+ #endif
+- if (fillable >= 4 && !(off % 4)) {
++ if (fillable >= 4 && !(off % 4) && max_width >= 4) {
+ ret = vfio_pci_iordwr32(vdev, iswrite, test_mem,
+ io, buf, off, &filled);
+ if (ret)
+ return ret;
+
+- } else if (fillable >= 2 && !(off % 2)) {
++ } else if (fillable >= 2 && !(off % 2) && max_width >= 2) {
+ ret = vfio_pci_iordwr16(vdev, iswrite, test_mem,
+ io, buf, off, &filled);
+ if (ret)
+@@ -242,6 +243,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_
+ void __iomem *io;
+ struct resource *res = &vdev->pdev->resource[bar];
+ ssize_t done;
++ enum vfio_pci_io_width max_width = VFIO_PCI_IO_WIDTH_8;
+
+ if (pci_resource_start(pdev, bar))
+ end = pci_resource_len(pdev, bar);
+@@ -268,6 +270,16 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_
+ goto out;
+ }
+ x_end = end;
++
++ /*
++ * Certain devices (e.g. Intel X710) don't support qword
++ * access to the ROM bar. Otherwise PCI AER errors might be
++ * triggered.
++ *
++ * Disable qword access to the ROM bar universally, which
++ * worked reliably for years before qword access is enabled.
++ */
++ max_width = VFIO_PCI_IO_WIDTH_4;
+ } else {
+ int ret = vfio_pci_core_setup_barmap(vdev, bar);
+ if (ret) {
+@@ -284,7 +296,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_
+ }
+
+ done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
+- count, x_start, x_end, iswrite);
++ count, x_start, x_end, iswrite, max_width);
+
+ if (done >= 0)
+ *ppos += done;
+@@ -353,7 +365,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_
+ * to the memory enable bit in the command register.
+ */
+ done = vfio_pci_core_do_io_rw(vdev, false, iomem, buf, off, count,
+- 0, 0, iswrite);
++ 0, 0, iswrite, VFIO_PCI_IO_WIDTH_8);
+
+ vga_put(vdev->pdev, rsrc);
+
+--- a/include/linux/vfio_pci_core.h
++++ b/include/linux/vfio_pci_core.h
+@@ -102,6 +102,13 @@ struct vfio_pci_core_device {
+ struct rw_semaphore memory_lock;
+ };
+
++enum vfio_pci_io_width {
++ VFIO_PCI_IO_WIDTH_1 = 1,
++ VFIO_PCI_IO_WIDTH_2 = 2,
++ VFIO_PCI_IO_WIDTH_4 = 4,
++ VFIO_PCI_IO_WIDTH_8 = 8,
++};
++
+ /* Will be exported for vfio pci drivers usage */
+ int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+@@ -137,7 +144,8 @@ pci_ers_result_t vfio_pci_core_aer_err_d
+ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
+ void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+- size_t x_end, bool iswrite);
++ size_t x_end, bool iswrite,
++ enum vfio_pci_io_width max_width);
+ bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt,
+ loff_t reg_start, size_t reg_cnt,
+ loff_t *buf_offset,