--- /dev/null
+From stable+bounces-242431-greg=kroah.com@vger.kernel.org Fri May 1 15:34:35 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 09:34:23 -0400
+Subject: ALSA: aoa: i2sbus: clear stale prepared state
+To: stable@vger.kernel.org
+Cc: "Cássio Gabriel" <cassiogabrielcontato@gmail.com>, "kernel test robot" <lkp@intel.com>, "Takashi Iwai" <tiwai@suse.de>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260501133423.3365447-2-sashal@kernel.org>
+
+From: Cássio Gabriel <cassiogabrielcontato@gmail.com>
+
+[ Upstream commit 5ed060d5491597490fb53ec69da3edc4b1e8c165 ]
+
+The i2sbus PCM code uses pi->active to constrain the sibling stream to
+an already prepared duplex format and rate in i2sbus_pcm_open().
+
+That state is set from i2sbus_pcm_prepare(), but the current code only
+clears it on close. As a result, the sibling stream can inherit stale
+constraints after the prepared state has been torn down.
+
+Clear pi->active when hw_params() or hw_free() tears down the prepared
+state, and set it again only after prepare succeeds.
+
+Replace the stale FIXME in the duplex constraint comment with a description
+of the current driver behavior: i2sbus still programs a single shared
+transport configuration for both directions, so mixed formats are not
+supported in duplex mode.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202604010125.AvkWBYKI-lkp@intel.com/
+Fixes: f3d9478b2ce4 ("[ALSA] snd-aoa: add snd-aoa")
+Cc: stable@vger.kernel.org
+Signed-off-by: Cássio Gabriel <cassiogabrielcontato@gmail.com>
+Link: https://patch.msgid.link/20260331-aoa-i2sbus-clear-stale-active-v2-1-3764ae2889a1@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/aoa/soundbus/i2sbus/pcm.c | 55 ++++++++++++++++++++++++++++++++--------
+ 1 file changed, 44 insertions(+), 11 deletions(-)
+
+--- a/sound/aoa/soundbus/i2sbus/pcm.c
++++ b/sound/aoa/soundbus/i2sbus/pcm.c
+@@ -165,17 +165,16 @@ static int i2sbus_pcm_open(struct i2sbus
+ * currently in use (if any). */
+ hw->rate_min = 5512;
+ hw->rate_max = 192000;
+- /* if the other stream is active, then we can only
+- * support what it is currently using.
+- * FIXME: I lied. This comment is wrong. We can support
+- * anything that works with the same serial format, ie.
+- * when recording 24 bit sound we can well play 16 bit
+- * sound at the same time iff using the same transfer mode.
++ /* If the other stream is already prepared, keep this stream
++ * on the same duplex format and rate.
++ *
++ * i2sbus_pcm_prepare() still programs one shared transport
++ * configuration for both directions, so mixed duplex formats
++ * are not supported here.
+ */
+ if (other->active) {
+- /* FIXME: is this guaranteed by the alsa api? */
+ hw->formats &= pcm_format_to_bits(i2sdev->format);
+- /* see above, restrict rates to the one we already have */
++ /* Restrict rates to the one already in use. */
+ hw->rate_min = i2sdev->rate;
+ hw->rate_max = i2sdev->rate;
+ }
+@@ -283,6 +282,23 @@ void i2sbus_wait_for_stop_both(struct i2
+ }
+ #endif
+
++static void i2sbus_pcm_clear_active(struct i2sbus_dev *i2sdev, int in)
++{
++ struct pcm_info *pi;
++
++ guard(mutex)(&i2sdev->lock);
++
++ get_pcm_info(i2sdev, in, &pi, NULL);
++ pi->active = 0;
++}
++
++static inline int i2sbus_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params, int in)
++{
++ i2sbus_pcm_clear_active(snd_pcm_substream_chip(substream), in);
++ return 0;
++}
++
+ static inline int i2sbus_hw_free(struct snd_pcm_substream *substream, int in)
+ {
+ struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream);
+@@ -291,14 +307,27 @@ static inline int i2sbus_hw_free(struct
+ get_pcm_info(i2sdev, in, &pi, NULL);
+ if (pi->dbdma_ring.stopping)
+ i2sbus_wait_for_stop(i2sdev, pi);
++ i2sbus_pcm_clear_active(i2sdev, in);
+ return 0;
+ }
+
++static int i2sbus_playback_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ return i2sbus_hw_params(substream, params, 0);
++}
++
+ static int i2sbus_playback_hw_free(struct snd_pcm_substream *substream)
+ {
+ return i2sbus_hw_free(substream, 0);
+ }
+
++static int i2sbus_record_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ return i2sbus_hw_params(substream, params, 1);
++}
++
+ static int i2sbus_record_hw_free(struct snd_pcm_substream *substream)
+ {
+ return i2sbus_hw_free(substream, 1);
+@@ -335,7 +364,6 @@ static int i2sbus_pcm_prepare(struct i2s
+ return -EINVAL;
+
+ runtime = pi->substream->runtime;
+- pi->active = 1;
+ if (other->active &&
+ ((i2sdev->format != runtime->format)
+ || (i2sdev->rate != runtime->rate)))
+@@ -450,9 +478,11 @@ static int i2sbus_pcm_prepare(struct i2s
+
+ /* early exit if already programmed correctly */
+ /* not locking these is fine since we touch them only in this function */
+- if (in_le32(&i2sdev->intfregs->serial_format) == sfr
+- && in_le32(&i2sdev->intfregs->data_word_sizes) == dws)
++ if (in_le32(&i2sdev->intfregs->serial_format) == sfr &&
++ in_le32(&i2sdev->intfregs->data_word_sizes) == dws) {
++ pi->active = 1;
+ return 0;
++ }
+
+ /* let's notify the codecs about clocks going away.
+ * For now we only do mastering on the i2s cell... */
+@@ -490,6 +520,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ if (cii->codec->switch_clock)
+ cii->codec->switch_clock(cii, CLOCK_SWITCH_SLAVE);
+
++ pi->active = 1;
+ return 0;
+ }
+
+@@ -746,6 +777,7 @@ static snd_pcm_uframes_t i2sbus_playback
+ static const struct snd_pcm_ops i2sbus_playback_ops = {
+ .open = i2sbus_playback_open,
+ .close = i2sbus_playback_close,
++ .hw_params = i2sbus_playback_hw_params,
+ .hw_free = i2sbus_playback_hw_free,
+ .prepare = i2sbus_playback_prepare,
+ .trigger = i2sbus_playback_trigger,
+@@ -814,6 +846,7 @@ static snd_pcm_uframes_t i2sbus_record_p
+ static const struct snd_pcm_ops i2sbus_record_ops = {
+ .open = i2sbus_record_open,
+ .close = i2sbus_record_close,
++ .hw_params = i2sbus_record_hw_params,
+ .hw_free = i2sbus_record_hw_free,
+ .prepare = i2sbus_record_prepare,
+ .trigger = i2sbus_record_trigger,
--- /dev/null
+From stable+bounces-242476-greg=kroah.com@vger.kernel.org Fri May 1 19:17:20 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 13:13:33 -0400
+Subject: ALSA: aoa: Skip devices with no codecs in i2sbus_resume()
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501171333.3681629-2-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit fd7df93013c5118812e63a52635dc6c3a805a1de ]
+
+In i2sbus_resume(), skip devices with an empty codec list, which avoids
+using an uninitialized 'sysclock_factor' in the 32-bit format path in
+i2sbus_pcm_prepare().
+
+In i2sbus_pcm_prepare(), replace two list_for_each_entry() loops with a
+single list_first_entry() now that the codec list is guaranteed to be
+non-empty by all callers.
+
+Fixes: f3d9478b2ce4 ("[ALSA] snd-aoa: add snd-aoa")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Link: https://patch.msgid.link/20260310102921.210109-3-thorsten.blum@linux.dev
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/aoa/soundbus/i2sbus/core.c | 3 +++
+ sound/aoa/soundbus/i2sbus/pcm.c | 16 +++++-----------
+ 2 files changed, 8 insertions(+), 11 deletions(-)
+
+--- a/sound/aoa/soundbus/i2sbus/core.c
++++ b/sound/aoa/soundbus/i2sbus/core.c
+@@ -410,6 +410,9 @@ static int i2sbus_resume(struct macio_de
+ int err, ret = 0;
+
+ list_for_each_entry(i2sdev, &control->list, item) {
++ if (list_empty(&i2sdev->sound.codec_list))
++ continue;
++
+ /* reset i2s bus format etc. */
+ i2sbus_pcm_prepare_both(i2sdev);
+
+--- a/sound/aoa/soundbus/i2sbus/pcm.c
++++ b/sound/aoa/soundbus/i2sbus/pcm.c
+@@ -411,6 +411,9 @@ static int i2sbus_pcm_prepare(struct i2s
+ /* set stop command */
+ command->command = cpu_to_le16(DBDMA_STOP);
+
++ cii = list_first_entry(&i2sdev->sound.codec_list,
++ struct codec_info_item, list);
++
+ /* ok, let's set the serial format and stuff */
+ switch (runtime->format) {
+ /* 16 bit formats */
+@@ -418,13 +421,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ case SNDRV_PCM_FORMAT_U16_BE:
+ /* FIXME: if we add different bus factors we need to
+ * do more here!! */
+- bi.bus_factor = 0;
+- list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
+- bi.bus_factor = cii->codec->bus_factor;
+- break;
+- }
+- if (!bi.bus_factor)
+- return -ENODEV;
++ bi.bus_factor = cii->codec->bus_factor;
+ input_16bit = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S32_BE:
+@@ -438,10 +435,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ return -EINVAL;
+ }
+ /* we assume all sysclocks are the same! */
+- list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
+- bi.sysclock_factor = cii->codec->sysclock_factor;
+- break;
+- }
++ bi.sysclock_factor = cii->codec->sysclock_factor;
+
+ if (clock_and_divisors(bi.sysclock_factor,
+ bi.bus_factor,
--- /dev/null
+From stable+bounces-242430-greg=kroah.com@vger.kernel.org Fri May 1 15:34:29 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 09:34:22 -0400
+Subject: ALSA: aoa: Use guard() for mutex locks
+To: stable@vger.kernel.org
+Cc: Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501133423.3365447-1-sashal@kernel.org>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1cb6ecbb372002ef9e531c5377e5f60122411e40 ]
+
+Replace the manual mutex lock/unlock pairs with guard() for code
+simplification.
+
+This is only code refactoring; there is no behavior change.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20250829151335.7342-14-tiwai@suse.de
+Stable-dep-of: 5ed060d54915 ("ALSA: aoa: i2sbus: clear stale prepared state")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/aoa/codecs/onyx.c | 104 +++++++++++-------------------------
+ sound/aoa/codecs/tas.c | 113 +++++++++++++---------------------------
+ sound/aoa/core/gpio-feature.c | 20 ++-----
+ sound/aoa/core/gpio-pmf.c | 26 +++------
+ sound/aoa/soundbus/i2sbus/pcm.c | 76 ++++++++------------------
+ 5 files changed, 112 insertions(+), 227 deletions(-)
+
+--- a/sound/aoa/codecs/onyx.c
++++ b/sound/aoa/codecs/onyx.c
+@@ -122,10 +122,9 @@ static int onyx_snd_vol_get(struct snd_k
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ s8 l, r;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT, &r);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] = l + VOLUME_RANGE_SHIFT;
+ ucontrol->value.integer.value[1] = r + VOLUME_RANGE_SHIFT;
+@@ -146,15 +145,13 @@ static int onyx_snd_vol_put(struct snd_k
+ ucontrol->value.integer.value[1] > -1 + VOLUME_RANGE_SHIFT)
+ return -EINVAL;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT, &r);
+
+ if (l + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[0] &&
+- r + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[1]) {
+- mutex_unlock(&onyx->mutex);
++ r + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ onyx_write_register(onyx, ONYX_REG_DAC_ATTEN_LEFT,
+ ucontrol->value.integer.value[0]
+@@ -162,7 +159,6 @@ static int onyx_snd_vol_put(struct snd_k
+ onyx_write_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT,
+ ucontrol->value.integer.value[1]
+ - VOLUME_RANGE_SHIFT);
+- mutex_unlock(&onyx->mutex);
+
+ return 1;
+ }
+@@ -198,9 +194,8 @@ static int onyx_snd_inputgain_get(struct
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 ig;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &ig);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] =
+ (ig & ONYX_ADC_PGA_GAIN_MASK) + INPUTGAIN_RANGE_SHIFT;
+@@ -217,14 +212,13 @@ static int onyx_snd_inputgain_put(struct
+ if (ucontrol->value.integer.value[0] < 3 + INPUTGAIN_RANGE_SHIFT ||
+ ucontrol->value.integer.value[0] > 28 + INPUTGAIN_RANGE_SHIFT)
+ return -EINVAL;
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
+ n = v;
+ n &= ~ONYX_ADC_PGA_GAIN_MASK;
+ n |= (ucontrol->value.integer.value[0] - INPUTGAIN_RANGE_SHIFT)
+ & ONYX_ADC_PGA_GAIN_MASK;
+ onyx_write_register(onyx, ONYX_REG_ADC_CONTROL, n);
+- mutex_unlock(&onyx->mutex);
+
+ return n != v;
+ }
+@@ -252,9 +246,8 @@ static int onyx_snd_capture_source_get(s
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ s8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.enumerated.item[0] = !!(v&ONYX_ADC_INPUT_MIC);
+
+@@ -265,13 +258,12 @@ static void onyx_set_capture_source(stru
+ {
+ s8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
+ v &= ~ONYX_ADC_INPUT_MIC;
+ if (mic)
+ v |= ONYX_ADC_INPUT_MIC;
+ onyx_write_register(onyx, ONYX_REG_ADC_CONTROL, v);
+- mutex_unlock(&onyx->mutex);
+ }
+
+ static int onyx_snd_capture_source_put(struct snd_kcontrol *kcontrol,
+@@ -312,9 +304,8 @@ static int onyx_snd_mute_get(struct snd_
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 c;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &c);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] = !(c & ONYX_MUTE_LEFT);
+ ucontrol->value.integer.value[1] = !(c & ONYX_MUTE_RIGHT);
+@@ -329,9 +320,9 @@ static int onyx_snd_mute_put(struct snd_
+ u8 v = 0, c = 0;
+ int err = -EBUSY;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ if (onyx->analog_locked)
+- goto out_unlock;
++ return -EBUSY;
+
+ onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &v);
+ c = v;
+@@ -342,9 +333,6 @@ static int onyx_snd_mute_put(struct snd_
+ c |= ONYX_MUTE_RIGHT;
+ err = onyx_write_register(onyx, ONYX_REG_DAC_CONTROL, c);
+
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+ return !err ? (v != c) : err;
+ }
+
+@@ -373,9 +361,8 @@ static int onyx_snd_single_bit_get(struc
+ u8 address = (pv >> 8) & 0xff;
+ u8 mask = pv & 0xff;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, address, &c);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] = !!(c & mask) ^ polarity;
+
+@@ -394,11 +381,10 @@ static int onyx_snd_single_bit_put(struc
+ u8 address = (pv >> 8) & 0xff;
+ u8 mask = pv & 0xff;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ if (spdiflock && onyx->spdif_locked) {
+ /* even if alsamixer doesn't care.. */
+- err = -EBUSY;
+- goto out_unlock;
++ return -EBUSY;
+ }
+ onyx_read_register(onyx, address, &v);
+ c = v;
+@@ -407,9 +393,6 @@ static int onyx_snd_single_bit_put(struc
+ c |= mask;
+ err = onyx_write_register(onyx, address, c);
+
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+ return !err ? (v != c) : err;
+ }
+
+@@ -490,7 +473,7 @@ static int onyx_spdif_get(struct snd_kco
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO1, &v);
+ ucontrol->value.iec958.status[0] = v & 0x3e;
+
+@@ -502,7 +485,6 @@ static int onyx_spdif_get(struct snd_kco
+
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
+ ucontrol->value.iec958.status[4] = v & 0x0f;
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -513,7 +495,7 @@ static int onyx_spdif_put(struct snd_kco
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO1, &v);
+ v = (v & ~0x3e) | (ucontrol->value.iec958.status[0] & 0x3e);
+ onyx_write_register(onyx, ONYX_REG_DIG_INFO1, v);
+@@ -528,7 +510,6 @@ static int onyx_spdif_put(struct snd_kco
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
+ v = (v & ~0x0f) | (ucontrol->value.iec958.status[4] & 0x0f);
+ onyx_write_register(onyx, ONYX_REG_DIG_INFO4, v);
+- mutex_unlock(&onyx->mutex);
+
+ return 1;
+ }
+@@ -673,14 +654,13 @@ static int onyx_usable(struct codec_info
+ struct onyx *onyx = cii->codec_data;
+ int spdif_enabled, analog_enabled;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
+ spdif_enabled = !!(v & ONYX_SPDIF_ENABLE);
+ onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &v);
+ analog_enabled =
+ (v & (ONYX_MUTE_RIGHT|ONYX_MUTE_LEFT))
+ != (ONYX_MUTE_RIGHT|ONYX_MUTE_LEFT);
+- mutex_unlock(&onyx->mutex);
+
+ switch (ti->tag) {
+ case 0: return 1;
+@@ -696,9 +676,8 @@ static int onyx_prepare(struct codec_inf
+ {
+ u8 v;
+ struct onyx *onyx = cii->codec_data;
+- int err = -EBUSY;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+
+ #ifdef SNDRV_PCM_FMTBIT_COMPRESSED_16BE
+ if (substream->runtime->format == SNDRV_PCM_FMTBIT_COMPRESSED_16BE) {
+@@ -707,10 +686,9 @@ static int onyx_prepare(struct codec_inf
+ if (onyx_write_register(onyx,
+ ONYX_REG_DAC_CONTROL,
+ v | ONYX_MUTE_RIGHT | ONYX_MUTE_LEFT))
+- goto out_unlock;
++ return -EBUSY;
+ onyx->analog_locked = 1;
+- err = 0;
+- goto out_unlock;
++ return 0;
+ }
+ #endif
+ switch (substream->runtime->rate) {
+@@ -720,8 +698,7 @@ static int onyx_prepare(struct codec_inf
+ /* these rates are ok for all outputs */
+ /* FIXME: program spdif channel control bits here so that
+ * userspace doesn't have to if it only plays pcm! */
+- err = 0;
+- goto out_unlock;
++ return 0;
+ default:
+ /* got some rate that the digital output can't do,
+ * so disable and lock it */
+@@ -729,16 +706,12 @@ static int onyx_prepare(struct codec_inf
+ if (onyx_write_register(onyx,
+ ONYX_REG_DIG_INFO4,
+ v & ~ONYX_SPDIF_ENABLE))
+- goto out_unlock;
++ return -EBUSY;
+ onyx->spdif_locked = 1;
+- err = 0;
+- goto out_unlock;
++ return 0;
+ }
+
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+- return err;
++ return -EBUSY;
+ }
+
+ static int onyx_open(struct codec_info_item *cii,
+@@ -746,9 +719,8 @@ static int onyx_open(struct codec_info_i
+ {
+ struct onyx *onyx = cii->codec_data;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx->open_count++;
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -758,11 +730,10 @@ static int onyx_close(struct codec_info_
+ {
+ struct onyx *onyx = cii->codec_data;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx->open_count--;
+ if (!onyx->open_count)
+ onyx->spdif_locked = onyx->analog_locked = 0;
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -772,7 +743,7 @@ static int onyx_switch_clock(struct code
+ {
+ struct onyx *onyx = cii->codec_data;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ /* this *MUST* be more elaborate later... */
+ switch (what) {
+ case CLOCK_SWITCH_PREPARE_SLAVE:
+@@ -784,7 +755,6 @@ static int onyx_switch_clock(struct code
+ default: /* silence warning */
+ break;
+ }
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -795,27 +765,21 @@ static int onyx_suspend(struct codec_inf
+ {
+ struct onyx *onyx = cii->codec_data;
+ u8 v;
+- int err = -ENXIO;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ if (onyx_read_register(onyx, ONYX_REG_CONTROL, &v))
+- goto out_unlock;
++ return -ENXIO;
+ onyx_write_register(onyx, ONYX_REG_CONTROL, v | ONYX_ADPSV | ONYX_DAPSV);
+ /* Apple does a sleep here but the datasheet says to do it on resume */
+- err = 0;
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+- return err;
++ return 0;
+ }
+
+ static int onyx_resume(struct codec_info_item *cii)
+ {
+ struct onyx *onyx = cii->codec_data;
+ u8 v;
+- int err = -ENXIO;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+
+ /* reset codec */
+ onyx->codec.gpio->methods->set_hw_reset(onyx->codec.gpio, 0);
+@@ -827,17 +791,13 @@ static int onyx_resume(struct codec_info
+
+ /* take codec out of suspend (if it still is after reset) */
+ if (onyx_read_register(onyx, ONYX_REG_CONTROL, &v))
+- goto out_unlock;
++ return -ENXIO;
+ onyx_write_register(onyx, ONYX_REG_CONTROL, v & ~(ONYX_ADPSV | ONYX_DAPSV));
+ /* FIXME: should divide by sample rate, but 8k is the lowest we go */
+ msleep(2205000/8000);
+ /* reset all values */
+ onyx_register_init(onyx);
+- err = 0;
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+- return err;
++ return 0;
+ }
+
+ #endif /* CONFIG_PM */
+--- a/sound/aoa/codecs/tas.c
++++ b/sound/aoa/codecs/tas.c
+@@ -235,10 +235,9 @@ static int tas_snd_vol_get(struct snd_kc
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->cached_volume_l;
+ ucontrol->value.integer.value[1] = tas->cached_volume_r;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -254,18 +253,15 @@ static int tas_snd_vol_put(struct snd_kc
+ ucontrol->value.integer.value[1] > 177)
+ return -EINVAL;
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ if (tas->cached_volume_l == ucontrol->value.integer.value[0]
+- && tas->cached_volume_r == ucontrol->value.integer.value[1]) {
+- mutex_unlock(&tas->mtx);
++ && tas->cached_volume_r == ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ tas->cached_volume_l = ucontrol->value.integer.value[0];
+ tas->cached_volume_r = ucontrol->value.integer.value[1];
+ if (tas->hw_enabled)
+ tas_set_volume(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -285,10 +281,9 @@ static int tas_snd_mute_get(struct snd_k
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = !tas->mute_l;
+ ucontrol->value.integer.value[1] = !tas->mute_r;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -297,18 +292,15 @@ static int tas_snd_mute_put(struct snd_k
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ if (tas->mute_l == !ucontrol->value.integer.value[0]
+- && tas->mute_r == !ucontrol->value.integer.value[1]) {
+- mutex_unlock(&tas->mtx);
++ && tas->mute_r == !ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ tas->mute_l = !ucontrol->value.integer.value[0];
+ tas->mute_r = !ucontrol->value.integer.value[1];
+ if (tas->hw_enabled)
+ tas_set_volume(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -337,10 +329,9 @@ static int tas_snd_mixer_get(struct snd_
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+ int idx = kcontrol->private_value;
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->mixer_l[idx];
+ ucontrol->value.integer.value[1] = tas->mixer_r[idx];
+- mutex_unlock(&tas->mtx);
+
+ return 0;
+ }
+@@ -351,19 +342,16 @@ static int tas_snd_mixer_put(struct snd_
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+ int idx = kcontrol->private_value;
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ if (tas->mixer_l[idx] == ucontrol->value.integer.value[0]
+- && tas->mixer_r[idx] == ucontrol->value.integer.value[1]) {
+- mutex_unlock(&tas->mtx);
++ && tas->mixer_r[idx] == ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ tas->mixer_l[idx] = ucontrol->value.integer.value[0];
+ tas->mixer_r[idx] = ucontrol->value.integer.value[1];
+
+ if (tas->hw_enabled)
+ tas_set_mixer(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -396,9 +384,8 @@ static int tas_snd_drc_range_get(struct
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->drc_range;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -411,16 +398,13 @@ static int tas_snd_drc_range_put(struct
+ ucontrol->value.integer.value[0] > TAS3004_DRC_MAX)
+ return -EINVAL;
+
+- mutex_lock(&tas->mtx);
+- if (tas->drc_range == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->drc_range == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->drc_range = ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas3004_set_drc(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -440,9 +424,8 @@ static int tas_snd_drc_switch_get(struct
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->drc_enabled;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -451,16 +434,13 @@ static int tas_snd_drc_switch_put(struct
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
+- if (tas->drc_enabled == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->drc_enabled == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->drc_enabled = !!ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas3004_set_drc(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -486,9 +466,8 @@ static int tas_snd_capture_source_get(st
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.enumerated.item[0] = !!(tas->acr & TAS_ACR_INPUT_B);
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -500,7 +479,7 @@ static int tas_snd_capture_source_put(st
+
+ if (ucontrol->value.enumerated.item[0] > 1)
+ return -EINVAL;
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ oldacr = tas->acr;
+
+ /*
+@@ -512,13 +491,10 @@ static int tas_snd_capture_source_put(st
+ if (ucontrol->value.enumerated.item[0])
+ tas->acr |= TAS_ACR_INPUT_B | TAS_ACR_B_MONAUREAL |
+ TAS_ACR_B_MON_SEL_RIGHT;
+- if (oldacr == tas->acr) {
+- mutex_unlock(&tas->mtx);
++ if (oldacr == tas->acr)
+ return 0;
+- }
+ if (tas->hw_enabled)
+ tas_write_reg(tas, TAS_REG_ACR, 1, &tas->acr);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -557,9 +533,8 @@ static int tas_snd_treble_get(struct snd
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->treble;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -571,16 +546,13 @@ static int tas_snd_treble_put(struct snd
+ if (ucontrol->value.integer.value[0] < TAS3004_TREBLE_MIN ||
+ ucontrol->value.integer.value[0] > TAS3004_TREBLE_MAX)
+ return -EINVAL;
+- mutex_lock(&tas->mtx);
+- if (tas->treble == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->treble == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->treble = ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas_set_treble(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -608,9 +580,8 @@ static int tas_snd_bass_get(struct snd_k
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->bass;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -622,16 +593,13 @@ static int tas_snd_bass_put(struct snd_k
+ if (ucontrol->value.integer.value[0] < TAS3004_BASS_MIN ||
+ ucontrol->value.integer.value[0] > TAS3004_BASS_MAX)
+ return -EINVAL;
+- mutex_lock(&tas->mtx);
+- if (tas->bass == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->bass == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->bass = ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas_set_bass(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -722,13 +690,13 @@ static int tas_switch_clock(struct codec
+ break;
+ case CLOCK_SWITCH_SLAVE:
+ /* Clocks are back, re-init the codec */
+- mutex_lock(&tas->mtx);
+- tas_reset_init(tas);
+- tas_set_volume(tas);
+- tas_set_mixer(tas);
+- tas->hw_enabled = 1;
+- tas->codec.gpio->methods->all_amps_restore(tas->codec.gpio);
+- mutex_unlock(&tas->mtx);
++ scoped_guard(mutex, &tas->mtx) {
++ tas_reset_init(tas);
++ tas_set_volume(tas);
++ tas_set_mixer(tas);
++ tas->hw_enabled = 1;
++ tas->codec.gpio->methods->all_amps_restore(tas->codec.gpio);
++ }
+ break;
+ default:
+ /* doesn't happen as of now */
+@@ -743,23 +711,21 @@ static int tas_switch_clock(struct codec
+ * our i2c device is suspended, and then take note of that! */
+ static int tas_suspend(struct tas *tas)
+ {
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ tas->hw_enabled = 0;
+ tas->acr |= TAS_ACR_ANALOG_PDOWN;
+ tas_write_reg(tas, TAS_REG_ACR, 1, &tas->acr);
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+ static int tas_resume(struct tas *tas)
+ {
+ /* reset codec */
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ tas_reset_init(tas);
+ tas_set_volume(tas);
+ tas_set_mixer(tas);
+ tas->hw_enabled = 1;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -802,14 +768,13 @@ static int tas_init_codec(struct aoa_cod
+ return -EINVAL;
+ }
+
+- mutex_lock(&tas->mtx);
+- if (tas_reset_init(tas)) {
+- printk(KERN_ERR PFX "tas failed to initialise\n");
+- mutex_unlock(&tas->mtx);
+- return -ENXIO;
++ scoped_guard(mutex, &tas->mtx) {
++ if (tas_reset_init(tas)) {
++ printk(KERN_ERR PFX "tas failed to initialise\n");
++ return -ENXIO;
++ }
++ tas->hw_enabled = 1;
+ }
+- tas->hw_enabled = 1;
+- mutex_unlock(&tas->mtx);
+
+ if (tas->codec.soundbus_dev->attach_codec(tas->codec.soundbus_dev,
+ aoa_get_card(),
+--- a/sound/aoa/core/gpio-feature.c
++++ b/sound/aoa/core/gpio-feature.c
+@@ -212,10 +212,9 @@ static void ftr_handle_notify(struct wor
+ struct gpio_notification *notif =
+ container_of(work, struct gpio_notification, work.work);
+
+- mutex_lock(&notif->mutex);
++ guard(mutex)(&notif->mutex);
+ if (notif->notify)
+ notif->notify(notif->data);
+- mutex_unlock(&notif->mutex);
+ }
+
+ static void gpio_enable_dual_edge(int gpio)
+@@ -341,19 +340,17 @@ static int ftr_set_notify(struct gpio_ru
+ if (!irq)
+ return -ENODEV;
+
+- mutex_lock(&notif->mutex);
++ guard(mutex)(&notif->mutex);
+
+ old = notif->notify;
+
+- if (!old && !notify) {
+- err = 0;
+- goto out_unlock;
+- }
++ if (!old && !notify)
++ return 0;
+
+ if (old && notify) {
+ if (old == notify && notif->data == data)
+ err = 0;
+- goto out_unlock;
++ return err;
+ }
+
+ if (old && !notify)
+@@ -362,16 +359,13 @@ static int ftr_set_notify(struct gpio_ru
+ if (!old && notify) {
+ err = request_irq(irq, ftr_handle_notify_irq, 0, name, notif);
+ if (err)
+- goto out_unlock;
++ return err;
+ }
+
+ notif->notify = notify;
+ notif->data = data;
+
+- err = 0;
+- out_unlock:
+- mutex_unlock(&notif->mutex);
+- return err;
++ return 0;
+ }
+
+ static int ftr_get_detect(struct gpio_runtime *rt,
+--- a/sound/aoa/core/gpio-pmf.c
++++ b/sound/aoa/core/gpio-pmf.c
+@@ -74,10 +74,9 @@ static void pmf_handle_notify(struct wor
+ struct gpio_notification *notif =
+ container_of(work, struct gpio_notification, work.work);
+
+- mutex_lock(&notif->mutex);
++ guard(mutex)(&notif->mutex);
+ if (notif->notify)
+ notif->notify(notif->data);
+- mutex_unlock(&notif->mutex);
+ }
+
+ static void pmf_gpio_init(struct gpio_runtime *rt)
+@@ -154,19 +153,17 @@ static int pmf_set_notify(struct gpio_ru
+ return -EINVAL;
+ }
+
+- mutex_lock(&notif->mutex);
++ guard(mutex)(&notif->mutex);
+
+ old = notif->notify;
+
+- if (!old && !notify) {
+- err = 0;
+- goto out_unlock;
+- }
++ if (!old && !notify)
++ return 0;
+
+ if (old && notify) {
+ if (old == notify && notif->data == data)
+ err = 0;
+- goto out_unlock;
++ return err;
+ }
+
+ if (old && !notify) {
+@@ -178,10 +175,8 @@ static int pmf_set_notify(struct gpio_ru
+ if (!old && notify) {
+ irq_client = kzalloc(sizeof(struct pmf_irq_client),
+ GFP_KERNEL);
+- if (!irq_client) {
+- err = -ENOMEM;
+- goto out_unlock;
+- }
++ if (!irq_client)
++ return -ENOMEM;
+ irq_client->data = notif;
+ irq_client->handler = pmf_handle_notify_irq;
+ irq_client->owner = THIS_MODULE;
+@@ -192,17 +187,14 @@ static int pmf_set_notify(struct gpio_ru
+ printk(KERN_ERR "snd-aoa: gpio layer failed to"
+ " register %s irq (%d)\n", name, err);
+ kfree(irq_client);
+- goto out_unlock;
++ return err;
+ }
+ notif->gpio_private = irq_client;
+ }
+ notif->notify = notify;
+ notif->data = data;
+
+- err = 0;
+- out_unlock:
+- mutex_unlock(&notif->mutex);
+- return err;
++ return 0;
+ }
+
+ static int pmf_get_detect(struct gpio_runtime *rt,
+--- a/sound/aoa/soundbus/i2sbus/pcm.c
++++ b/sound/aoa/soundbus/i2sbus/pcm.c
+@@ -79,11 +79,10 @@ static int i2sbus_pcm_open(struct i2sbus
+ u64 formats = 0;
+ unsigned int rates = 0;
+ struct transfer_info v;
+- int result = 0;
+ int bus_factor = 0, sysclock_factor = 0;
+ int found_this;
+
+- mutex_lock(&i2sdev->lock);
++ guard(mutex)(&i2sdev->lock);
+
+ get_pcm_info(i2sdev, in, &pi, &other);
+
+@@ -92,8 +91,7 @@ static int i2sbus_pcm_open(struct i2sbus
+
+ if (pi->active) {
+ /* alsa messed up */
+- result = -EBUSY;
+- goto out_unlock;
++ return -EBUSY;
+ }
+
+ /* we now need to assign the hw */
+@@ -117,10 +115,8 @@ static int i2sbus_pcm_open(struct i2sbus
+ ti++;
+ }
+ }
+- if (!masks_inited || !bus_factor || !sysclock_factor) {
+- result = -ENODEV;
+- goto out_unlock;
+- }
++ if (!masks_inited || !bus_factor || !sysclock_factor)
++ return -ENODEV;
+ /* bus dependent stuff */
+ hw->info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME |
+@@ -194,15 +190,12 @@ static int i2sbus_pcm_open(struct i2sbus
+ hw->periods_max = MAX_DBDMA_COMMANDS;
+ err = snd_pcm_hw_constraint_integer(pi->substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+- if (err < 0) {
+- result = err;
+- goto out_unlock;
+- }
++ if (err < 0)
++ return err;
+ list_for_each_entry(cii, &sdev->codec_list, list) {
+ if (cii->codec->open) {
+ err = cii->codec->open(cii, pi->substream);
+ if (err) {
+- result = err;
+ /* unwind */
+ found_this = 0;
+ list_for_each_entry_reverse(rev,
+@@ -214,14 +207,12 @@ static int i2sbus_pcm_open(struct i2sbus
+ if (rev == cii)
+ found_this = 1;
+ }
+- goto out_unlock;
++ return err;
+ }
+ }
+ }
+
+- out_unlock:
+- mutex_unlock(&i2sdev->lock);
+- return result;
++ return 0;
+ }
+
+ #undef CHECK_RATE
+@@ -232,7 +223,7 @@ static int i2sbus_pcm_close(struct i2sbu
+ struct pcm_info *pi;
+ int err = 0, tmp;
+
+- mutex_lock(&i2sdev->lock);
++ guard(mutex)(&i2sdev->lock);
+
+ get_pcm_info(i2sdev, in, &pi, NULL);
+
+@@ -246,7 +237,6 @@ static int i2sbus_pcm_close(struct i2sbu
+
+ pi->substream = NULL;
+ pi->active = 0;
+- mutex_unlock(&i2sdev->lock);
+ return err;
+ }
+
+@@ -330,33 +320,26 @@ static int i2sbus_pcm_prepare(struct i2s
+ int input_16bit;
+ struct pcm_info *pi, *other;
+ int cnt;
+- int result = 0;
+ unsigned int cmd, stopaddr;
+
+- mutex_lock(&i2sdev->lock);
++ guard(mutex)(&i2sdev->lock);
+
+ get_pcm_info(i2sdev, in, &pi, &other);
+
+- if (pi->dbdma_ring.running) {
+- result = -EBUSY;
+- goto out_unlock;
+- }
++ if (pi->dbdma_ring.running)
++ return -EBUSY;
+ if (pi->dbdma_ring.stopping)
+ i2sbus_wait_for_stop(i2sdev, pi);
+
+- if (!pi->substream || !pi->substream->runtime) {
+- result = -EINVAL;
+- goto out_unlock;
+- }
++ if (!pi->substream || !pi->substream->runtime)
++ return -EINVAL;
+
+ runtime = pi->substream->runtime;
+ pi->active = 1;
+ if (other->active &&
+ ((i2sdev->format != runtime->format)
+- || (i2sdev->rate != runtime->rate))) {
+- result = -EINVAL;
+- goto out_unlock;
+- }
++ || (i2sdev->rate != runtime->rate)))
++ return -EINVAL;
+
+ i2sdev->format = runtime->format;
+ i2sdev->rate = runtime->rate;
+@@ -412,10 +395,8 @@ static int i2sbus_pcm_prepare(struct i2s
+ bi.bus_factor = cii->codec->bus_factor;
+ break;
+ }
+- if (!bi.bus_factor) {
+- result = -ENODEV;
+- goto out_unlock;
+- }
++ if (!bi.bus_factor)
++ return -ENODEV;
+ input_16bit = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S32_BE:
+@@ -426,8 +407,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ input_16bit = 0;
+ break;
+ default:
+- result = -EINVAL;
+- goto out_unlock;
++ return -EINVAL;
+ }
+ /* we assume all sysclocks are the same! */
+ list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
+@@ -438,10 +418,8 @@ static int i2sbus_pcm_prepare(struct i2s
+ if (clock_and_divisors(bi.sysclock_factor,
+ bi.bus_factor,
+ runtime->rate,
+- &sfr) < 0) {
+- result = -EINVAL;
+- goto out_unlock;
+- }
++ &sfr) < 0)
++ return -EINVAL;
+ switch (bi.bus_factor) {
+ case 32:
+ sfr |= I2S_SF_SERIAL_FORMAT_I2S_32X;
+@@ -457,10 +435,8 @@ static int i2sbus_pcm_prepare(struct i2s
+ int err = 0;
+ if (cii->codec->prepare)
+ err = cii->codec->prepare(cii, &bi, pi->substream);
+- if (err) {
+- result = err;
+- goto out_unlock;
+- }
++ if (err)
++ return err;
+ }
+ /* codecs are fine with it, so set our clocks */
+ if (input_16bit)
+@@ -476,7 +452,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ /* not locking these is fine since we touch them only in this function */
+ if (in_le32(&i2sdev->intfregs->serial_format) == sfr
+ && in_le32(&i2sdev->intfregs->data_word_sizes) == dws)
+- goto out_unlock;
++ return 0;
+
+ /* let's notify the codecs about clocks going away.
+ * For now we only do mastering on the i2s cell... */
+@@ -514,9 +490,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ if (cii->codec->switch_clock)
+ cii->codec->switch_clock(cii, CLOCK_SWITCH_SLAVE);
+
+- out_unlock:
+- mutex_unlock(&i2sdev->lock);
+- return result;
++ return 0;
+ }
+
+ #ifdef CONFIG_PM
--- /dev/null
+From stable+bounces-242624-greg=kroah.com@vger.kernel.org Sun May 3 01:30:24 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 May 2026 19:30:00 -0400
+Subject: block: relax pgmap check in bio_add_page for compatible zone device pages
+To: stable@vger.kernel.org
+Cc: Naman Jain <namjain@linux.microsoft.com>, Christoph Hellwig <hch@lst.de>, Jens Axboe <axboe@kernel.dk>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260502233000.914887-1-sashal@kernel.org>
+
+From: Naman Jain <namjain@linux.microsoft.com>
+
+[ Upstream commit 41c665aae2b5dbecddddcc8ace344caf630cc7a4 ]
+
+bio_add_page() and bio_integrity_add_page() reject pages from different
+dev_pagemaps entirely, returning 0 even when those pages have compatible
+DMA mapping requirements. This forces callers to start a new bio when
+buffers span pgmap boundaries, even though the pages could safely coexist
+as separate bvec entries.
+
+This matters for guests where memory is registered through
+devm_memremap_pages() with MEMORY_DEVICE_GENERIC in multiple calls,
+creating separate dev_pagemaps for each chunk. When a direct I/O buffer
+spans two such chunks, bio_add_page() rejects the second page, forcing an
+unnecessary bio split or I/O failure.
+
+Introduce zone_device_pages_compatible() in blk.h to check whether two
+pages can coexist in the same bio as separate bvec entries. The block DMA
+iterator (blk_dma_map_iter_start) caches the P2PDMA mapping state from the
+first segment and applies it to all others, so P2PDMA pages from different
+pgmaps must not be mixed, and neither must P2PDMA and non-P2PDMA pages.
+All other combinations (MEMORY_DEVICE_GENERIC pages from different pgmaps,
+or MEMORY_DEVICE_GENERIC with normal RAM) use the same dma_map_phys path
+and are safe.
+
+Replace the blanket zone_device_pages_have_same_pgmap() rejection with
+zone_device_pages_compatible(), while keeping
+zone_device_pages_have_same_pgmap() as a merge guard.
+Pages from different pgmaps can be added as separate bvec entries but
+must not be coalesced into the same segment, as that would make
+it impossible to recover the correct pgmap via page_pgmap().
+
+Fixes: 49580e690755 ("block: add check when merging zone device pages")
+Cc: stable@vger.kernel.org
+Signed-off-by: Naman Jain <namjain@linux.microsoft.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://patch.msgid.link/20260410153414.4159050-3-namjain@linux.microsoft.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ restructured combined `if` into explicit `bv` block ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bio-integrity.c | 2 ++
+ block/bio.c | 14 +++++++++-----
+ block/blk.h | 19 +++++++++++++++++++
+ 3 files changed, 30 insertions(+), 5 deletions(-)
+
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -167,6 +167,8 @@ int bio_integrity_add_page(struct bio *b
+ struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
+ bool same_page = false;
+
++ if (!zone_device_pages_compatible(bv->bv_page, page))
++ return 0;
+ if (bvec_try_merge_hw_page(q, bv, page, len, offset,
+ &same_page)) {
+ bip->bip_iter.bi_size += len;
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1155,11 +1155,15 @@ int bio_add_page(struct bio *bio, struct
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return 0;
+
+- if (bio->bi_vcnt > 0 &&
+- bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
+- page, len, offset, &same_page)) {
+- bio->bi_iter.bi_size += len;
+- return len;
++ if (bio->bi_vcnt > 0) {
++ struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
++
++ if (!zone_device_pages_compatible(bv->bv_page, page))
++ return 0;
++ if (bvec_try_merge_page(bv, page, len, offset, &same_page)) {
++ bio->bi_iter.bi_size += len;
++ return len;
++ }
+ }
+
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -124,6 +124,25 @@ static inline bool biovec_phys_mergeable
+ return true;
+ }
+
++/*
++ * Check if two pages from potentially different zone device pgmaps can
++ * coexist as separate bvec entries in the same bio.
++ *
++ * The block DMA iterator (blk_dma_map_iter_start) caches the P2PDMA mapping
++ * state from the first segment and applies it to all subsequent segments, so
++ * P2PDMA pages from different pgmaps must not be mixed in the same bio.
++ *
++ * Other zone device types (FS_DAX, GENERIC) use the same dma_map_phys() path
++ * as normal RAM. PRIVATE and COHERENT pages never appear in bios.
++ */
++static inline bool zone_device_pages_compatible(const struct page *a,
++ const struct page *b)
++{
++ if (is_pci_p2pdma_page(a) || is_pci_p2pdma_page(b))
++ return zone_device_pages_have_same_pgmap(a, b);
++ return true;
++}
++
+ static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
+ struct bio_vec *bprv, unsigned int offset)
+ {
--- /dev/null
+From stable+bounces-241122-greg=kroah.com@vger.kernel.org Sat Apr 25 12:35:44 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 06:35:38 -0400
+Subject: f2fs: fix to do sanity check on dcc->discard_cmd_cnt conditionally
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, stable@kernel.org, syzbot+62538b67389ee582837a@syzkaller.appspotmail.com, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425103538.3545481-1-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 6af249c996f7d73a3435f9e577956fa259347d18 ]
+
+Syzbot reported a f2fs bug as below:
+
+------------[ cut here ]------------
+kernel BUG at fs/f2fs/segment.c:1900!
+Oops: invalid opcode: 0000 [#1] SMP KASAN PTI
+CPU: 1 UID: 0 PID: 6527 Comm: syz.5.110 Not tainted syzkaller #0 PREEMPT_{RT,(full)}
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2026
+RIP: 0010:f2fs_issue_discard_timeout+0x59b/0x5a0 fs/f2fs/segment.c:1900
+Code: d9 80 e1 07 80 c1 03 38 c1 0f 8c d6 fe ff ff 48 89 df e8 a8 5e fa fd e9 c9 fe ff ff e8 4e 46 94 fd 90 0f 0b e8 46 46 94 fd 90 <0f> 0b 0f 1f 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3
+RSP: 0018:ffffc9000494f940 EFLAGS: 00010283
+RAX: ffffffff843009ca RBX: 0000000000000001 RCX: 0000000000080000
+RDX: ffffc9001ca78000 RSI: 00000000000029f3 RDI: 00000000000029f4
+RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
+R10: dffffc0000000000 R11: ffffed100893a431 R12: 1ffff1100893a430
+R13: 1ffff1100c2b702c R14: dffffc0000000000 R15: ffff8880449d2160
+FS: 00007ffa35fed6c0(0000) GS:ffff88812643d000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f2b68634000 CR3: 0000000039f62000 CR4: 00000000003526f0
+Call Trace:
+ <TASK>
+ __f2fs_remount fs/f2fs/super.c:2960 [inline]
+ f2fs_reconfigure+0x108a/0x1710 fs/f2fs/super.c:5443
+ reconfigure_super+0x227/0x8a0 fs/super.c:1080
+ do_remount fs/namespace.c:3391 [inline]
+ path_mount+0xdc5/0x10e0 fs/namespace.c:4151
+ do_mount fs/namespace.c:4172 [inline]
+ __do_sys_mount fs/namespace.c:4361 [inline]
+ __se_sys_mount+0x31d/0x420 fs/namespace.c:4338
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x14d/0xf80 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7ffa37dbda0a
+
+The root cause is there will be race condition in between f2fs_ioc_fitrim()
+and f2fs_remount():
+
+- f2fs_remount - f2fs_ioc_fitrim
+ - f2fs_issue_discard_timeout
+ - __issue_discard_cmd
+ - __drop_discard_cmd
+ - __wait_all_discard_cmd
+ - f2fs_trim_fs
+ - f2fs_write_checkpoint
+ - f2fs_clear_prefree_segments
+ - f2fs_issue_discard
+ - __issue_discard_async
+ - __queue_discard_cmd
+ - __update_discard_tree_range
+ - __insert_discard_cmd
+ - __create_discard_cmd
+ : atomic_inc(&dcc->discard_cmd_cnt);
+ - sanity check on dcc->discard_cmd_cnt (expect discard_cmd_cnt to be zero)
+
+This will only happen when fitrim races w/ remount rw; if we remount to a
+readonly filesystem, remount will wait until mnt_pcp.mnt_writers drops to
+zero, which means fitrim is not in progress at that time.
+
+Cc: stable@kernel.org
+Fixes: 2482c4325dfe ("f2fs: detect bug_on in f2fs_wait_discard_bios")
+Reported-by: syzbot+62538b67389ee582837a@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-f2fs-devel/69b07d7c.050a0220.8df7.09a1.GAE@google.com
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ dereferenced flags pointer (`*flags & SB_RDONLY`) to match `int *flags` remount signature ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/f2fs.h | 2 +-
+ fs/f2fs/segment.c | 6 +++---
+ fs/f2fs/super.c | 11 ++++++++---
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3782,7 +3782,7 @@ bool f2fs_is_checkpointed_data(struct f2
+ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
+ void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
+ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
+-bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
++bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi, bool need_check);
+ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+ struct cp_control *cpc);
+ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1885,7 +1885,7 @@ void f2fs_stop_discard_thread(struct f2f
+ *
+ * Return true if issued all discard cmd or no discard cmd need issue, otherwise return false.
+ */
+-bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
++bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi, bool need_check)
+ {
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_policy dpolicy;
+@@ -1902,7 +1902,7 @@ bool f2fs_issue_discard_timeout(struct f
+ /* just to make sure there is no pending discard commands */
+ __wait_all_discard_cmd(sbi, NULL);
+
+- f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
++ f2fs_bug_on(sbi, need_check && atomic_read(&dcc->discard_cmd_cnt));
+ return !dropped;
+ }
+
+@@ -2371,7 +2371,7 @@ static void destroy_discard_cmd_control(
+ * Recovery can cache discard commands, so in error path of
+ * fill_super(), it needs to give a chance to handle them.
+ */
+- f2fs_issue_discard_timeout(sbi);
++ f2fs_issue_discard_timeout(sbi, true);
+
+ kfree(dcc);
+ SM_I(sbi)->dcc_info = NULL;
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1628,7 +1628,7 @@ static void f2fs_put_super(struct super_
+ }
+
+ /* be sure to wait for any on-going discard commands */
+- done = f2fs_issue_discard_timeout(sbi);
++ done = f2fs_issue_discard_timeout(sbi, true);
+ if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT | CP_TRIMMED,
+@@ -1767,7 +1767,7 @@ static int f2fs_unfreeze(struct super_bl
+ * will recover after removal of snapshot.
+ */
+ if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
+- f2fs_issue_discard_timeout(sbi);
++ f2fs_issue_discard_timeout(sbi, true);
+
+ clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
+ return 0;
+@@ -2535,7 +2535,12 @@ static int f2fs_remount(struct super_blo
+ need_stop_discard = true;
+ } else {
+ f2fs_stop_discard_thread(sbi);
+- f2fs_issue_discard_timeout(sbi);
++ /*
++ * f2fs_ioc_fitrim() won't race w/ "remount ro"
++ * so it's safe to check discard_cmd_cnt in
++ * f2fs_issue_discard_timeout().
++ */
++ f2fs_issue_discard_timeout(sbi, *flags & SB_RDONLY);
+ need_restart_discard = true;
+ }
+ }
--- /dev/null
+From stable+bounces-240655-greg=kroah.com@vger.kernel.org Fri Apr 24 15:05:07 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2026 09:03:55 -0400
+Subject: f2fs: fix UAF caused by decrementing sbi->nr_pages[] in f2fs_write_end_io()
+To: stable@vger.kernel.org
+Cc: Yongpeng Yang <yangyongpeng@xiaomi.com>, stable@kernel.org, syzbot+6e4cb1cac5efc96ea0ca@syzkaller.appspotmail.com, Chao Yu <chao@kernel.org>, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260424130355.1917635-1-sashal@kernel.org>
+
+From: Yongpeng Yang <yangyongpeng@xiaomi.com>
+
+[ Upstream commit 2d9c4a4ed4eef1f82c5b16b037aee8bad819fd53 ]
+
+The xfstests case "generic/107" and syzbot have both reported a NULL
+pointer dereference.
+
+The concurrent scenario that triggers the panic is as follows:
+
+F2FS_WB_CP_DATA write callback umount
+ - f2fs_write_checkpoint
+ - f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA)
+- blk_mq_end_request
+ - bio_endio
+ - f2fs_write_end_io
+ : dec_page_count(sbi, F2FS_WB_CP_DATA)
+ : wake_up(&sbi->cp_wait)
+ - kill_f2fs_super
+ - kill_block_super
+ - f2fs_put_super
+ : iput(sbi->node_inode)
+ : sbi->node_inode = NULL
+ : f2fs_in_warm_node_list
+ - is_node_folio // sbi->node_inode is NULL and panic
+
+The root cause is that f2fs_put_super() calls iput(sbi->node_inode) and
+sets sbi->node_inode to NULL after sbi->nr_pages[F2FS_WB_CP_DATA] is
+decremented to zero. As a result, f2fs_in_warm_node_list() may
+dereference a NULL node_inode when checking whether a folio belongs to
+the node inode, leading to a panic.
+
+This patch fixes the issue by calling f2fs_in_warm_node_list() before
+decrementing sbi->nr_pages[F2FS_WB_CP_DATA], thus preventing the
+use-after-free condition.
+
+Cc: stable@kernel.org
+Fixes: 50fa53eccf9f ("f2fs: fix to avoid broken of dnode block list")
+Reported-by: syzbot+6e4cb1cac5efc96ea0ca@syzkaller.appspotmail.com
+Signed-off-by: Yongpeng Yang <yangyongpeng@xiaomi.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ folio => page ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/data.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -355,6 +355,8 @@ static void f2fs_write_end_io(struct bio
+
+ f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
+ page_folio(page)->index != nid_of_node(page));
++ if (f2fs_in_warm_node_list(sbi, page))
++ f2fs_del_fsync_node_entry(sbi, page);
+
+ dec_page_count(sbi, type);
+
+@@ -366,8 +368,6 @@ static void f2fs_write_end_io(struct bio
+ wq_has_sleeper(&sbi->cp_wait))
+ wake_up(&sbi->cp_wait);
+
+- if (f2fs_in_warm_node_list(sbi, page))
+- f2fs_del_fsync_node_entry(sbi, page);
+ clear_page_private_gcing(page);
+ end_page_writeback(page);
+ }
--- /dev/null
+From stable+bounces-242640-greg=kroah.com@vger.kernel.org Sun May 3 06:38:08 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 May 2026 00:37:46 -0400
+Subject: iio: frequency: admv1013: add dev variable
+To: stable@vger.kernel.org
+Cc: Antoniu Miclaus <antoniu.miclaus@analog.com>, Andy Shevchenko <andriy.shevchenko@intel.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260503043747.985863-1-sashal@kernel.org>
+
+From: Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+[ Upstream commit e61b5bb0e91390adee41eaddc0a1a7d55d5652b2 ]
+
+Introduce a local struct device pointer in functions that reference
+&spi->dev for device-managed resource calls and device property reads,
+improving code readability.
+
+Signed-off-by: Antoniu Miclaus <antoniu.miclaus@analog.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: aac0a51b1670 ("iio: frequency: admv1013: fix NULL pointer dereference on str")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/frequency/admv1013.c | 29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+--- a/drivers/iio/frequency/admv1013.c
++++ b/drivers/iio/frequency/admv1013.c
+@@ -518,11 +518,11 @@ static int admv1013_properties_parse(str
+ {
+ int ret;
+ const char *str;
+- struct spi_device *spi = st->spi;
++ struct device *dev = &st->spi->dev;
+
+- st->det_en = device_property_read_bool(&spi->dev, "adi,detector-enable");
++ st->det_en = device_property_read_bool(dev, "adi,detector-enable");
+
+- ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
++ ret = device_property_read_string(dev, "adi,input-mode", &str);
+ if (ret)
+ st->input_mode = ADMV1013_IQ_MODE;
+
+@@ -533,7 +533,7 @@ static int admv1013_properties_parse(str
+ else
+ return -EINVAL;
+
+- ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
++ ret = device_property_read_string(dev, "adi,quad-se-mode", &str);
+ if (ret)
+ st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
+
+@@ -546,11 +546,11 @@ static int admv1013_properties_parse(str
+ else
+ return -EINVAL;
+
+- ret = devm_regulator_bulk_get_enable(&st->spi->dev,
++ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(admv1013_vcc_regs),
+ admv1013_vcc_regs);
+ if (ret) {
+- dev_err_probe(&spi->dev, ret,
++ dev_err_probe(dev, ret,
+ "Failed to request VCC regulators\n");
+ return ret;
+ }
+@@ -562,9 +562,10 @@ static int admv1013_probe(struct spi_dev
+ {
+ struct iio_dev *indio_dev;
+ struct admv1013_state *st;
++ struct device *dev = &spi->dev;
+ int ret, vcm_uv;
+
+- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
++ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+@@ -581,20 +582,20 @@ static int admv1013_probe(struct spi_dev
+ if (ret)
+ return ret;
+
+- ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vcm");
++ ret = devm_regulator_get_enable_read_voltage(dev, "vcm");
+ if (ret < 0)
+- return dev_err_probe(&spi->dev, ret,
++ return dev_err_probe(dev, ret,
+ "failed to get the common-mode voltage\n");
+
+ vcm_uv = ret;
+
+- st->clkin = devm_clk_get_enabled(&spi->dev, "lo_in");
++ st->clkin = devm_clk_get_enabled(dev, "lo_in");
+ if (IS_ERR(st->clkin))
+- return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
++ return dev_err_probe(dev, PTR_ERR(st->clkin),
+ "failed to get the LO input clock\n");
+
+ st->nb.notifier_call = admv1013_freq_change;
+- ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb);
++ ret = devm_clk_notifier_register(dev, st->clkin, &st->nb);
+ if (ret)
+ return ret;
+
+@@ -606,11 +607,11 @@ static int admv1013_probe(struct spi_dev
+ return ret;
+ }
+
+- ret = devm_add_action_or_reset(&spi->dev, admv1013_powerdown, st);
++ ret = devm_add_action_or_reset(dev, admv1013_powerdown, st);
+ if (ret)
+ return ret;
+
+- return devm_iio_device_register(&spi->dev, indio_dev);
++ return devm_iio_device_register(dev, indio_dev);
+ }
+
+ static const struct spi_device_id admv1013_id[] = {
--- /dev/null
+From stable+bounces-242641-greg=kroah.com@vger.kernel.org Sun May 3 06:38:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 May 2026 00:37:47 -0400
+Subject: iio: frequency: admv1013: fix NULL pointer dereference on str
+To: stable@vger.kernel.org
+Cc: "Antoniu Miclaus" <antoniu.miclaus@analog.com>, "Nuno Sá" <nuno.sa@analog.com>, "Andy Shevchenko" <andriy.shevchenko@intel.com>, Stable@vger.kernel.org, "Jonathan Cameron" <Jonathan.Cameron@huawei.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260503043747.985863-2-sashal@kernel.org>
+
+From: Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+[ Upstream commit aac0a51b16700b403a55b67ba495de021db78763 ]
+
+When device_property_read_string() fails, str is left uninitialized
+but the code falls through to strcmp(str, ...), dereferencing a garbage
+pointer. Replace manual read/strcmp with
+device_property_match_property_string() and consolidate the SE mode
+enums into a single sequential enum, mapping to hardware register
+values via a switch consistent with other bitfields in the driver.
+
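+To make the failure mode concrete, a minimal sketch of the old pattern (dev
+stands in for &spi->dev; this is an illustration, not the removed code
+verbatim): when the property is absent the read fails, str is never written,
+and execution still reaches the strcmp().
+
+        const char *str;        /* left uninitialised on error */
+
+        ret = device_property_read_string(dev, "adi,input-mode", &str);
+        if (ret)
+                st->input_mode = ADMV1013_IQ_MODE;      /* no return, falls through */
+
+        if (!strcmp(str, "iq"))                         /* dereferences garbage */
+                st->input_mode = ADMV1013_IQ_MODE;
+
+device_property_match_property_string() avoids the problem entirely: it
+returns either a valid index into the string table or a negative errno, so
+no string pointer has to be touched on failure.
+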
+Several cleanup patches have been applied to this driver recently so
+this will need a manual backport.
+
+Fixes: da35a7b526d9 ("iio: frequency: admv1013: add support for ADMV1013")
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Signed-off-by: Antoniu Miclaus <antoniu.miclaus@analog.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/frequency/admv1013.c | 67 ++++++++++++++++++++++-----------------
+ 1 file changed, 38 insertions(+), 29 deletions(-)
+
+--- a/drivers/iio/frequency/admv1013.c
++++ b/drivers/iio/frequency/admv1013.c
+@@ -85,9 +85,9 @@ enum {
+ };
+
+ enum {
+- ADMV1013_SE_MODE_POS = 6,
+- ADMV1013_SE_MODE_NEG = 9,
+- ADMV1013_SE_MODE_DIFF = 12
++ ADMV1013_SE_MODE_POS,
++ ADMV1013_SE_MODE_NEG,
++ ADMV1013_SE_MODE_DIFF,
+ };
+
+ struct admv1013_state {
+@@ -470,10 +470,23 @@ static int admv1013_init(struct admv1013
+ if (ret)
+ return ret;
+
+- data = FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, st->quad_se_mode);
++ switch (st->quad_se_mode) {
++ case ADMV1013_SE_MODE_POS:
++ data = 6;
++ break;
++ case ADMV1013_SE_MODE_NEG:
++ data = 9;
++ break;
++ case ADMV1013_SE_MODE_DIFF:
++ data = 12;
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ ret = __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
+- ADMV1013_QUAD_SE_MODE_MSK, data);
++ ADMV1013_QUAD_SE_MODE_MSK,
++ FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, data));
+ if (ret)
+ return ret;
+
+@@ -514,37 +527,33 @@ static void admv1013_powerdown(void *dat
+ admv1013_spi_update_bits(data, ADMV1013_REG_ENABLE, enable_reg_msk, enable_reg);
+ }
+
++static const char * const admv1013_input_modes[] = {
++ [ADMV1013_IQ_MODE] = "iq",
++ [ADMV1013_IF_MODE] = "if",
++};
++
++static const char * const admv1013_quad_se_modes[] = {
++ [ADMV1013_SE_MODE_POS] = "se-pos",
++ [ADMV1013_SE_MODE_NEG] = "se-neg",
++ [ADMV1013_SE_MODE_DIFF] = "diff",
++};
++
+ static int admv1013_properties_parse(struct admv1013_state *st)
+ {
+ int ret;
+- const char *str;
+ struct device *dev = &st->spi->dev;
+
+ st->det_en = device_property_read_bool(dev, "adi,detector-enable");
+
+- ret = device_property_read_string(dev, "adi,input-mode", &str);
+- if (ret)
+- st->input_mode = ADMV1013_IQ_MODE;
+-
+- if (!strcmp(str, "iq"))
+- st->input_mode = ADMV1013_IQ_MODE;
+- else if (!strcmp(str, "if"))
+- st->input_mode = ADMV1013_IF_MODE;
+- else
+- return -EINVAL;
+-
+- ret = device_property_read_string(dev, "adi,quad-se-mode", &str);
+- if (ret)
+- st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
+-
+- if (!strcmp(str, "diff"))
+- st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
+- else if (!strcmp(str, "se-pos"))
+- st->quad_se_mode = ADMV1013_SE_MODE_POS;
+- else if (!strcmp(str, "se-neg"))
+- st->quad_se_mode = ADMV1013_SE_MODE_NEG;
+- else
+- return -EINVAL;
++ ret = device_property_match_property_string(dev, "adi,input-mode",
++ admv1013_input_modes,
++ ARRAY_SIZE(admv1013_input_modes));
++ st->input_mode = ret >= 0 ? ret : ADMV1013_IQ_MODE;
++
++ ret = device_property_match_property_string(dev, "adi,quad-se-mode",
++ admv1013_quad_se_modes,
++ ARRAY_SIZE(admv1013_quad_se_modes));
++ st->quad_se_mode = ret >= 0 ? ret : ADMV1013_SE_MODE_DIFF;
+
+ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(admv1013_vcc_regs),
--- /dev/null
+From stable+bounces-241109-greg=kroah.com@vger.kernel.org Sat Apr 25 11:07:32 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 05:07:21 -0400
+Subject: ksmbd: replace connection list with hash table
+To: stable@vger.kernel.org
+Cc: Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425090722.3316820-2-sashal@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 0bcc831be535269556f59cb70396f7e34f03a276 ]
+
+Replace connection list with hash table to improve lookup performance.
+
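+For reference, the <linux/hashtable.h> pattern the patch switches to looks
+roughly like this (sketch only; handle_conn() and the key choice are
+illustrative, not taken from the patch):
+
+        DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);    /* 2^CONN_HASH_BITS buckets */
+
+        hash_add(conn_list, &conn->hlist, key);         /* insert, bucketed by key */
+
+        hash_for_each(conn_list, bkt, conn, hlist)      /* walk every entry */
+                handle_conn(conn);
+
+        hash_for_each_possible(conn_list, conn, hlist, key)
+                handle_conn(conn);                      /* walk one bucket only */
+
+        hash_del(&conn->hlist);                         /* remove */
+
+Lookups that know the key (such as the per-IP connection limit) only touch
+one bucket, while whole-table users keep iterating with hash_for_each().
+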
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: def036ef87f8 ("ksmbd: reset rcount per connection in ksmbd_conn_wait_idle_sess_id()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 23 +++++++++++------------
+ fs/smb/server/connection.h | 6 ++++--
+ fs/smb/server/smb2pdu.c | 4 ++--
+ fs/smb/server/transport_rdma.c | 5 +++++
+ fs/smb/server/transport_tcp.c | 25 +++++++++++++++++++++----
+ 5 files changed, 43 insertions(+), 20 deletions(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -19,7 +19,7 @@ static DEFINE_MUTEX(init_lock);
+
+ static struct ksmbd_conn_ops default_conn_ops;
+
+-LIST_HEAD(conn_list);
++DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
+ DECLARE_RWSEM(conn_list_lock);
+
+ /**
+@@ -33,7 +33,7 @@ DECLARE_RWSEM(conn_list_lock);
+ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ {
+ down_write(&conn_list_lock);
+- list_del(&conn->conns_list);
++ hash_del(&conn->hlist);
+ up_write(&conn_list_lock);
+
+ xa_destroy(&conn->sessions);
+@@ -78,7 +78,6 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+
+ init_waitqueue_head(&conn->req_running_q);
+ init_waitqueue_head(&conn->r_count_q);
+- INIT_LIST_HEAD(&conn->conns_list);
+ INIT_LIST_HEAD(&conn->requests);
+ INIT_LIST_HEAD(&conn->async_requests);
+ spin_lock_init(&conn->request_lock);
+@@ -91,19 +90,17 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+
+ init_rwsem(&conn->session_lock);
+
+- down_write(&conn_list_lock);
+- list_add(&conn->conns_list, &conn_list);
+- up_write(&conn_list_lock);
+ return conn;
+ }
+
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ {
+ struct ksmbd_conn *t;
++ int bkt;
+ bool ret = false;
+
+ down_read(&conn_list_lock);
+- list_for_each_entry(t, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, t, hlist) {
+ if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+ continue;
+
+@@ -164,9 +161,10 @@ void ksmbd_conn_unlock(struct ksmbd_conn
+ void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ {
+ struct ksmbd_conn *conn;
++ int bkt;
+
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id))
+ WRITE_ONCE(conn->status, status);
+ }
+@@ -182,14 +180,14 @@ int ksmbd_conn_wait_idle_sess_id(struct
+ {
+ struct ksmbd_conn *conn;
+ int rc, retry_count = 0, max_timeout = 120;
+- int rcount = 1;
++ int rcount = 1, bkt;
+
+ retry_idle:
+ if (retry_count >= max_timeout)
+ return -EIO;
+
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id)) {
+ if (conn == curr_conn)
+ rcount = 2;
+@@ -480,10 +478,11 @@ static void stop_sessions(void)
+ {
+ struct ksmbd_conn *conn;
+ struct ksmbd_transport *t;
++ int bkt;
+
+ again:
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ t = conn->transport;
+ ksmbd_conn_set_exiting(conn);
+ if (t->ops->shutdown) {
+@@ -494,7 +493,7 @@ again:
+ }
+ up_read(&conn_list_lock);
+
+- if (!list_empty(&conn_list)) {
++ if (!hash_empty(conn_list)) {
+ msleep(100);
+ goto again;
+ }
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -52,11 +52,12 @@ struct ksmbd_conn {
+ u8 inet6_addr[16];
+ #endif
+ };
++ unsigned int inet_hash;
+ char *request_buf;
+ struct ksmbd_transport *transport;
+ struct nls_table *local_nls;
+ struct unicode_map *um;
+- struct list_head conns_list;
++ struct hlist_node hlist;
+ struct rw_semaphore session_lock;
+ /* smb session 1 per user */
+ struct xarray sessions;
+@@ -151,7 +152,8 @@ struct ksmbd_transport {
+ #define KSMBD_TCP_SEND_TIMEOUT (5 * HZ)
+ #define KSMBD_TCP_PEER_SOCKADDR(c) ((struct sockaddr *)&((c)->peer_addr))
+
+-extern struct list_head conn_list;
++#define CONN_HASH_BITS 12
++extern DECLARE_HASHTABLE(conn_list, CONN_HASH_BITS);
+ extern struct rw_semaphore conn_list_lock;
+
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7427,7 +7427,7 @@ int smb2_lock(struct ksmbd_work *work)
+ int nolock = 0;
+ LIST_HEAD(lock_list);
+ LIST_HEAD(rollback_list);
+- int prior_lock = 0;
++ int prior_lock = 0, bkt;
+
+ WORK_BUFFERS(work, req, rsp);
+
+@@ -7537,7 +7537,7 @@ int smb2_lock(struct ksmbd_work *work)
+ nolock = 1;
+ /* check locks in connection list */
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ spin_lock(&conn->llist_lock);
+ list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+ if (file_inode(cmp_lock->fl->c.flc_file) !=
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -381,6 +381,11 @@ static struct smb_direct_transport *allo
+ conn = ksmbd_conn_alloc();
+ if (!conn)
+ goto err;
++
++ down_write(&conn_list_lock);
++ hash_add(conn_list, &conn->hlist, 0);
++ up_write(&conn_list_lock);
++
+ conn->transport = KSMBD_TRANS(t);
+ KSMBD_TRANS(t)->conn = conn;
+ KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -89,13 +89,21 @@ static struct tcp_transport *alloc_trans
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (client_sk->sk->sk_family == AF_INET6)
++ if (client_sk->sk->sk_family == AF_INET6) {
+ memcpy(&conn->inet6_addr, &client_sk->sk->sk_v6_daddr, 16);
+- else
++ conn->inet_hash = ipv6_addr_hash(&client_sk->sk->sk_v6_daddr);
++ } else {
+ conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
++ conn->inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
++ }
+ #else
+ conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
++ conn->inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
+ #endif
++ down_write(&conn_list_lock);
++ hash_add(conn_list, &conn->hlist, conn->inet_hash);
++ up_write(&conn_list_lock);
++
+ conn->transport = KSMBD_TRANS(t);
+ KSMBD_TRANS(t)->conn = conn;
+ KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
+@@ -242,7 +250,7 @@ static int ksmbd_kthread_fn(void *p)
+ struct socket *client_sk = NULL;
+ struct interface *iface = (struct interface *)p;
+ struct ksmbd_conn *conn;
+- int ret;
++ int ret, inet_hash;
+ unsigned int max_ip_conns;
+
+ while (!kthread_should_stop()) {
+@@ -267,9 +275,18 @@ static int ksmbd_kthread_fn(void *p)
+ /*
+ * Limits repeated connections from clients with the same IP.
+ */
++#if IS_ENABLED(CONFIG_IPV6)
++ if (client_sk->sk->sk_family == AF_INET6)
++ inet_hash = ipv6_addr_hash(&client_sk->sk->sk_v6_daddr);
++ else
++ inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
++#else
++ inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
++#endif
++
+ max_ip_conns = 0;
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each_possible(conn_list, conn, hlist, inet_hash) {
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (client_sk->sk->sk_family == AF_INET6) {
+ if (memcmp(&client_sk->sk->sk_v6_daddr,
--- /dev/null
+From stable+bounces-241110-greg=kroah.com@vger.kernel.org Sat Apr 25 11:07:34 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 05:07:22 -0400
+Subject: ksmbd: reset rcount per connection in ksmbd_conn_wait_idle_sess_id()
+To: stable@vger.kernel.org
+Cc: DaeMyung Kang <charsyam@gmail.com>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425090722.3316820-3-sashal@kernel.org>
+
+From: DaeMyung Kang <charsyam@gmail.com>
+
+[ Upstream commit def036ef87f8641c1c525d5ae17438d7a1006491 ]
+
+rcount is intended to be connection-specific: 2 for curr_conn, 1 for
+every other connection sharing the same session. However, it is
+initialised only once before the hash iteration and is never reset.
+After the loop visits curr_conn, later sibling connections are also
+checked against rcount == 2, so a sibling with req_running == 1 is
+incorrectly treated as idle. This makes the outcome depend on the
+hash iteration order: whether a given sibling is checked against the
+loose (< 2) or the strict (< 1) threshold is decided by whether it
+happens to be visited before or after curr_conn.
+
+The function's contract is "wait until every connection sharing this
+session is idle" so that destroy_previous_session() can safely tear
+the session down. The latched rcount violates that contract and
+reopens the teardown race window the wait logic was meant to close:
+destroy_previous_session() may proceed before sibling channels have
+actually quiesced, overlapping session teardown with in-flight work
+on those connections.
+
+Recompute rcount inside the loop so each connection is compared
+against its own threshold regardless of iteration order.
+
+This is a code-inspection fix for an iteration-order-dependent logic
+error; a targeted reproducer would require SMB3 multichannel with
+in-flight work on a sibling channel landing after curr_conn in hash
+order, which is not something that can be triggered reliably.
+
+Fixes: 76e98a158b20 ("ksmbd: fix race condition between destroy_previous_session() and smb2 operations()")
+Cc: stable@vger.kernel.org
+Signed-off-by: DaeMyung Kang <charsyam@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -180,7 +180,7 @@ int ksmbd_conn_wait_idle_sess_id(struct
+ {
+ struct ksmbd_conn *conn;
+ int rc, retry_count = 0, max_timeout = 120;
+- int rcount = 1, bkt;
++ int rcount, bkt;
+
+ retry_idle:
+ if (retry_count >= max_timeout)
+@@ -189,8 +189,7 @@ retry_idle:
+ down_read(&conn_list_lock);
+ hash_for_each(conn_list, bkt, conn, hlist) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id)) {
+- if (conn == curr_conn)
+- rcount = 2;
++ rcount = (conn == curr_conn) ? 2 : 1;
+ if (atomic_read(&conn->req_running) >= rcount) {
+ rc = wait_event_timeout(conn->req_running_q,
+ atomic_read(&conn->req_running) < rcount,
--- /dev/null
+From stable+bounces-241108-greg=kroah.com@vger.kernel.org Sat Apr 25 11:07:33 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 05:07:20 -0400
+Subject: ksmbd: use msleep instead of schedule_timeout_interruptible()
+To: stable@vger.kernel.org
+Cc: Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425090722.3316820-1-sashal@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit f75f8bdd4ff4830abe31a1b94892eb12b85b9535 ]
+
+Use msleep() instead of schedule_timeout_interruptible()
+to guarantee the task delays as expected.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: def036ef87f8 ("ksmbd: reset rcount per connection in ksmbd_conn_wait_idle_sess_id()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -495,7 +495,7 @@ again:
+ up_read(&conn_list_lock);
+
+ if (!list_empty(&conn_list)) {
+- schedule_timeout_interruptible(HZ / 10); /* 100ms */
++ msleep(100);
+ goto again;
+ }
+ }
--- /dev/null
+From stable+bounces-241690-greg=kroah.com@vger.kernel.org Tue Apr 28 17:25:06 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2026 11:23:54 -0400
+Subject: lib: test_hmm: evict device pages on file close to avoid use-after-free
+To: stable@vger.kernel.org
+Cc: Alistair Popple <apopple@nvidia.com>, Zenghui Yu <zenghui.yu@linux.dev>, Balbir Singh <balbirs@nvidia.com>, David Hildenbrand <david@kernel.org>, Jason Gunthorpe <jgg@ziepe.ca>, Leon Romanovsky <leon@kernel.org>, Liam Howlett <liam.howlett@oracle.com>, "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>, Michal Hocko <mhocko@suse.com>, Mike Rapoport <rppt@kernel.org>, Suren Baghdasaryan <surenb@google.com>, Matthew Brost <matthew.brost@intel.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260428152354.3033526-1-sashal@kernel.org>
+
+From: Alistair Popple <apopple@nvidia.com>
+
+[ Upstream commit 744dd97752ef1076a8d8672bb0d8aa2c7abc1144 ]
+
+Patch series "Minor hmm_test fixes and cleanups".
+
+Two bugfixes and a cleanup for the HMM kernel selftests. These were mostly
+reported by Zenghui Yu with special thanks to Lorenzo for analysing and
+pointing out the problems.
+
+This patch (of 3):
+
+When dmirror_fops_release() is called it frees the dmirror struct but
+doesn't migrate device private pages back to system memory first. This
+leaves those pages with a dangling zone_device_data pointer to the freed
+dmirror.
+
+If a subsequent fault occurs on those pages (eg. during coredump) the
+dmirror_devmem_fault() callback dereferences the stale pointer causing a
+kernel panic. This was reported [1] when running mm/ksft_hmm.sh on arm64,
+where a test failure triggered SIGABRT and the resulting coredump walked
+the VMAs faulting in the stale device private pages.
+
+Fix this by calling dmirror_device_evict_chunk() for each devmem chunk in
+dmirror_fops_release() to migrate all device private pages back to system
+memory before freeing the dmirror struct. The function is moved earlier
+in the file to avoid a forward declaration.
+
+Link: https://lore.kernel.org/20260331063445.3551404-1-apopple@nvidia.com
+Link: https://lore.kernel.org/20260331063445.3551404-2-apopple@nvidia.com
+Fixes: b2ef9f5a5cb3 ("mm/hmm/test: add selftest driver for HMM")
+Signed-off-by: Alistair Popple <apopple@nvidia.com>
+Reported-by: Zenghui Yu <zenghui.yu@linux.dev>
+Closes: https://lore.kernel.org/linux-mm/8bd0396a-8997-4d2e-a13f-5aac033083d7@linux.dev/
+Reviewed-by: Balbir Singh <balbirs@nvidia.com>
+Tested-by: Zenghui Yu <zenghui.yu@linux.dev>
+Cc: David Hildenbrand <david@kernel.org>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Leon Romanovsky <leon@kernel.org>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Zenghui Yu <zenghui.yu@linux.dev>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ kept the existing simpler `dmirror_device_evict_chunk()` body instead of the upstream compound-folio version ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/test_hmm.c | 86 ++++++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 49 insertions(+), 37 deletions(-)
+
+--- a/lib/test_hmm.c
++++ b/lib/test_hmm.c
+@@ -183,11 +183,60 @@ static int dmirror_fops_open(struct inod
+ return 0;
+ }
+
++static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
++{
++ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
++ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
++ unsigned long npages = end_pfn - start_pfn + 1;
++ unsigned long i;
++ unsigned long *src_pfns;
++ unsigned long *dst_pfns;
++
++ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
++ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
++
++ migrate_device_range(src_pfns, start_pfn, npages);
++ for (i = 0; i < npages; i++) {
++ struct page *dpage, *spage;
++
++ spage = migrate_pfn_to_page(src_pfns[i]);
++ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
++ continue;
++
++ if (WARN_ON(!is_device_private_page(spage) &&
++ !is_device_coherent_page(spage)))
++ continue;
++ spage = BACKING_PAGE(spage);
++ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
++ lock_page(dpage);
++ copy_highpage(dpage, spage);
++ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
++ if (src_pfns[i] & MIGRATE_PFN_WRITE)
++ dst_pfns[i] |= MIGRATE_PFN_WRITE;
++ }
++ migrate_device_pages(src_pfns, dst_pfns, npages);
++ migrate_device_finalize(src_pfns, dst_pfns, npages);
++ kvfree(src_pfns);
++ kvfree(dst_pfns);
++}
++
+ static int dmirror_fops_release(struct inode *inode, struct file *filp)
+ {
+ struct dmirror *dmirror = filp->private_data;
++ struct dmirror_device *mdevice = dmirror->mdevice;
++ int i;
+
+ mmu_interval_notifier_remove(&dmirror->notifier);
++
++ if (mdevice->devmem_chunks) {
++ for (i = 0; i < mdevice->devmem_count; i++) {
++ struct dmirror_chunk *devmem =
++ mdevice->devmem_chunks[i];
++
++ dmirror_device_evict_chunk(devmem);
++ }
++ }
++
+ xa_destroy(&dmirror->pt);
+ kfree(dmirror);
+ return 0;
+@@ -1214,43 +1263,6 @@ static int dmirror_snapshot(struct dmirr
+ return ret;
+ }
+
+-static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+-{
+- unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+- unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+- unsigned long npages = end_pfn - start_pfn + 1;
+- unsigned long i;
+- unsigned long *src_pfns;
+- unsigned long *dst_pfns;
+-
+- src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+- dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+-
+- migrate_device_range(src_pfns, start_pfn, npages);
+- for (i = 0; i < npages; i++) {
+- struct page *dpage, *spage;
+-
+- spage = migrate_pfn_to_page(src_pfns[i]);
+- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+- continue;
+-
+- if (WARN_ON(!is_device_private_page(spage) &&
+- !is_device_coherent_page(spage)))
+- continue;
+- spage = BACKING_PAGE(spage);
+- dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+- lock_page(dpage);
+- copy_highpage(dpage, spage);
+- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+- if (src_pfns[i] & MIGRATE_PFN_WRITE)
+- dst_pfns[i] |= MIGRATE_PFN_WRITE;
+- }
+- migrate_device_pages(src_pfns, dst_pfns, npages);
+- migrate_device_finalize(src_pfns, dst_pfns, npages);
+- kvfree(src_pfns);
+- kvfree(dst_pfns);
+-}
+-
+ /* Removes free pages from the free list so they can't be re-allocated */
+ static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
+ {
--- /dev/null
+From stable+bounces-242499-greg=kroah.com@vger.kernel.org Fri May 1 21:29:34 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 15:29:27 -0400
+Subject: media: rc: igorplugusb: heed coherency rules
+To: stable@vger.kernel.org
+Cc: Oliver Neukum <oneukum@suse.com>, Sean Young <sean@mess.org>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501192927.3999837-1-sashal@kernel.org>
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit eac69475b01fe1e861dfe3960b57fa95671c132e ]
+
+In a control request, the USB request structure
+can be subject to DMA on some HCs. Hence it must obey
+the rules for DMA coherency. Allocate it separately.
+
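+A short sketch of the rule being applied (illustrative, mirrors the hunks
+below): the request must live in its own heap allocation rather than being
+embedded in struct igorplugusb, so it cannot share cache lines with fields
+the CPU keeps touching while the HC may access it by DMA.
+
+        struct usb_ctrlrequest *request;        /* was: struct usb_ctrlrequest request; */
+
+        ir->request = kzalloc(sizeof(*ir->request), GFP_KERNEL);
+        if (!ir->request)
+                goto fail;                      /* released again with kfree() */
+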
+Fixes: b1c97193c6437 ("[media] rc: port IgorPlug-USB to rc-core")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ replaced kzalloc_obj(*ir->request, GFP_KERNEL) with kzalloc(sizeof(*ir->request), GFP_KERNEL) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/rc/igorplugusb.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/rc/igorplugusb.c
++++ b/drivers/media/rc/igorplugusb.c
+@@ -34,7 +34,7 @@ struct igorplugusb {
+ struct device *dev;
+
+ struct urb *urb;
+- struct usb_ctrlrequest request;
++ struct usb_ctrlrequest *request;
+
+ struct timer_list timer;
+
+@@ -122,7 +122,7 @@ static void igorplugusb_cmd(struct igorp
+ {
+ int ret;
+
+- ir->request.bRequest = cmd;
++ ir->request->bRequest = cmd;
+ ir->urb->transfer_flags = 0;
+ ret = usb_submit_urb(ir->urb, GFP_ATOMIC);
+ if (ret && ret != -EPERM)
+@@ -164,13 +164,17 @@ static int igorplugusb_probe(struct usb_
+ if (!ir)
+ return -ENOMEM;
+
++ ir->request = kzalloc(sizeof(*ir->request), GFP_KERNEL);
++ if (!ir->request)
++ goto fail;
++
+ ir->dev = &intf->dev;
+
+ timer_setup(&ir->timer, igorplugusb_timer, 0);
+
+- ir->request.bRequest = GET_INFRACODE;
+- ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+- ir->request.wLength = cpu_to_le16(MAX_PACKET);
++ ir->request->bRequest = GET_INFRACODE;
++ ir->request->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
++ ir->request->wLength = cpu_to_le16(MAX_PACKET);
+
+ ir->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ir->urb)
+@@ -228,6 +232,7 @@ fail:
+ usb_free_urb(ir->urb);
+ rc_free_device(ir->rc);
+ kfree(ir->buf_in);
++ kfree(ir->request);
+
+ return ret;
+ }
+@@ -243,6 +248,7 @@ static void igorplugusb_disconnect(struc
+ usb_unpoison_urb(ir->urb);
+ usb_free_urb(ir->urb);
+ kfree(ir->buf_in);
++ kfree(ir->request);
+ }
+
+ static const struct usb_device_id igorplugusb_table[] = {
--- /dev/null
+From stable+bounces-242457-greg=kroah.com@vger.kernel.org Fri May 1 17:54:33 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 11:54:18 -0400
+Subject: media: rc: ttusbir: respect DMA coherency rules
+To: stable@vger.kernel.org
+Cc: Oliver Neukum <oneukum@suse.com>, Sean Young <sean@mess.org>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501155418.3610742-1-sashal@kernel.org>
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit 50acaad3d202c064779db8dc3d010007347f59c7 ]
+
+Buffers must not share a cache line with other data structures.
+Allocate separately.
+
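+Sketched shape of the change (summary of the hunks below, not extra code):
+the fixed-size array embedded in struct ttusbir becomes a pointer to a
+separate five-byte allocation that owns its own cache lines.
+
+        u8 *bulk_buffer;                        /* was: uint8_t bulk_buffer[5]; */
+
+        tt->bulk_buffer = kzalloc(5, GFP_KERNEL);
+        if (!tt->bulk_buffer)
+                goto out;                       /* ret is -ENOMEM here */
+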
+Fixes: 0938069fa0897 ("[media] rc: Add support for the TechnoTrend USB IR Receiver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ kept kzalloc(sizeof(*tt), GFP_KERNEL) instead of kzalloc_obj() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/rc/ttusbir.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/media/rc/ttusbir.c
++++ b/drivers/media/rc/ttusbir.c
+@@ -32,7 +32,7 @@ struct ttusbir {
+
+ struct led_classdev led;
+ struct urb *bulk_urb;
+- uint8_t bulk_buffer[5];
++ u8 *bulk_buffer;
+ int bulk_out_endp, iso_in_endp;
+ bool led_on, is_led_on;
+ atomic_t led_complete;
+@@ -186,13 +186,16 @@ static int ttusbir_probe(struct usb_inte
+ struct rc_dev *rc;
+ int i, j, ret;
+ int altsetting = -1;
++ u8 *buffer;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
++ buffer = kzalloc(5, GFP_KERNEL);
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
+- if (!tt || !rc) {
++ if (!tt || !rc || !buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
++ tt->bulk_buffer = buffer;
+
+ /* find the correct alt setting */
+ for (i = 0; i < intf->num_altsetting && altsetting == -1; i++) {
+@@ -281,8 +284,8 @@ static int ttusbir_probe(struct usb_inte
+ tt->bulk_buffer[3] = 0x01;
+
+ usb_fill_bulk_urb(tt->bulk_urb, tt->udev, usb_sndbulkpipe(tt->udev,
+- tt->bulk_out_endp), tt->bulk_buffer, sizeof(tt->bulk_buffer),
+- ttusbir_bulk_complete, tt);
++ tt->bulk_out_endp), tt->bulk_buffer, 5,
++ ttusbir_bulk_complete, tt);
+
+ tt->led.name = "ttusbir:green:power";
+ tt->led.default_trigger = "rc-feedback";
+@@ -351,6 +354,7 @@ out:
+ kfree(tt);
+ }
+ rc_free_device(rc);
++ kfree(buffer);
+
+ return ret;
+ }
+@@ -373,6 +377,7 @@ static void ttusbir_disconnect(struct us
+ }
+ usb_kill_urb(tt->bulk_urb);
+ usb_free_urb(tt->bulk_urb);
++ kfree(tt->bulk_buffer);
+ usb_set_intfdata(intf, NULL);
+ kfree(tt);
+ }
--- /dev/null
+From sashal@kernel.org Tue Apr 28 17:24:19 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2026 11:24:10 -0400
+Subject: mm/migrate: factor out movable_ops page handling into migrate_movable_ops_page()
+To: stable@vger.kernel.org
+Cc: "David Hildenbrand" <david@redhat.com>, "Zi Yan" <ziy@nvidia.com>, "Lorenzo Stoakes" <lorenzo.stoakes@oracle.com>, "Alistair Popple" <apopple@nvidia.com>, "Al Viro" <viro@zeniv.linux.org.uk>, "Arnd Bergmann" <arnd@arndb.de>, "Brendan Jackman" <jackmanb@google.com>, "Byungchul Park" <byungchul@sk.com>, "Chengming Zhou" <chengming.zhou@linux.dev>, "Christian Brauner" <brauner@kernel.org>, "Christophe Leroy" <christophe.leroy@csgroup.eu>, "Eugenio Pé rez" <eperezma@redhat.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Gregory Price" <gourry@gourry.net>, "Harry Yoo" <harry.yoo@oracle.com>, "Huang, Ying" <ying.huang@linux.alibaba.com>, "Jan Kara" <jack@suse.cz>, "Jason Gunthorpe" <jgg@ziepe.ca>, "Jason Wang" <jasowang@redhat.com>, "Jerrin Shaji George" <jerrin.shaji-george@broadcom.com>, "Johannes Weiner" <hannes@cmpxchg.org>, "John Hubbard" <jhubbard@nvidia.com>, "Jonathan Corbet" <corbet@lwn.net>, "Joshua Hahn" <joshua.hahnjy@gmail.com>, "Liam Howlett" <liam.howlett@oracle.com>, "Madhavan Srinivasan" <maddy@linux.ibm.com>, "Mathew Brost" <matthew.brost@intel.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, "Miaohe Lin" <linmiaohe@huawei.com>, "Michael Ellerman" <mpe@ellerman.id.au>, "Michael S. Tsirkin" <mst@redhat.com>, "Michal Hocko" <mhocko@suse.com>, "Mike Rapoport" <rppt@kernel.org>, "Minchan Kim" <minchan@kernel.org>, "Naoya Horiguchi" <nao.horiguchi@gmail.com>, "Nicholas Piggin" <npiggin@gmail.com>, "Oscar Salvador" <osalvador@suse.de>, "Peter Xu" <peterx@redhat.com>, "Qi Zheng" <zhengqi.arch@bytedance.com>, "Rakie Kim" <rakie.kim@sk.com>, "Rik van Riel" <riel@surriel.com>, "Sergey Senozhatsky" <senozhatsky@chromium.org>, "Shakeel Butt" <shakeel.butt@linux.dev>, "Suren Baghdasaryan" <surenb@google.com>, "Vlastimil Babka" <vbabka@suse.cz>, "Xuan Zhuo" <xuanzhuo@linux.alibaba.com>, "xu xin" <xu.xin16@zte.com.cn>, "Andrew Morton" <akpm@linux-foundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260428152412.3034119-1-sashal@kernel.org>
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit b9ed00483d4cbacca04edb11984d8daf09e9ae22 ]
+
+Let's factor it out, simplifying the calling code.
+
+Before this change, we would have called flush_dcache_folio() also on
+movable_ops pages. As documented in Documentation/core-api/cachetlb.rst:
+
+ "This routine need only be called for page cache pages which can
+ potentially ever be mapped into the address space of a user
+ process."
+
+So don't do it for movable_ops pages. If there would ever be such a
+movable_ops page user, it should do the flushing itself after performing
+the copy.
+
+Note that we can now change folio_mapping_flags() to folio_test_anon() to
+make it clearer, because movable_ops pages will never take that path.
+
+[akpm@linux-foundation.org: fix kerneldoc]
+Link: https://lkml.kernel.org/r/20250704102524.326966-10-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Brendan Jackman <jackmanb@google.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Chengming Zhou <chengming.zhou@linux.dev>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Eugenio Pérez <eperezma@redhat.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: Harry Yoo <harry.yoo@oracle.com>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jason Wang <jasowang@redhat.com>
+Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
+Cc: Mathew Brost <matthew.brost@intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: "Michael S. Tsirkin" <mst@redhat.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Qi Zheng <zhengqi.arch@bytedance.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Cc: xu xin <xu.xin16@zte.com.cn>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: a2e0c0668a34 ("mm: migrate: requeue destination folio on deferred split queue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c | 84 +++++++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 47 insertions(+), 37 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -130,6 +130,47 @@ static void putback_movable_folio(struct
+ folio_clear_isolated(folio);
+ }
+
++/**
++ * migrate_movable_ops_page - migrate an isolated movable_ops page
++ * @dst: The destination page.
++ * @src: The source page.
++ * @mode: The migration mode.
++ *
++ * Migrate an isolated movable_ops page.
++ *
++ * If the src page was already released by its owner, the src page is
++ * un-isolated (putback) and migration succeeds; the migration core will be the
++ * owner of both pages.
++ *
++ * If the src page was not released by its owner and the migration was
++ * successful, the owner of the src page and the dst page are swapped and
++ * the src page is un-isolated.
++ *
++ * If migration fails, the ownership stays unmodified and the src page
++ * remains isolated: migration may be retried later or the page can be putback.
++ *
++ * TODO: migration core will treat both pages as folios and lock them before
++ * this call to unlock them after this call. Further, the folio refcounts on
++ * src and dst are also released by migration core. These pages will not be
++ * folios in the future, so that must be reworked.
++ *
++ * Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
++ * code.
++ */
++static int migrate_movable_ops_page(struct page *dst, struct page *src,
++ enum migrate_mode mode)
++{
++ int rc = MIGRATEPAGE_SUCCESS;
++
++ VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
++ /* If the page was released by its owner, there is nothing to do. */
++ if (PageMovable(src))
++ rc = page_movable_ops(src)->migrate_page(dst, src, mode);
++ if (rc == MIGRATEPAGE_SUCCESS)
++ ClearPageIsolated(src);
++ return rc;
++}
++
+ /*
+ * Put previously isolated pages back onto the appropriate lists
+ * from where they were once taken off for compaction/migration.
+@@ -1044,51 +1085,20 @@ static int move_to_new_folio(struct foli
+ mode);
+ else
+ rc = fallback_migrate_folio(mapping, dst, src, mode);
+- } else {
+- const struct movable_operations *mops;
+
+- /*
+- * In case of non-lru page, it could be released after
+- * isolation step. In that case, we shouldn't try migration.
+- */
+- VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
+- if (!folio_test_movable(src)) {
+- rc = MIGRATEPAGE_SUCCESS;
+- folio_clear_isolated(src);
++ if (rc != MIGRATEPAGE_SUCCESS)
+ goto out;
+- }
+-
+- mops = folio_movable_ops(src);
+- rc = mops->migrate_page(&dst->page, &src->page, mode);
+- WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
+- !folio_test_isolated(src));
+- }
+-
+- /*
+- * When successful, old pagecache src->mapping must be cleared before
+- * src is freed; but stats require that PageAnon be left as PageAnon.
+- */
+- if (rc == MIGRATEPAGE_SUCCESS) {
+- if (__folio_test_movable(src)) {
+- VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
+-
+- /*
+- * We clear PG_movable under page_lock so any compactor
+- * cannot try to migrate this page.
+- */
+- folio_clear_isolated(src);
+- }
+-
+ /*
+- * Anonymous and movable src->mapping will be cleared by
+- * free_pages_prepare so don't reset it here for keeping
+- * the type to work PageAnon, for example.
++ * For pagecache folios, src->mapping must be cleared before src
++ * is freed. Anonymous folios must stay anonymous until freed.
+ */
+- if (!folio_mapping_flags(src))
++ if (!folio_test_anon(src))
+ src->mapping = NULL;
+
+ if (likely(!folio_is_zone_device(dst)))
+ flush_dcache_folio(dst);
++ } else {
++ rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
+ }
+ out:
+ return rc;
--- /dev/null
+From sashal@kernel.org Tue Apr 28 17:24:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2026 11:24:11 -0400
+Subject: mm/migrate: move movable_ops page handling out of move_to_new_folio()
+To: stable@vger.kernel.org
+Cc: "David Hildenbrand" <david@redhat.com>, "Zi Yan" <ziy@nvidia.com>, "Harry Yoo" <harry.yoo@oracle.com>, "Lorenzo Stoakes" <lorenzo.stoakes@oracle.com>, "Alistair Popple" <apopple@nvidia.com>, "Al Viro" <viro@zeniv.linux.org.uk>, "Arnd Bergmann" <arnd@arndb.de>, "Brendan Jackman" <jackmanb@google.com>, "Byungchul Park" <byungchul@sk.com>, "Chengming Zhou" <chengming.zhou@linux.dev>, "Christian Brauner" <brauner@kernel.org>, "Christophe Leroy" <christophe.leroy@csgroup.eu>, "Eugenio Pé rez" <eperezma@redhat.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Gregory Price" <gourry@gourry.net>, "Huang, Ying" <ying.huang@linux.alibaba.com>, "Jan Kara" <jack@suse.cz>, "Jason Gunthorpe" <jgg@ziepe.ca>, "Jason Wang" <jasowang@redhat.com>, "Jerrin Shaji George" <jerrin.shaji-george@broadcom.com>, "Johannes Weiner" <hannes@cmpxchg.org>, "John Hubbard" <jhubbard@nvidia.com>, "Jonathan Corbet" <corbet@lwn.net>, "Joshua Hahn" <joshua.hahnjy@gmail.com>, "Liam Howlett" <liam.howlett@oracle.com>, "Madhavan Srinivasan" <maddy@linux.ibm.com>, "Mathew Brost" <matthew.brost@intel.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, "Miaohe Lin" <linmiaohe@huawei.com>, "Michael Ellerman" <mpe@ellerman.id.au>, "Michael S. Tsirkin" <mst@redhat.com>, "Michal Hocko" <mhocko@suse.com>, "Mike Rapoport" <rppt@kernel.org>, "Minchan Kim" <minchan@kernel.org>, "Naoya Horiguchi" <nao.horiguchi@gmail.com>, "Nicholas Piggin" <npiggin@gmail.com>, "Oscar Salvador" <osalvador@suse.de>, "Peter Xu" <peterx@redhat.com>, "Qi Zheng" <zhengqi.arch@bytedance.com>, "Rakie Kim" <rakie.kim@sk.com>, "Rik van Riel" <riel@surriel.com>, "Sergey Senozhatsky" <senozhatsky@chromium.org>, "Shakeel Butt" <shakeel.butt@linux.dev>, "Suren Baghdasaryan" <surenb@google.com>, "Vlastimil Babka" <vbabka@suse.cz>, "Xuan Zhuo" <xuanzhuo@linux.alibaba.com>, "xu xin" <xu.xin16@zte.com.cn>, "Andrew Morton" <akpm@linux-foundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260428152412.3034119-2-sashal@kernel.org>
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit be4a3e9c185264e9ad0fe02c1c5d81b8386bd50c ]
+
+Let's move that handling directly into migrate_folio_move(), so we can
+simplify move_to_new_folio(). While at it, fixup the documentation a bit.
+
+Note that unmap_and_move_huge_page() does not care, because it only deals
+with actual folios. (we only support migration of individual movable_ops
+pages)
+
+Link: https://lkml.kernel.org/r/20250704102524.326966-12-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Brendan Jackman <jackmanb@google.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Chengming Zhou <chengming.zhou@linux.dev>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Eugenio Pérez <eperezma@redhat.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jason Wang <jasowang@redhat.com>
+Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
+Cc: Mathew Brost <matthew.brost@intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: "Michael S. Tsirkin" <mst@redhat.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Qi Zheng <zhengqi.arch@bytedance.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Cc: xu xin <xu.xin16@zte.com.cn>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: a2e0c0668a34 ("mm: migrate: requeue destination folio on deferred split queue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c | 63 ++++++++++++++++++++++++++++-------------------------------
+ 1 file changed, 30 insertions(+), 33 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1047,11 +1047,12 @@ static int fallback_migrate_folio(struct
+ }
+
+ /*
+- * Move a page to a newly allocated page
+- * The page is locked and all ptes have been successfully removed.
++ * Move a src folio to a newly allocated dst folio.
+ *
+- * The new page will have replaced the old page if this function
+- * is successful.
++ * The src and dst folios are locked and the src folio was unmapped from
++ * the page tables.
++ *
++ * On success, the src folio was replaced by the dst folio.
+ *
+ * Return value:
+ * < 0 - error code
+@@ -1060,34 +1061,30 @@ static int fallback_migrate_folio(struct
+ static int move_to_new_folio(struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+ {
++ struct address_space *mapping = folio_mapping(src);
+ int rc = -EAGAIN;
+- bool is_lru = !__folio_test_movable(src);
+
+ VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
+ VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
+
+- if (likely(is_lru)) {
+- struct address_space *mapping = folio_mapping(src);
+-
+- if (!mapping)
+- rc = migrate_folio(mapping, dst, src, mode);
+- else if (mapping_inaccessible(mapping))
+- rc = -EOPNOTSUPP;
+- else if (mapping->a_ops->migrate_folio)
+- /*
+- * Most folios have a mapping and most filesystems
+- * provide a migrate_folio callback. Anonymous folios
+- * are part of swap space which also has its own
+- * migrate_folio callback. This is the most common path
+- * for page migration.
+- */
+- rc = mapping->a_ops->migrate_folio(mapping, dst, src,
+- mode);
+- else
+- rc = fallback_migrate_folio(mapping, dst, src, mode);
++ if (!mapping)
++ rc = migrate_folio(mapping, dst, src, mode);
++ else if (mapping_inaccessible(mapping))
++ rc = -EOPNOTSUPP;
++ else if (mapping->a_ops->migrate_folio)
++ /*
++ * Most folios have a mapping and most filesystems
++ * provide a migrate_folio callback. Anonymous folios
++ * are part of swap space which also has its own
++ * migrate_folio callback. This is the most common path
++ * for page migration.
++ */
++ rc = mapping->a_ops->migrate_folio(mapping, dst, src,
++ mode);
++ else
++ rc = fallback_migrate_folio(mapping, dst, src, mode);
+
+- if (rc != MIGRATEPAGE_SUCCESS)
+- goto out;
++ if (rc == MIGRATEPAGE_SUCCESS) {
+ /*
+ * For pagecache folios, src->mapping must be cleared before src
+ * is freed. Anonymous folios must stay anonymous until freed.
+@@ -1097,10 +1094,7 @@ static int move_to_new_folio(struct foli
+
+ if (likely(!folio_is_zone_device(dst)))
+ flush_dcache_folio(dst);
+- } else {
+- rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
+ }
+-out:
+ return rc;
+ }
+
+@@ -1351,20 +1345,23 @@ static int migrate_folio_move(free_folio
+ int rc;
+ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+- bool is_lru = !__folio_test_movable(src);
+ struct list_head *prev;
+
+ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+ prev = dst->lru.prev;
+ list_del(&dst->lru);
+
++ if (unlikely(__folio_test_movable(src))) {
++ rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
++ if (rc)
++ goto out;
++ goto out_unlock_both;
++ }
++
+ rc = move_to_new_folio(dst, src, mode);
+ if (rc)
+ goto out;
+
+- if (unlikely(!is_lru))
+- goto out_unlock_both;
+-
+ /*
+ * When successful, push dst to LRU immediately: so that if it
+ * turns out to be an mlocked page, remove_migration_ptes() will
--- /dev/null
+From stable+bounces-241694-greg=kroah.com@vger.kernel.org Tue Apr 28 17:26:11 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2026 11:24:12 -0400
+Subject: mm: migrate: requeue destination folio on deferred split queue
+To: stable@vger.kernel.org
+Cc: Usama Arif <usama.arif@linux.dev>, Johannes Weiner <hannes@cmpxchg.org>, Zi Yan <ziy@nvidia.com>, "David Hildenbrand (Arm)" <david@kernel.org>, SeongJae Park <sj@kernel.org>, Wei Yang <richard.weiyang@gmail.com>, Alistair Popple <apopple@nvidia.com>, Byungchul Park <byungchul@sk.com>, Gregory Price <gourry@gourry.net>, "Huang, Ying" <ying.huang@linux.alibaba.com>, Joshua Hahn <joshua.hahnjy@gmail.com>, Matthew Brost <matthew.brost@intel.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, Nico Pache <npache@redhat.com>, Rakie Kim <rakie.kim@sk.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260428152412.3034119-3-sashal@kernel.org>
+
+From: Usama Arif <usama.arif@linux.dev>
+
+[ Upstream commit a2e0c0668a3486f96b86c50e02872c8e94fd4f9c ]
+
+During folio migration, __folio_migrate_mapping() removes the source folio
+from the deferred split queue, but the destination folio is never
+re-queued. This causes underutilized THPs to escape the shrinker after
+NUMA migration, since they silently drop off the deferred split list.
+
+Fix this by recording whether the source folio was on the deferred split
+queue and its partially mapped state before move_to_new_folio() unqueues
+it, and re-queuing the destination folio after a successful migration if
+it was.
+
+By the time migrate_folio_move() runs, partially mapped folios without a
+pin have already been split by migrate_pages_batch(). So only two cases
+remain on the deferred list at this point:
+ 1. Partially mapped folios with a pin (split failed).
+ 2. Fully mapped but potentially underused folios. The recorded
+ partially_mapped state is forwarded to deferred_split_folio() so that
+ the destination folio is correctly re-queued in both cases.
+
+Because such THPs are removed from the deferred_list, the THP shrinker
+cannot split the underutilized THPs in time. As a result, users will see
+less free memory than before.
+
+Link: https://lkml.kernel.org/r/20260312104723.1351321-1-usama.arif@linux.dev
+Fixes: dafff3f4c850 ("mm: split underused THPs")
+Signed-off-by: Usama Arif <usama.arif@linux.dev>
+Reported-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Acked-by: SeongJae Park <sj@kernel.org>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Nico Pache <npache@redhat.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Ying Huang <ying.huang@linux.alibaba.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1345,6 +1345,8 @@ static int migrate_folio_move(free_folio
+ int rc;
+ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
++ bool src_deferred_split = false;
++ bool src_partially_mapped = false;
+ struct list_head *prev;
+
+ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+@@ -1358,6 +1360,12 @@ static int migrate_folio_move(free_folio
+ goto out_unlock_both;
+ }
+
++ if (folio_order(src) > 1 &&
++ !data_race(list_empty(&src->_deferred_list))) {
++ src_deferred_split = true;
++ src_partially_mapped = folio_test_partially_mapped(src);
++ }
++
+ rc = move_to_new_folio(dst, src, mode);
+ if (rc)
+ goto out;
+@@ -1378,6 +1386,15 @@ static int migrate_folio_move(free_folio
+ if (old_page_state & PAGE_WAS_MAPPED)
+ remove_migration_ptes(src, dst, 0);
+
++ /*
++ * Requeue the destination folio on the deferred split queue if
++ * the source was on the queue. The source is unqueued in
++ * __folio_migrate_mapping(), so we recorded the state from
++ * before move_to_new_folio().
++ */
++ if (src_deferred_split)
++ deferred_split_folio(dst, src_partially_mapped);
++
+ out_unlock_both:
+ folio_unlock(dst);
+ set_page_owner_migrate_reason(&dst->page, reason);
--- /dev/null
+From stable+bounces-242460-greg=kroah.com@vger.kernel.org Fri May 1 17:57:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 11:54:40 -0400
+Subject: mm/zsmalloc: copy KMSAN metadata in zs_page_migrate()
+To: stable@vger.kernel.org
+Cc: Shigeru Yoshida <syoshida@redhat.com>, Sergey Senozhatsky <senozhatsky@chromium.org>, Mark-PK Tsai <mark-pk.tsai@mediatek.com>, Minchan Kim <minchan@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501155440.3611449-1-sashal@kernel.org>
+
+From: Shigeru Yoshida <syoshida@redhat.com>
+
+[ Upstream commit 4fb61d95ad21c3b6f1c09f357ff49d70abb0535e ]
+
+zs_page_migrate() uses copy_page() to copy the contents of a zspage page
+during migration. However, copy_page() is not instrumented by KMSAN, so
+the shadow and origin metadata of the destination page are not updated.
+
+As a result, subsequent accesses to the migrated page are reported as
+use-after-free by KMSAN, despite the data being correctly copied.
+
+Add a kmsan_copy_page_meta() call after copy_page() to propagate the KMSAN
+metadata to the new page, matching what copy_highpage() does internally.
+
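+For reference, a simplified sketch of the copy_highpage() behaviour the
+commit message refers to (see include/linux/highmem.h for the real helper):
+it is essentially a mapped copy_page() plus the KMSAN metadata copy, which
+is exactly the part the open-coded copy in zs_page_migrate() was missing.
+
+        vto = kmap_local_page(to);
+        vfrom = kmap_local_page(from);
+        copy_page(vto, vfrom);                  /* data only, KMSAN-unaware */
+        kmsan_copy_page_meta(to, from);         /* shadow + origin metadata */
+        kunmap_local(vfrom);
+        kunmap_local(vto);
+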
+Link: https://lkml.kernel.org/r/20260321132912.93434-1-syoshida@redhat.com
+Fixes: afb2d666d025 ("zsmalloc: use copy_page for full page copy")
+Signed-off-by: Shigeru Yoshida <syoshida@redhat.com>
+Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Mark-PK Tsai <mark-pk.tsai@mediatek.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ translated zpdesc_page(newzpdesc/zpdesc) arguments to newpage/page ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/zsmalloc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1808,6 +1808,7 @@ static int zs_page_migrate(struct page *
+ */
+ d_addr = kmap_atomic(newpage);
+ copy_page(d_addr, s_addr);
++ kmsan_copy_page_meta(newpage, page);
+ kunmap_atomic(d_addr);
+
+ for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
--- /dev/null
+From stable+bounces-242859-greg=kroah.com@vger.kernel.org Mon May 4 09:26:50 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 03:26:37 -0400
+Subject: net: bridge: use a stable FDB dst snapshot in RCU readers
+To: stable@vger.kernel.org
+Cc: Zhengchuan Liang <zcliangcn@gmail.com>, stable@kernel.org, Yifan Wu <yifanwucs@gmail.com>, Juefei Pu <tomapufckgml@gmail.com>, Yuan Tan <yuantan098@gmail.com>, Xin Liu <bird@lzu.edu.cn>, Ren Wei <enjou1224z@gmail.com>, Ren Wei <n05ec@lzu.edu.cn>, Ido Schimmel <idosch@nvidia.com>, Nikolay Aleksandrov <razor@blackwall.org>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504072637.1829085-1-sashal@kernel.org>
+
+From: Zhengchuan Liang <zcliangcn@gmail.com>
+
+[ Upstream commit df4601653201de21b487c3e7fffd464790cab808 ]
+
+Local FDB entries can be rewritten in place by `fdb_delete_local()`, which
+updates `f->dst` to another port or to `NULL` while keeping the entry
+alive. Several bridge RCU readers inspect `f->dst`, including
+`br_fdb_fillbuf()` through the `brforward_read()` sysfs path.
+
+These readers currently load `f->dst` multiple times and can therefore
+observe inconsistent values across the check and later dereference.
+In `br_fdb_fillbuf()`, this means a concurrent local-FDB update can change
+`f->dst` after the NULL check and before the `port_no` dereference,
+leading to a NULL-ptr-deref.
+
+Fix this by taking a single `READ_ONCE()` snapshot of `f->dst` in each
+affected RCU reader and using that snapshot for the rest of the access
+sequence. Also publish the in-place `f->dst` updates in `fdb_delete_local()`
+with `WRITE_ONCE()` so the readers and writer use matching access patterns.
+
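+Illustration of the two shapes (not part of the patch; port_no stands in
+for whatever field the reader touches):
+
+        /* racy: f->dst may change between the check and the use */
+        if (f->dst)
+                port_no = f->dst->port_no;
+
+        /* fixed: one snapshot, then only the snapshot is used */
+        const struct net_bridge_port *dst = READ_ONCE(f->dst);
+
+        if (dst)
+                port_no = dst->port_no;
+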
+Fixes: 960b589f86c7 ("bridge: Properly check if local fdb entry can be deleted in br_fdb_change_mac_address")
+Cc: stable@kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Tested-by: Ren Wei <enjou1224z@gmail.com>
+Signed-off-by: Zhengchuan Liang <zcliangcn@gmail.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/6570fabb85ecadb8baaf019efe856f407711c7b9.1776043229.git.zcliangcn@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ kept `*idx < cb->args[2]` instead of `*idx < ctx->fdb_idx` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_arp_nd_proxy.c | 8 +++++---
+ net/bridge/br_fdb.c | 28 ++++++++++++++++++----------
+ 2 files changed, 23 insertions(+), 13 deletions(-)
+
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -199,11 +199,12 @@ void br_do_proxy_suppress_arp(struct sk_
+
+ f = br_fdb_find_rcu(br, n->ha, vid);
+ if (f) {
++ const struct net_bridge_port *dst = READ_ONCE(f->dst);
+ bool replied = false;
+
+ if ((p && (p->flags & BR_PROXYARP)) ||
+- (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) ||
+- br_is_neigh_suppress_enabled(f->dst, vid)) {
++ (dst && (dst->flags & BR_PROXYARP_WIFI)) ||
++ br_is_neigh_suppress_enabled(dst, vid)) {
+ if (!vid)
+ br_arp_send(br, p, skb->dev, sip, tip,
+ sha, n->ha, sha, 0, 0);
+@@ -463,9 +464,10 @@ void br_do_suppress_nd(struct sk_buff *s
+
+ f = br_fdb_find_rcu(br, n->ha, vid);
+ if (f) {
++ const struct net_bridge_port *dst = READ_ONCE(f->dst);
+ bool replied = false;
+
+- if (br_is_neigh_suppress_enabled(f->dst, vid)) {
++ if (br_is_neigh_suppress_enabled(dst, vid)) {
+ if (vid != 0)
+ br_nd_send(br, p, skb, n,
+ skb->vlan_proto,
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -243,6 +243,7 @@ struct net_device *br_fdb_find_port(cons
+ const unsigned char *addr,
+ __u16 vid)
+ {
++ const struct net_bridge_port *dst;
+ struct net_bridge_fdb_entry *f;
+ struct net_device *dev = NULL;
+ struct net_bridge *br;
+@@ -255,8 +256,11 @@ struct net_device *br_fdb_find_port(cons
+ br = netdev_priv(br_dev);
+ rcu_read_lock();
+ f = br_fdb_find_rcu(br, addr, vid);
+- if (f && f->dst)
+- dev = f->dst->dev;
++ if (f) {
++ dst = READ_ONCE(f->dst);
++ if (dst)
++ dev = dst->dev;
++ }
+ rcu_read_unlock();
+
+ return dev;
+@@ -353,7 +357,7 @@ static void fdb_delete_local(struct net_
+ vg = nbp_vlan_group(op);
+ if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+ (!vid || br_vlan_find(vg, vid))) {
+- f->dst = op;
++ WRITE_ONCE(f->dst, op);
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+ return;
+ }
+@@ -364,7 +368,7 @@ static void fdb_delete_local(struct net_
+ /* Maybe bridge device has same hw addr? */
+ if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+ (!vid || (v && br_vlan_should_use(v)))) {
+- f->dst = NULL;
++ WRITE_ONCE(f->dst, NULL);
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+ return;
+ }
+@@ -827,6 +831,7 @@ int br_fdb_test_addr(struct net_device *
+ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
+ unsigned long maxnum, unsigned long skip)
+ {
++ const struct net_bridge_port *dst;
+ struct net_bridge_fdb_entry *f;
+ struct __fdb_entry *fe = buf;
+ unsigned long delta;
+@@ -843,7 +848,8 @@ int br_fdb_fillbuf(struct net_bridge *br
+ continue;
+
+ /* ignore pseudo entry for local MAC address */
+- if (!f->dst)
++ dst = READ_ONCE(f->dst);
++ if (!dst)
+ continue;
+
+ if (skip) {
+@@ -855,8 +861,8 @@ int br_fdb_fillbuf(struct net_bridge *br
+ memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
+
+ /* due to ABI compat need to split into hi/lo */
+- fe->port_no = f->dst->port_no;
+- fe->port_hi = f->dst->port_no >> 8;
++ fe->port_no = dst->port_no;
++ fe->port_hi = dst->port_no >> 8;
+
+ fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+ if (!test_bit(BR_FDB_STATIC, &f->flags)) {
+@@ -981,9 +987,11 @@ int br_fdb_dump(struct sk_buff *skb,
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
++ const struct net_bridge_port *dst = READ_ONCE(f->dst);
++
+ if (*idx < cb->args[2])
+ goto skip;
+- if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
++ if (filter_dev && (!dst || dst->dev != filter_dev)) {
+ if (filter_dev != dev)
+ goto skip;
+ /* !f->dst is a special case for bridge
+@@ -991,10 +999,10 @@ int br_fdb_dump(struct sk_buff *skb,
+ * Therefore need a little more filtering
+ * we only want to dump the !f->dst case
+ */
+- if (f->dst)
++ if (dst)
+ goto skip;
+ }
+- if (!filter_dev && f->dst)
++ if (!filter_dev && dst)
+ goto skip;
+
+ err = fdb_fill_info(skb, br, f,
--- /dev/null
+From stable+bounces-242863-greg=kroah.com@vger.kernel.org Mon May 4 09:49:01 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 03:46:22 -0400
+Subject: net: mctp: fix don't require received header reserved bits to be zero
+To: stable@vger.kernel.org
+Cc: Yuan Zhaoming <yuanzm2@lenovo.com>, Jeremy Kerr <jk@codeconstruct.com.au>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504074622.1856099-1-sashal@kernel.org>
+
+From: Yuan Zhaoming <yuanzm2@lenovo.com>
+
+[ Upstream commit a663bac71a2f0b3ac6c373168ca57b2a6e6381aa ]
+
+From the MCTP Base specification (DSP0236 v1.2.1), the first byte of
+the MCTP header contains a 4-bit reserved field and a 4-bit version.
+
+On our current receive path, we require those 4 reserved bits to be
+zero, but the 9500-8i card is non-conformant, and may set these
+reserved bits.
+
+DSP0236 states that the reserved bits must be written as zero, and
+ignored when read. While the device might not conform to the former,
+we should accept these messages to conform to the latter.
+
+Relax our check on the MCTP version byte to allow non-zero bits in the
+reserved field.
+
+Fixes: 889b7da23abf ("mctp: Add initial routing framework")
+Signed-off-by: Yuan Zhaoming <yuanzm2@lenovo.com>
+Cc: stable@vger.kernel.org
+Acked-by: Jeremy Kerr <jk@codeconstruct.com.au>
+Link: https://patch.msgid.link/20260417141340.5306-1-yuanzhaoming901030@126.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/mctp.h | 3 +++
+ net/mctp/route.c | 8 ++++++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/include/net/mctp.h
++++ b/include/net/mctp.h
+@@ -26,6 +26,9 @@ struct mctp_hdr {
+ #define MCTP_VER_MIN 1
+ #define MCTP_VER_MAX 1
+
++/* Definitions for ver field */
++#define MCTP_HDR_VER_MASK GENMASK(3, 0)
++
+ /* Definitions for flags_seq_tag field */
+ #define MCTP_HDR_FLAG_SOM BIT(7)
+ #define MCTP_HDR_FLAG_EOM BIT(6)
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -388,6 +388,7 @@ static int mctp_route_input(struct mctp_
+ unsigned long f;
+ u8 tag, flags;
+ int rc;
++ u8 ver;
+
+ msk = NULL;
+ rc = -EINVAL;
+@@ -411,7 +412,8 @@ static int mctp_route_input(struct mctp_
+ netid = mctp_cb(skb)->net;
+ skb_pull(skb, sizeof(struct mctp_hdr));
+
+- if (mh->ver != 1)
++ ver = mh->ver & MCTP_HDR_VER_MASK;
++ if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
+ goto out;
+
+ flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
+@@ -1197,6 +1199,7 @@ static int mctp_pkttype_receive(struct s
+ struct mctp_skb_cb *cb;
+ struct mctp_route *rt;
+ struct mctp_hdr *mh;
++ u8 ver;
+
+ rcu_read_lock();
+ mdev = __mctp_dev_get(dev);
+@@ -1214,7 +1217,8 @@ static int mctp_pkttype_receive(struct s
+
+ /* We have enough for a header; decode and route */
+ mh = mctp_hdr(skb);
+- if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
++ ver = mh->ver & MCTP_HDR_VER_MASK;
++ if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
+ goto err_drop;
+
+ /* source must be valid unicast or null; drop reserved ranges and
--- /dev/null
+From stable+bounces-242816-greg=kroah.com@vger.kernel.org Sun May 3 21:14:29 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 May 2026 15:14:19 -0400
+Subject: net: qrtr: ns: Limit the maximum number of lookups
+To: stable@vger.kernel.org
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260503191419.1286355-1-sashal@kernel.org>
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+[ Upstream commit 5640227d9a21c6a8be249a10677b832e7f40dc55 ]
+
+Current code does no bounds checking on the number of lookups a client can
+perform. Though the code restricts the lookups to local clients, there is
+still a possibility of a malicious local client sending a flood of
+NEW_LOOKUP messages over the same socket.
+
+Fix this issue by limiting the maximum number of lookups to 64 globally.
+Since the nameserver allows at most one local observer, this global
+lookup count will ensure that the lookups stay within the limit.
+
+Note that the limit of 64 is chosen based on the current platform
+requirements. If the requirement changes in the future, this limit can be
+increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-2-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ adapted comment block to only mention QRTR_NS_MAX_LOOKUPS and kept kzalloc() instead of kzalloc_obj() due to missing prerequisite commits ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -22,6 +22,7 @@ static struct {
+ struct socket *sock;
+ struct sockaddr_qrtr bcast_sq;
+ struct list_head lookups;
++ u32 lookup_count;
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ void (*saved_data_ready)(struct sock *sk);
+@@ -76,6 +77,11 @@ struct qrtr_node {
+ */
+ #define QRTR_NS_MAX_SERVERS 256
+
++/* Max lookup limit is chosen based on the current platform requirements. If the
++ * requirement changes in the future, this value can be increased.
++ */
++#define QRTR_NS_MAX_LOOKUPS 64
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+ struct qrtr_node *node;
+@@ -444,6 +450,7 @@ static int ctrl_cmd_del_client(struct so
+
+ list_del(&lookup->li);
+ kfree(lookup);
++ qrtr_ns.lookup_count--;
+ }
+
+ /* Remove the server belonging to this port but don't broadcast
+@@ -561,6 +568,11 @@ static int ctrl_cmd_new_lookup(struct so
+ if (from->sq_node != qrtr_ns.local_node)
+ return -EINVAL;
+
++ if (qrtr_ns.lookup_count >= QRTR_NS_MAX_LOOKUPS) {
++ pr_err_ratelimited("QRTR client node exceeds max lookup limit!\n");
++ return -ENOSPC;
++ }
++
+ lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+@@ -569,6 +581,7 @@ static int ctrl_cmd_new_lookup(struct so
+ lookup->service = service;
+ lookup->instance = instance;
+ list_add_tail(&lookup->li, &qrtr_ns.lookups);
++ qrtr_ns.lookup_count++;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.service = service;
+@@ -609,6 +622,7 @@ static void ctrl_cmd_del_lookup(struct s
+
+ list_del(&lookup->li);
+ kfree(lookup);
++ qrtr_ns.lookup_count--;
+ }
+ }
+
--- /dev/null
+From stable+bounces-242814-greg=kroah.com@vger.kernel.org Sun May 3 21:14:20 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 May 2026 15:14:12 -0400
+Subject: net: qrtr: ns: Limit the maximum server registration per node
+To: stable@vger.kernel.org
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>, Yiming Qian <yimingqian591@gmail.com>, Simon Horman <horms@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260503191412.1286176-1-sashal@kernel.org>
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+[ Upstream commit d5ee2ff98322337951c56398e79d51815acbf955 ]
+
+Current code does no bounds checking on the number of servers added per
+node. A malicious client can send a flood of NEW_SERVER messages and
+exhaust memory.
+
+Fix this issue by limiting the maximum number of server registrations to
+256 per node. If the NEW_SERVER message is received for an old port, then
+don't restrict it as it will get replaced. While at it, also rate limit
+the error messages in the failure path of qrtr_ns_worker().
+
+Note that the limit of 256 is chosen based on the current platform
+requirements. If the requirement changes in the future, this limit can be
+increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Reported-by: Yiming Qian <yimingqian591@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-1-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c | 26 +++++++++++++++++++++-----
+ 1 file changed, 21 insertions(+), 5 deletions(-)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -68,8 +68,14 @@ struct qrtr_server {
+ struct qrtr_node {
+ unsigned int id;
+ struct xarray servers;
++ u32 server_count;
+ };
+
++/* Max server limit is chosen based on the current platform requirements. If the
++ * requirement changes in the future, this value can be increased.
++ */
++#define QRTR_NS_MAX_SERVERS 256
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+ struct qrtr_node *node;
+@@ -230,6 +236,17 @@ static struct qrtr_server *server_add(un
+ if (!service || !port)
+ return NULL;
+
++ node = node_get(node_id);
++ if (!node)
++ return NULL;
++
++ /* Make sure the new servers per port are capped at the maximum value */
++ old = xa_load(&node->servers, port);
++ if (!old && node->server_count >= QRTR_NS_MAX_SERVERS) {
++ pr_err_ratelimited("QRTR client node %u exceeds max server limit!\n", node_id);
++ return NULL;
++ }
++
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return NULL;
+@@ -239,10 +256,6 @@ static struct qrtr_server *server_add(un
+ srv->node = node_id;
+ srv->port = port;
+
+- node = node_get(node_id);
+- if (!node)
+- goto err;
+-
+ /* Delete the old server on the same port */
+ old = xa_store(&node->servers, port, srv, GFP_KERNEL);
+ if (old) {
+@@ -253,6 +266,8 @@ static struct qrtr_server *server_add(un
+ } else {
+ kfree(old);
+ }
++ } else {
++ node->server_count++;
+ }
+
+ trace_qrtr_ns_server_add(srv->service, srv->instance,
+@@ -293,6 +308,7 @@ static int server_del(struct qrtr_node *
+ }
+
+ kfree(srv);
++ node->server_count--;
+
+ return 0;
+ }
+@@ -681,7 +697,7 @@ static void qrtr_ns_worker(struct work_s
+ }
+
+ if (ret < 0)
+- pr_err("failed while handling packet from %d:%d",
++ pr_err_ratelimited("failed while handling packet from %d:%d",
+ sq.sq_node, sq.sq_port);
+ }
+
--- /dev/null
+From stable+bounces-242866-greg=kroah.com@vger.kernel.org Mon May 4 09:59:28 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 03:59:19 -0400
+Subject: net: qrtr: ns: Limit the total number of nodes
+To: stable@vger.kernel.org
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504075919.1865941-2-sashal@kernel.org>
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+[ Upstream commit 27d5e84e810b0849d08b9aec68e48570461ce313 ]
+
+Currently, the nameserver doesn't limit the number of nodes it handles.
+This can be an attack vector if a malicious client starts registering
+random nodes, leading to memory exhaustion.
+
+Hence, limit the maximum number of nodes to 64. Note that the limit of 64
+is chosen based on the current platform requirements. If the requirement
+changes in the future, this limit can be increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-4-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ dropped comment/define changes for missing QRTR_NS_MAX_SERVERS/LOOKUPS prereqs and kept plain kzalloc instead of kzalloc_obj ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -82,6 +82,13 @@ struct qrtr_node {
+ */
+ #define QRTR_NS_MAX_LOOKUPS 64
+
++/* Max nodes limit is chosen based on the current platform requirements.
++ * If the requirement changes in the future, this value can be increased.
++ */
++#define QRTR_NS_MAX_NODES 64
++
++static u8 node_count;
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+ struct qrtr_node *node;
+@@ -90,6 +97,11 @@ static struct qrtr_node *node_get(unsign
+ if (node)
+ return node;
+
++ if (node_count >= QRTR_NS_MAX_NODES) {
++ pr_err_ratelimited("QRTR clients exceed max node limit!\n");
++ return NULL;
++ }
++
+ /* If node didn't exist, allocate and insert it to the tree */
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+@@ -103,6 +115,8 @@ static struct qrtr_node *node_get(unsign
+ return NULL;
+ }
+
++ node_count++;
++
+ return node;
+ }
+
+@@ -409,6 +423,7 @@ static int ctrl_cmd_bye(struct sockaddr_
+ delete_node:
+ xa_erase(&nodes, from->sq_node);
+ kfree(node);
++ node_count--;
+
+ return ret;
+ }
--- /dev/null
+From stable+bounces-242573-greg=kroah.com@vger.kernel.org Sat May 2 04:12:43 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 22:12:37 -0400
+Subject: RDMA/mana_ib: Disable RX steering on RSS QP destroy
+To: stable@vger.kernel.org
+Cc: Long Li <longli@microsoft.com>, Leon Romanovsky <leon@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260502021238.4167696-1-sashal@kernel.org>
+
+From: Long Li <longli@microsoft.com>
+
+[ Upstream commit dbeb256e8dd87233d891b170c0b32a6466467036 ]
+
+When an RSS QP is destroyed (e.g. DPDK exit), mana_ib_destroy_qp_rss()
+destroys the RX WQ objects but does not disable vPort RX steering in
+firmware. This leaves stale steering configuration that still points to
+the destroyed RX objects.
+
+If traffic continues to arrive (e.g. peer VM is still transmitting) and
+the VF interface is subsequently brought up (mana_open), the firmware
+may deliver completions using stale CQ IDs from the old RX objects.
+These CQ IDs can be reused by the ethernet driver for new TX CQs,
+causing RX completions to land on TX CQs:
+
+ WARNING: mana_poll_tx_cq+0x1b8/0x220 [mana] (is_sq == false)
+ WARNING: mana_gd_process_eq_events+0x209/0x290 (cq_table lookup fails)
+
+Fix this by disabling vPort RX steering before destroying RX WQ objects.
+Note that mana_fence_rqs() cannot be used here because the fence
+completion is delivered on the CQ, which is polled by user-mode (e.g.
+DPDK) and not visible to the kernel driver.
+
+Refactor the disable logic into a shared mana_disable_vport_rx() in
+mana_en, exported for use by mana_ib, replacing the duplicate code.
+The ethernet driver's mana_dealloc_queues() is also updated to call
+this common function.
+
+Fixes: 0266a177631d ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
+Cc: stable@vger.kernel.org
+Signed-off-by: Long Li <longli@microsoft.com>
+Link: https://patch.msgid.link/20260325194100.1929056-1-longli@microsoft.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+[ kept early-return error handling and used unquoted NET_MANA namespace in EXPORT_SYMBOL_NS ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mana/qp.c | 15 +++++++++++++++
+ drivers/net/ethernet/microsoft/mana/mana_en.c | 11 ++++++++++-
+ include/net/mana/mana.h | 1 +
+ 3 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mana/qp.c
++++ b/drivers/infiniband/hw/mana/qp.c
+@@ -601,6 +601,21 @@ static int mana_ib_destroy_qp_rss(struct
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
+ mpc = netdev_priv(ndev);
+
++ /* Disable vPort RX steering before destroying RX WQ objects.
++ * Otherwise firmware still routes traffic to the destroyed queues,
++ * which can cause bogus completions on reused CQ IDs when the
++ * ethernet driver later creates new queues on mana_open().
++ *
++ * Unlike the ethernet teardown path, mana_fence_rqs() cannot be
++ * used here because the fence completion CQE is delivered on the
++ * CQ which is polled by userspace (e.g. DPDK), so there is no way
++ * for the kernel to wait for fence completion.
++ *
++ * This is best effort — if it fails there is not much we can do,
++ * and mana_cfg_vport_steering() already logs the error.
++ */
++ mana_disable_vport_rx(mpc);
++
+ for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
+ ibwq = ind_tbl->ind_tbl[i];
+ wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -2392,6 +2392,13 @@ static void mana_rss_table_init(struct m
+ ethtool_rxfh_indir_default(i, apc->num_queues);
+ }
+
++int mana_disable_vport_rx(struct mana_port_context *apc)
++{
++ return mana_cfg_vport_steering(apc, TRI_STATE_FALSE, false, false,
++ false);
++}
++EXPORT_SYMBOL_NS(mana_disable_vport_rx, NET_MANA);
++
+ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
+ bool update_hash, bool update_tab)
+ {
+@@ -2676,12 +2683,14 @@ static int mana_dealloc_queues(struct ne
+ */
+
+ apc->rss_state = TRI_STATE_FALSE;
+- err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
++ err = mana_disable_vport_rx(apc);
+ if (err) {
+ netdev_err(ndev, "Failed to disable vPort: %d\n", err);
+ return err;
+ }
+
++ mana_fence_rqs(apc);
++
+ mana_destroy_vport(apc);
+
+ return 0;
+--- a/include/net/mana/mana.h
++++ b/include/net/mana/mana.h
+@@ -473,6 +473,7 @@ struct mana_port_context {
+ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+ int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
+ bool update_hash, bool update_tab);
++int mana_disable_vport_rx(struct mana_port_context *apc);
+
+ int mana_alloc_queues(struct net_device *ndev);
+ int mana_attach(struct net_device *ndev);
--- /dev/null
+From stable+bounces-242653-greg=kroah.com@vger.kernel.org Sun May 3 12:59:43 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 May 2026 06:59:34 -0400
+Subject: rxrpc: Fix potential UAF after skb_unshare() failure
+To: stable@vger.kernel.org
+Cc: David Howells <dhowells@redhat.com>, Marc Dionne <marc.dionne@auristor.com>, Jeffrey Altman <jaltman@auristor.com>, Simon Horman <horms@kernel.org>, linux-afs@lists.infradead.org, stable@kernel.org, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260503105934.1030665-1-sashal@kernel.org>
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 1f2740150f904bfa60e4bad74d65add3ccb5e7f8 ]
+
+If skb_unshare() fails to unshare a packet due to allocation failure in
+rxrpc_input_packet(), the skb pointer in the parent (rxrpc_io_thread())
+will be NULL'd out. This will likely cause the call to
+trace_rxrpc_rx_done() to oops.
+
+Fix this by moving the unsharing down to where rxrpc_input_call_event()
+calls rxrpc_input_call_packet(). There are a number of places prior to
+that where we ignore DATA packets for a variety of reasons (such as the
+call already being complete) for which an unshare is then avoided.
+
+And with that, rxrpc_input_packet() doesn't need to take a pointer to the
+pointer to the packet, so change that to just a pointer.
+
+Fixes: 2d1faf7a0ca3 ("rxrpc: Simplify skbuff accounting in receive path")
+Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Jeffrey Altman <jaltman@auristor.com>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+cc: stable@kernel.org
+Link: https://patch.msgid.link/20260422161438.2593376-4-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ adapted to per-skb rxrpc_input_call_event() signature ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/trace/events/rxrpc.h | 4 ++--
+ net/rxrpc/ar-internal.h | 1 -
+ net/rxrpc/call_event.c | 24 ++++++++++++++++++++++--
+ net/rxrpc/io_thread.c | 24 ++----------------------
+ net/rxrpc/skbuff.c | 9 ---------
+ 5 files changed, 26 insertions(+), 36 deletions(-)
+
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -127,8 +127,7 @@
+ E_(rxrpc_call_poke_timer_now, "Timer-now")
+
+ #define rxrpc_skb_traces \
+- EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
+- EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
++ EM(rxrpc_skb_get_call_rx, "GET call-rx ") \
+ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
+ EM(rxrpc_skb_get_conn_work, "GET conn-work") \
+ EM(rxrpc_skb_get_last_nack, "GET last-nack") \
+@@ -153,6 +152,7 @@
+ EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_skb_see_reject, "SEE reject ") \
+ EM(rxrpc_skb_see_rotate, "SEE rotate ") \
++ EM(rxrpc_skb_see_unshare_nomem, "SEE unshar-nm") \
+ E_(rxrpc_skb_see_version, "SEE version ")
+
+ #define rxrpc_local_traces \
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -1260,7 +1260,6 @@ int rxrpc_server_keyring(struct rxrpc_so
+ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
+ void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+-void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_purge_queue(struct sk_buff_head *);
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -342,8 +342,28 @@ bool rxrpc_input_call_event(struct rxrpc
+ if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
+ goto out;
+
+- if (skb)
+- rxrpc_input_call_packet(call, skb);
++ if (skb) {
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++
++ if (sp->hdr.securityIndex != 0 &&
++ skb_cloned(skb)) {
++ /* Unshare the packet so that it can be modified for
++ * in-place decryption.
++ */
++ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
++
++ if (nskb) {
++ rxrpc_new_skb(nskb, rxrpc_skb_new_unshared);
++ rxrpc_input_call_packet(call, nskb);
++ rxrpc_free_skb(nskb, rxrpc_skb_put_input);
++ } else {
++ /* OOM - Drop the packet. */
++ rxrpc_see_skb(skb, rxrpc_skb_see_unshare_nomem);
++ }
++ } else {
++ rxrpc_input_call_packet(call, skb);
++ }
++ }
+
+ /* If we see our async-event poke, check for timeout trippage. */
+ now = ktime_get_real();
+--- a/net/rxrpc/io_thread.c
++++ b/net/rxrpc/io_thread.c
+@@ -178,13 +178,12 @@ static bool rxrpc_extract_abort(struct s
+ /*
+ * Process packets received on the local endpoint
+ */
+-static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
++static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff *skb)
+ {
+ struct rxrpc_connection *conn;
+ struct sockaddr_rxrpc peer_srx;
+ struct rxrpc_skb_priv *sp;
+ struct rxrpc_peer *peer = NULL;
+- struct sk_buff *skb = *_skb;
+ bool ret = false;
+
+ skb_pull(skb, sizeof(struct udphdr));
+@@ -230,25 +229,6 @@ static bool rxrpc_input_packet(struct rx
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
+ if (sp->hdr.seq == 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
+-
+- /* Unshare the packet so that it can be modified for in-place
+- * decryption.
+- */
+- if (sp->hdr.securityIndex != 0) {
+- skb = skb_unshare(skb, GFP_ATOMIC);
+- if (!skb) {
+- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
+- *_skb = NULL;
+- return just_discard;
+- }
+-
+- if (skb != *_skb) {
+- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare);
+- *_skb = skb;
+- rxrpc_new_skb(skb, rxrpc_skb_new_unshared);
+- sp = rxrpc_skb(skb);
+- }
+- }
+ break;
+
+ case RXRPC_PACKET_TYPE_CHALLENGE:
+@@ -490,7 +470,7 @@ int rxrpc_io_thread(void *data)
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_PACKET:
+ skb->priority = 0;
+- if (!rxrpc_input_packet(local, &skb))
++ if (!rxrpc_input_packet(local, skb))
+ rxrpc_reject_packet(local, skb);
+ trace_rxrpc_rx_done(skb->mark, skb->priority);
+ rxrpc_free_skb(skb, rxrpc_skb_put_input);
+--- a/net/rxrpc/skbuff.c
++++ b/net/rxrpc/skbuff.c
+@@ -47,15 +47,6 @@ void rxrpc_get_skb(struct sk_buff *skb,
+ }
+
+ /*
+- * Note the dropping of a ref on a socket buffer by the core.
+- */
+-void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
+-{
+- int n = atomic_inc_return(&rxrpc_n_rx_skbs);
+- trace_rxrpc_skb(skb, 0, n, why);
+-}
+-
+-/*
+ * Note the destruction of a socket buffer.
+ */
+ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
rtmutex-use-waiter-task-instead-of-current-in-remove_waiter.patch
scsi-sd-fix-missing-put_disk-when-device_add-disk_dev-fails.patch
seg6-fix-seg6-lwtunnel-output-redirect-for-l2-reduced-encap-mode.patch
+smb-client-validate-the-whole-dacl-before-rewriting-it-in-cifsacl.patch
+f2fs-fix-uaf-caused-by-decrementing-sbi-nr_pages-in-f2fs_write_end_io.patch
+lib-test_hmm-evict-device-pages-on-file-close-to-avoid-use-after-free.patch
+f2fs-fix-to-do-sanity-check-on-dcc-discard_cmd_cnt-conditionally.patch
+ksmbd-use-msleep-instaed-of-schedule_timeout_interruptible.patch
+ksmbd-replace-connection-list-with-hash-table.patch
+ksmbd-reset-rcount-per-connection-in-ksmbd_conn_wait_idle_sess_id.patch
+thermal-core-fix-thermal-zone-governor-cleanup-issues.patch
+wifi-mt76-mt792x-describe-usb-wfsys-reset-with-a-descriptor.patch
+wifi-mt76-mt792x-fix-mt7925u-usb-wfsys-reset-handling.patch
+wifi-mwifiex-fix-use-after-free-in-mwifiex_adapter_cleanup.patch
+mm-migrate-factor-out-movable_ops-page-handling-into-migrate_movable_ops_page.patch
+mm-migrate-move-movable_ops-page-handling-out-of-move_to_new_folio.patch
+mm-migrate-requeue-destination-folio-on-deferred-split-queue.patch
+alsa-aoa-use-guard-for-mutex-locks.patch
+alsa-aoa-i2sbus-clear-stale-prepared-state.patch
+mm-zsmalloc-copy-kmsan-metadata-in-zs_page_migrate.patch
+media-rc-ttusbir-respect-dma-coherency-rules.patch
+alsa-aoa-skip-devices-with-no-codecs-in-i2sbus_resume.patch
+media-rc-igorplugusb-heed-coherency-rules.patch
+rdma-mana_ib-disable-rx-steering-on-rss-qp-destroy.patch
+block-relax-pgmap-check-in-bio_add_page-for-compatible-zone-device-pages.patch
+iio-frequency-admv1013-add-dev-variable.patch
+iio-frequency-admv1013-fix-null-pointer-dereference-on-str.patch
+rxrpc-fix-potential-uaf-after-skb_unshare-failure.patch
+net-qrtr-ns-limit-the-maximum-server-registration-per-node.patch
+net-qrtr-ns-limit-the-maximum-number-of-lookups.patch
+net-bridge-use-a-stable-fdb-dst-snapshot-in-rcu-readers.patch
+net-mctp-fix-don-t-require-received-header-reserved-bits-to-be-zero.patch
+net-qrtr-ns-limit-the-total-number-of-nodes.patch
+spi-fix-resource-leaks-on-device-setup-failure.patch
--- /dev/null
+From stable+bounces-240987-greg=kroah.com@vger.kernel.org Fri Apr 24 16:46:47 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2026 10:46:09 -0400
+Subject: smb: client: validate the whole DACL before rewriting it in cifsacl
+To: stable@vger.kernel.org
+Cc: Michael Bommarito <michael.bommarito@gmail.com>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260424144609.2186088-1-sashal@kernel.org>
+
+From: Michael Bommarito <michael.bommarito@gmail.com>
+
+[ Upstream commit 0a8cf165566ba55a39fd0f4de172119dd646d39a ]
+
+build_sec_desc() and id_mode_to_cifs_acl() derive a DACL pointer from a
+server-supplied dacloffset and then use the incoming ACL to rebuild the
+chmod/chown security descriptor.
+
+The original fix only checked that the struct smb_acl header fits before
+reading dacl_ptr->size or dacl_ptr->num_aces. That avoids the immediate
+header-field OOB read, but the rewrite helpers still walk ACEs based on
+pdacl->num_aces with no structural validation of the incoming DACL body.
+
+A malicious server can return a truncated DACL that still contains a
+header, claims one or more ACEs, and then drive
+replace_sids_and_copy_aces() or set_chmod_dacl() past the validated
+extent while they compare or copy attacker-controlled ACEs.
+
+Factor the DACL structural checks into validate_dacl(), extend them to
+validate each ACE against the DACL bounds, and use the shared validator
+before the chmod/chown rebuild paths. parse_dacl() reuses the same
+validator so the read-side parser and write-side rewrite paths agree on
+what constitutes a well-formed incoming DACL.
+
+Fixes: bc3e9dd9d104 ("cifs: Change SIDs in ACEs while transferring file ownership.")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-6
+Assisted-by: Codex:gpt-5-4
+Signed-off-by: Michael Bommarito <michael.bommarito@gmail.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ no kmalloc_objs ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsacl.c | 116 +++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 85 insertions(+), 31 deletions(-)
+
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -758,6 +758,77 @@ static void dump_ace(struct smb_ace *pac
+ }
+ #endif
+
++static int validate_dacl(struct smb_acl *pdacl, char *end_of_acl)
++{
++ int i, ace_hdr_size, ace_size, min_ace_size;
++ u16 dacl_size, num_aces;
++ char *acl_base, *end_of_dacl;
++ struct smb_ace *pace;
++
++ if (!pdacl)
++ return 0;
++
++ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl)) {
++ cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ return -EINVAL;
++ }
++
++ dacl_size = le16_to_cpu(pdacl->size);
++ if (dacl_size < sizeof(struct smb_acl) ||
++ end_of_acl < (char *)pdacl + dacl_size) {
++ cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ return -EINVAL;
++ }
++
++ num_aces = le16_to_cpu(pdacl->num_aces);
++ if (!num_aces)
++ return 0;
++
++ ace_hdr_size = offsetof(struct smb_ace, sid) +
++ offsetof(struct smb_sid, sub_auth);
++ min_ace_size = ace_hdr_size + sizeof(__le32);
++ if (num_aces > (dacl_size - sizeof(struct smb_acl)) / min_ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ return -EINVAL;
++ }
++
++ end_of_dacl = (char *)pdacl + dacl_size;
++ acl_base = (char *)pdacl;
++ ace_size = sizeof(struct smb_acl);
++
++ for (i = 0; i < num_aces; ++i) {
++ if (end_of_dacl - acl_base < ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++
++ pace = (struct smb_ace *)(acl_base + ace_size);
++ acl_base = (char *)pace;
++
++ if (end_of_dacl - acl_base < ace_hdr_size ||
++ pace->sid.num_subauth == 0 ||
++ pace->sid.num_subauth > SID_MAX_SUB_AUTHORITIES) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++
++ ace_size = ace_hdr_size + sizeof(__le32) * pace->sid.num_subauth;
++ if (end_of_dacl - acl_base < ace_size ||
++ le16_to_cpu(pace->size) < ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++
++ ace_size = le16_to_cpu(pace->size);
++ if (end_of_dacl - acl_base < ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
++
+ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
+ struct cifs_fattr *fattr, bool mode_from_special_sid)
+@@ -765,7 +836,7 @@ static void parse_dacl(struct smb_acl *p
+ int i;
+ u16 num_aces = 0;
+ int acl_size;
+- char *acl_base;
++ char *acl_base, *end_of_dacl;
+ struct smb_ace **ppace;
+
+ /* BB need to add parm so we can store the SID BB */
+@@ -777,12 +848,8 @@ static void parse_dacl(struct smb_acl *p
+ return;
+ }
+
+- /* validate that we do not go past end of acl */
+- if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
+- end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
+- cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ if (validate_dacl(pdacl, end_of_acl))
+ return;
+- }
+
+ cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
+ le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+@@ -793,6 +860,7 @@ static void parse_dacl(struct smb_acl *p
+ user/group/other have no permissions */
+ fattr->cf_mode &= ~(0777);
+
++ end_of_dacl = (char *)pdacl + le16_to_cpu(pdacl->size);
+ acl_base = (char *)pdacl;
+ acl_size = sizeof(struct smb_acl);
+
+@@ -800,36 +868,16 @@ static void parse_dacl(struct smb_acl *p
+ if (num_aces > 0) {
+ umode_t denied_mode = 0;
+
+- if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) /
+- (offsetof(struct smb_ace, sid) +
+- offsetof(struct smb_sid, sub_auth) + sizeof(__le16)))
+- return;
+-
+ ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *),
+ GFP_KERNEL);
+ if (!ppace)
+ return;
+
+ for (i = 0; i < num_aces; ++i) {
+- if (end_of_acl - acl_base < acl_size)
+- break;
+-
+ ppace[i] = (struct smb_ace *) (acl_base + acl_size);
+- acl_base = (char *)ppace[i];
+- acl_size = offsetof(struct smb_ace, sid) +
+- offsetof(struct smb_sid, sub_auth);
+-
+- if (end_of_acl - acl_base < acl_size ||
+- ppace[i]->sid.num_subauth == 0 ||
+- ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+- (end_of_acl - acl_base <
+- acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+- (le16_to_cpu(ppace[i]->size) <
+- acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+- break;
+
+ #ifdef CONFIG_CIFS_DEBUG2
+- dump_ace(ppace[i], end_of_acl);
++ dump_ace(ppace[i], end_of_dacl);
+ #endif
+ if (mode_from_special_sid &&
+ ppace[i]->sid.num_subauth >= 3 &&
+@@ -872,6 +920,7 @@ static void parse_dacl(struct smb_acl *p
+ (void *)ppace[i],
+ sizeof(struct smb_ace)); */
+
++ acl_base = (char *)ppace[i];
+ acl_size = le16_to_cpu(ppace[i]->size);
+ }
+
+@@ -1295,10 +1344,9 @@ static int build_sec_desc(struct smb_nts
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+- if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
+- cifs_dbg(VFS, "Server returned illegal ACL size\n");
+- return -EINVAL;
+- }
++ rc = validate_dacl(dacl_ptr, end_of_acl);
++ if (rc)
++ return rc;
+ }
+
+ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
+@@ -1669,6 +1717,12 @@ id_mode_to_cifs_acl(struct inode *inode,
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
++ rc = validate_dacl(dacl_ptr, (char *)pntsd + secdesclen);
++ if (rc) {
++ kfree(pntsd);
++ cifs_put_tlink(tlink);
++ return rc;
++ }
+ if (mode_from_sid)
+ nsecdesclen +=
+ le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
--- /dev/null
+From stable+bounces-242998-greg=kroah.com@vger.kernel.org Mon May 4 13:32:51 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 07:32:43 -0400
+Subject: spi: fix resource leaks on device setup failure
+To: stable@vger.kernel.org
+Cc: Johan Hovold <johan@kernel.org>, Saravana Kannan <saravanak@kernel.org>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504113243.2090208-1-sashal@kernel.org>
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit db357034f7e0cf23f233f414a8508312dfe8fbbe ]
+
+Make sure to call the controller's cleanup() callback if spi_setup()
+fails while registering a device, to avoid leaking any resources
+allocated by setup().
+
+Fixes: c7299fea6769 ("spi: Fix spi device unregister flow")
+Cc: stable@vger.kernel.org # 5.13
+Cc: Saravana Kannan <saravanak@kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20260410154907.129248-2-johan@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi.c | 61 ++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 37 insertions(+), 24 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -42,6 +42,8 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_st
+
+ #include "internals.h"
+
++static int __spi_setup(struct spi_device *spi, bool initial_setup);
++
+ static DEFINE_IDR(spi_master_idr);
+
+ static void spidev_release(struct device *dev)
+@@ -735,7 +737,7 @@ static int __spi_add_device(struct spi_d
+ * normally rely on the device being setup. Devices
+ * using SPI_CS_HIGH can't coexist well otherwise...
+ */
+- status = spi_setup(spi);
++ status = __spi_setup(spi, true);
+ if (status < 0) {
+ dev_err(dev, "can't setup %s, status %d\n",
+ dev_name(&spi->dev), status);
+@@ -3879,27 +3881,7 @@ static int spi_set_cs_timing(struct spi_
+ return status;
+ }
+
+-/**
+- * spi_setup - setup SPI mode and clock rate
+- * @spi: the device whose settings are being modified
+- * Context: can sleep, and no requests are queued to the device
+- *
+- * SPI protocol drivers may need to update the transfer mode if the
+- * device doesn't work with its default. They may likewise need
+- * to update clock rates or word sizes from initial values. This function
+- * changes those settings, and must be called from a context that can sleep.
+- * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+- * effect the next time the device is selected and data is transferred to
+- * or from it. When this function returns, the SPI device is deselected.
+- *
+- * Note that this call will fail if the protocol driver specifies an option
+- * that the underlying controller or its driver does not support. For
+- * example, not all hardware supports wire transfers using nine bit words,
+- * LSB-first wire encoding, or active-high chipselects.
+- *
+- * Return: zero on success, else a negative error code.
+- */
+-int spi_setup(struct spi_device *spi)
++static int __spi_setup(struct spi_device *spi, bool initial_setup)
+ {
+ unsigned bad_bits, ugly_bits;
+ int status;
+@@ -3984,7 +3966,7 @@ int spi_setup(struct spi_device *spi)
+ status = spi_set_cs_timing(spi);
+ if (status) {
+ mutex_unlock(&spi->controller->io_mutex);
+- return status;
++ goto err_cleanup;
+ }
+
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+@@ -3993,7 +3975,7 @@ int spi_setup(struct spi_device *spi)
+ mutex_unlock(&spi->controller->io_mutex);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+- return status;
++ goto err_cleanup;
+ }
+
+ /*
+@@ -4030,6 +4012,37 @@ int spi_setup(struct spi_device *spi)
+ status);
+
+ return status;
++
++err_cleanup:
++ if (initial_setup)
++ spi_cleanup(spi);
++
++ return status;
++}
++
++/**
++ * spi_setup - setup SPI mode and clock rate
++ * @spi: the device whose settings are being modified
++ * Context: can sleep, and no requests are queued to the device
++ *
++ * SPI protocol drivers may need to update the transfer mode if the
++ * device doesn't work with its default. They may likewise need
++ * to update clock rates or word sizes from initial values. This function
++ * changes those settings, and must be called from a context that can sleep.
++ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
++ * effect the next time the device is selected and data is transferred to
++ * or from it. When this function returns, the SPI device is deselected.
++ *
++ * Note that this call will fail if the protocol driver specifies an option
++ * that the underlying controller or its driver does not support. For
++ * example, not all hardware supports wire transfers using nine bit words,
++ * LSB-first wire encoding, or active-high chipselects.
++ *
++ * Return: zero on success, else a negative error code.
++ */
++int spi_setup(struct spi_device *spi)
++{
++ return __spi_setup(spi, false);
+ }
+ EXPORT_SYMBOL_GPL(spi_setup);
+
--- /dev/null
+From stable+bounces-242163-greg=kroah.com@vger.kernel.org Thu Apr 30 18:54:17 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 12:45:15 -0400
+Subject: thermal: core: Fix thermal zone governor cleanup issues
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430164515.1851850-1-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 41ff66baf81c6541f4f985dd7eac4494d03d9440 ]
+
+If thermal_zone_device_register_with_trips() fails after adding
+a thermal governor to the thermal zone being registered, the
+governor is not removed from it as appropriate which may lead to
+a memory leak.
+
+In turn, thermal_zone_device_unregister() calls thermal_set_governor()
+without acquiring the thermal zone lock beforehand, which may race with
+a governor update via sysfs and may lead to a use-after-free in that
+case.
+
+Address these issues by adding two thermal_set_governor() calls, one to
+thermal_release() to remove the governor from the given thermal zone,
+and one to the thermal zone registration error path to cover failures
+preceding the thermal zone device registration.
+
+Fixes: e33df1d2f3a0 ("thermal: let governors have private data for each thermal zone")
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Link: https://patch.msgid.link/5092923.31r3eYUQgx@rafael.j.wysocki
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/thermal_core.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -917,6 +917,7 @@ static void thermal_release(struct devic
+ sizeof("thermal_zone") - 1)) {
+ tz = to_thermal_zone(dev);
+ thermal_zone_destroy_device_groups(tz);
++ thermal_set_governor(tz, NULL);
+ mutex_destroy(&tz->lock);
+ complete(&tz->removal);
+ } else if (!strncmp(dev_name(dev), "cooling_device",
+@@ -1483,8 +1484,10 @@ thermal_zone_device_register_with_trips(
+ /* sys I/F */
+ /* Add nodes that are always present via .groups */
+ result = thermal_zone_create_device_groups(tz);
+- if (result)
++ if (result) {
++ thermal_set_governor(tz, NULL);
+ goto remove_id;
++ }
+
+ /* A new thermal zone needs to be updated anyway. */
+ atomic_set(&tz->need_update, 1);
+@@ -1630,8 +1633,6 @@ void thermal_zone_device_unregister(stru
+
+ cancel_delayed_work_sync(&tz->poll_queue);
+
+- thermal_set_governor(tz, NULL);
+-
+ thermal_remove_hwmon_sysfs(tz);
+ ida_free(&thermal_tz_ida, tz->id);
+ ida_destroy(&tz->ida);
--- /dev/null
+From stable+bounces-242164-greg=kroah.com@vger.kernel.org Thu Apr 30 18:50:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 12:45:17 -0400
+Subject: wifi: mt76: mt792x: describe USB WFSYS reset with a descriptor
+To: stable@vger.kernel.org
+Cc: Sean Wang <sean.wang@mediatek.com>, Felix Fietkau <nbd@nbd.name>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430164518.1852033-1-sashal@kernel.org>
+
+From: Sean Wang <sean.wang@mediatek.com>
+
+[ Upstream commit e6f48512c1ceebcd1ce6bb83df3b3d56a261507d ]
+
+Prepare mt792xu_wfsys_reset() for chips that share the same USB WFSYS
+reset flow but use different register definitions.
+
+This is a pure refactor of the current mt7921u path and keeps the reset
+sequence unchanged.
+
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+Link: https://patch.msgid.link/20260311002825.15502-1-sean.wang@kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Stable-dep-of: 56154fef47d1 ("wifi: mt76: mt792x: fix mt7925u USB WFSYS reset handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 40 +++++++++++++++++++-----
+ 1 file changed, 32 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+@@ -206,6 +206,24 @@ static void mt792xu_epctl_rst_opt(struct
+ mt792xu_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
+ }
+
++struct mt792xu_wfsys_desc {
++ u32 rst_reg;
++ u32 done_reg;
++ u32 done_mask;
++ u32 done_val;
++ u32 delay_ms;
++ bool need_status_sel;
++};
++
++static const struct mt792xu_wfsys_desc mt7921_wfsys_desc = {
++ .rst_reg = MT_CBTOP_RGU_WF_SUBSYS_RST,
++ .done_reg = MT_UDMA_CONN_INFRA_STATUS,
++ .done_mask = MT_UDMA_CONN_WFSYS_INIT_DONE,
++ .done_val = MT_UDMA_CONN_WFSYS_INIT_DONE,
++ .delay_ms = 0,
++ .need_status_sel = true,
++};
++
+ int mt792xu_dma_init(struct mt792x_dev *dev, bool resume)
+ {
+ int err;
+@@ -236,25 +254,31 @@ EXPORT_SYMBOL_GPL(mt792xu_dma_init);
+
+ int mt792xu_wfsys_reset(struct mt792x_dev *dev)
+ {
++ const struct mt792xu_wfsys_desc *desc = &mt7921_wfsys_desc;
+ u32 val;
+ int i;
+
+ mt792xu_epctl_rst_opt(dev, false);
+
+- val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
++ val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg);
+ val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+- mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
++ mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val);
+
+- usleep_range(10, 20);
++ if (desc->delay_ms)
++ msleep(desc->delay_ms);
++ else
++ usleep_range(10, 20);
+
+- val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
++ val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg);
+ val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+- mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
++ mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val);
++
++ if (desc->need_status_sel)
++ mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+
+- mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+ for (i = 0; i < MT792x_WFSYS_INIT_RETRY_COUNT; i++) {
+- val = mt792xu_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS);
+- if (val & MT_UDMA_CONN_WFSYS_INIT_DONE)
++ val = mt792xu_uhw_rr(&dev->mt76, desc->done_reg);
++ if ((val & desc->done_mask) == desc->done_val)
+ break;
+
+ msleep(100);
--- /dev/null
+From stable+bounces-242165-greg=kroah.com@vger.kernel.org Thu Apr 30 18:51:40 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 12:45:18 -0400
+Subject: wifi: mt76: mt792x: fix mt7925u USB WFSYS reset handling
+To: stable@vger.kernel.org
+Cc: Sean Wang <sean.wang@mediatek.com>, Felix Fietkau <nbd@nbd.name>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430164518.1852033-2-sashal@kernel.org>
+
+From: Sean Wang <sean.wang@mediatek.com>
+
+[ Upstream commit 56154fef47d104effa9f29ed3db4f805cbc0d640 ]
+
+mt7925u uses different reset/status registers from mt7921u. Reusing the
+mt7921u register set causes the WFSYS reset to fail.
+
+Add a chip-specific descriptor in mt792xu_wfsys_reset() to select the
+correct registers and fix mt7925u failing to initialize after a warm
+reboot.
+
+Fixes: d28e1a48952e ("wifi: mt76: mt792x: introduce mt792x-usb module")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+Link: https://patch.msgid.link/20260311002825.15502-2-sean.wang@kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt792x_regs.h | 4 ++++
+ drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 13 ++++++++++++-
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
+@@ -390,6 +390,10 @@
+ #define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600)
+ #define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0)
+
++#define MT7925_CBTOP_RGU_WF_SUBSYS_RST 0x70028600
++#define MT7925_WFSYS_INIT_DONE_ADDR 0x184c1604
++#define MT7925_WFSYS_INIT_DONE 0x00001d1e
++
+ #define MT_HW_BOUND 0x70010020
+ #define MT_HW_CHIPID 0x70010200
+ #define MT_HW_REV 0x70010204
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+@@ -224,6 +224,15 @@ static const struct mt792xu_wfsys_desc m
+ .need_status_sel = true,
+ };
+
++static const struct mt792xu_wfsys_desc mt7925_wfsys_desc = {
++ .rst_reg = MT7925_CBTOP_RGU_WF_SUBSYS_RST,
++ .done_reg = MT7925_WFSYS_INIT_DONE_ADDR,
++ .done_mask = U32_MAX,
++ .done_val = MT7925_WFSYS_INIT_DONE,
++ .delay_ms = 20,
++ .need_status_sel = false,
++};
++
+ int mt792xu_dma_init(struct mt792x_dev *dev, bool resume)
+ {
+ int err;
+@@ -254,7 +263,9 @@ EXPORT_SYMBOL_GPL(mt792xu_dma_init);
+
+ int mt792xu_wfsys_reset(struct mt792x_dev *dev)
+ {
+- const struct mt792xu_wfsys_desc *desc = &mt7921_wfsys_desc;
++ const struct mt792xu_wfsys_desc *desc = is_mt7925(&dev->mt76) ?
++ &mt7925_wfsys_desc :
++ &mt7921_wfsys_desc;
+ u32 val;
+ int i;
+
--- /dev/null
+From stable+bounces-242161-greg=kroah.com@vger.kernel.org Thu Apr 30 18:38:53 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 12:31:55 -0400
+Subject: wifi: mwifiex: fix use-after-free in mwifiex_adapter_cleanup()
+To: stable@vger.kernel.org
+Cc: Daniel Hodges <git@danielhodges.dev>, Johannes Berg <johannes.berg@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430163155.1837709-1-sashal@kernel.org>
+
+From: Daniel Hodges <git@danielhodges.dev>
+
+[ Upstream commit ae5e95d4157481693be2317e3ffcd84e36010cbb ]
+
+The mwifiex_adapter_cleanup() function uses timer_delete()
+(non-synchronous) for the wakeup_timer before the adapter structure is
+freed. This is incorrect because timer_delete() does not wait for any
+running timer callback to complete.
+
+If the wakeup_timer callback (wakeup_timer_fn) is executing when
+mwifiex_adapter_cleanup() is called, the callback will continue to
+access adapter fields (adapter->hw_status, adapter->if_ops.card_reset,
+etc.) which may be freed by mwifiex_free_adapter() called later in the
+mwifiex_remove_card() path.
+
+Use timer_delete_sync() instead to ensure any running timer callback has
+completed before returning.
+
+Fixes: 4636187da60b ("mwifiex: add wakeup timer based recovery mechanism")
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Hodges <git@danielhodges.dev>
+Link: https://patch.msgid.link/20260206194401.2346-1-git@danielhodges.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ changed `timer_delete_sync()` to `del_timer_sync()` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/marvell/mwifiex/init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/init.c
++++ b/drivers/net/wireless/marvell/mwifiex/init.c
+@@ -390,7 +390,7 @@ static void mwifiex_invalidate_lists(str
+ static void
+ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
+ {
+- del_timer(&adapter->wakeup_timer);
++ del_timer_sync(&adapter->wakeup_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
+ mwifiex_cancel_all_pending_cmd(adapter);
+ wake_up_interruptible(&adapter->cmd_wait_q.wait);