--- /dev/null
+From stable+bounces-242459-greg=kroah.com@vger.kernel.org Fri May 1 17:54:30 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 11:54:21 -0400
+Subject: ALSA: aoa: i2sbus: clear stale prepared state
+To: stable@vger.kernel.org
+Cc: "Cássio Gabriel" <cassiogabrielcontato@gmail.com>, "kernel test robot" <lkp@intel.com>, "Takashi Iwai" <tiwai@suse.de>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260501155421.3610801-2-sashal@kernel.org>
+
+From: Cássio Gabriel <cassiogabrielcontato@gmail.com>
+
+[ Upstream commit 5ed060d5491597490fb53ec69da3edc4b1e8c165 ]
+
+The i2sbus PCM code uses pi->active to constrain the sibling stream to
+an already prepared duplex format and rate in i2sbus_pcm_open().
+
+That state is set from i2sbus_pcm_prepare(), but the current code only
+clears it on close. As a result, the sibling stream can inherit stale
+constraints after the prepared state has been torn down.
+
+Clear pi->active when hw_params() or hw_free() tears down the prepared
+state, and set it again only after prepare succeeds.
+
+Replace the stale FIXME in the duplex constraint comment with a description
+of the current driver behavior: i2sbus still programs a single shared
+transport configuration for both directions, so mixed formats are not
+supported in duplex mode.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202604010125.AvkWBYKI-lkp@intel.com/
+Fixes: f3d9478b2ce4 ("[ALSA] snd-aoa: add snd-aoa")
+Cc: stable@vger.kernel.org
+Signed-off-by: Cássio Gabriel <cassiogabrielcontato@gmail.com>
+Link: https://patch.msgid.link/20260331-aoa-i2sbus-clear-stale-active-v2-1-3764ae2889a1@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/aoa/soundbus/i2sbus/pcm.c | 55 ++++++++++++++++++++++++++++++++--------
+ 1 file changed, 44 insertions(+), 11 deletions(-)
+
+--- a/sound/aoa/soundbus/i2sbus/pcm.c
++++ b/sound/aoa/soundbus/i2sbus/pcm.c
+@@ -165,17 +165,16 @@ static int i2sbus_pcm_open(struct i2sbus
+ * currently in use (if any). */
+ hw->rate_min = 5512;
+ hw->rate_max = 192000;
+- /* if the other stream is active, then we can only
+- * support what it is currently using.
+- * FIXME: I lied. This comment is wrong. We can support
+- * anything that works with the same serial format, ie.
+- * when recording 24 bit sound we can well play 16 bit
+- * sound at the same time iff using the same transfer mode.
++ /* If the other stream is already prepared, keep this stream
++ * on the same duplex format and rate.
++ *
++ * i2sbus_pcm_prepare() still programs one shared transport
++ * configuration for both directions, so mixed duplex formats
++ * are not supported here.
+ */
+ if (other->active) {
+- /* FIXME: is this guaranteed by the alsa api? */
+ hw->formats &= pcm_format_to_bits(i2sdev->format);
+- /* see above, restrict rates to the one we already have */
++ /* Restrict rates to the one already in use. */
+ hw->rate_min = i2sdev->rate;
+ hw->rate_max = i2sdev->rate;
+ }
+@@ -283,6 +282,23 @@ void i2sbus_wait_for_stop_both(struct i2
+ }
+ #endif
+
++static void i2sbus_pcm_clear_active(struct i2sbus_dev *i2sdev, int in)
++{
++ struct pcm_info *pi;
++
++ guard(mutex)(&i2sdev->lock);
++
++ get_pcm_info(i2sdev, in, &pi, NULL);
++ pi->active = 0;
++}
++
++static inline int i2sbus_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params, int in)
++{
++ i2sbus_pcm_clear_active(snd_pcm_substream_chip(substream), in);
++ return 0;
++}
++
+ static inline int i2sbus_hw_free(struct snd_pcm_substream *substream, int in)
+ {
+ struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream);
+@@ -291,14 +307,27 @@ static inline int i2sbus_hw_free(struct
+ get_pcm_info(i2sdev, in, &pi, NULL);
+ if (pi->dbdma_ring.stopping)
+ i2sbus_wait_for_stop(i2sdev, pi);
++ i2sbus_pcm_clear_active(i2sdev, in);
+ return 0;
+ }
+
++static int i2sbus_playback_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ return i2sbus_hw_params(substream, params, 0);
++}
++
+ static int i2sbus_playback_hw_free(struct snd_pcm_substream *substream)
+ {
+ return i2sbus_hw_free(substream, 0);
+ }
+
++static int i2sbus_record_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ return i2sbus_hw_params(substream, params, 1);
++}
++
+ static int i2sbus_record_hw_free(struct snd_pcm_substream *substream)
+ {
+ return i2sbus_hw_free(substream, 1);
+@@ -335,7 +364,6 @@ static int i2sbus_pcm_prepare(struct i2s
+ return -EINVAL;
+
+ runtime = pi->substream->runtime;
+- pi->active = 1;
+ if (other->active &&
+ ((i2sdev->format != runtime->format)
+ || (i2sdev->rate != runtime->rate)))
+@@ -450,9 +478,11 @@ static int i2sbus_pcm_prepare(struct i2s
+
+ /* early exit if already programmed correctly */
+ /* not locking these is fine since we touch them only in this function */
+- if (in_le32(&i2sdev->intfregs->serial_format) == sfr
+- && in_le32(&i2sdev->intfregs->data_word_sizes) == dws)
++ if (in_le32(&i2sdev->intfregs->serial_format) == sfr &&
++ in_le32(&i2sdev->intfregs->data_word_sizes) == dws) {
++ pi->active = 1;
+ return 0;
++ }
+
+ /* let's notify the codecs about clocks going away.
+ * For now we only do mastering on the i2s cell... */
+@@ -490,6 +520,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ if (cii->codec->switch_clock)
+ cii->codec->switch_clock(cii, CLOCK_SWITCH_SLAVE);
+
++ pi->active = 1;
+ return 0;
+ }
+
+@@ -746,6 +777,7 @@ static snd_pcm_uframes_t i2sbus_playback
+ static const struct snd_pcm_ops i2sbus_playback_ops = {
+ .open = i2sbus_playback_open,
+ .close = i2sbus_playback_close,
++ .hw_params = i2sbus_playback_hw_params,
+ .hw_free = i2sbus_playback_hw_free,
+ .prepare = i2sbus_playback_prepare,
+ .trigger = i2sbus_playback_trigger,
+@@ -814,6 +846,7 @@ static snd_pcm_uframes_t i2sbus_record_p
+ static const struct snd_pcm_ops i2sbus_record_ops = {
+ .open = i2sbus_record_open,
+ .close = i2sbus_record_close,
++ .hw_params = i2sbus_record_hw_params,
+ .hw_free = i2sbus_record_hw_free,
+ .prepare = i2sbus_record_prepare,
+ .trigger = i2sbus_record_trigger,
--- /dev/null
+From stable+bounces-242478-greg=kroah.com@vger.kernel.org Fri May 1 19:27:59 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 13:27:47 -0400
+Subject: ALSA: aoa: Skip devices with no codecs in i2sbus_resume()
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501172747.3765281-2-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit fd7df93013c5118812e63a52635dc6c3a805a1de ]
+
+In i2sbus_resume(), skip devices with an empty codec list, which avoids
+using an uninitialized 'sysclock_factor' in the 32-bit format path in
+i2sbus_pcm_prepare().
+
+In i2sbus_pcm_prepare(), replace two list_for_each_entry() loops with a
+single list_first_entry() now that the codec list is guaranteed to be
+non-empty by all callers.
+
+Fixes: f3d9478b2ce4 ("[ALSA] snd-aoa: add snd-aoa")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Link: https://patch.msgid.link/20260310102921.210109-3-thorsten.blum@linux.dev
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/aoa/soundbus/i2sbus/core.c | 3 +++
+ sound/aoa/soundbus/i2sbus/pcm.c | 16 +++++-----------
+ 2 files changed, 8 insertions(+), 11 deletions(-)
+
+--- a/sound/aoa/soundbus/i2sbus/core.c
++++ b/sound/aoa/soundbus/i2sbus/core.c
+@@ -411,6 +411,9 @@ static int i2sbus_resume(struct macio_de
+ int err, ret = 0;
+
+ list_for_each_entry(i2sdev, &control->list, item) {
++ if (list_empty(&i2sdev->sound.codec_list))
++ continue;
++
+ /* reset i2s bus format etc. */
+ i2sbus_pcm_prepare_both(i2sdev);
+
+--- a/sound/aoa/soundbus/i2sbus/pcm.c
++++ b/sound/aoa/soundbus/i2sbus/pcm.c
+@@ -411,6 +411,9 @@ static int i2sbus_pcm_prepare(struct i2s
+ /* set stop command */
+ command->command = cpu_to_le16(DBDMA_STOP);
+
++ cii = list_first_entry(&i2sdev->sound.codec_list,
++ struct codec_info_item, list);
++
+ /* ok, let's set the serial format and stuff */
+ switch (runtime->format) {
+ /* 16 bit formats */
+@@ -418,13 +421,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ case SNDRV_PCM_FORMAT_U16_BE:
+ /* FIXME: if we add different bus factors we need to
+ * do more here!! */
+- bi.bus_factor = 0;
+- list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
+- bi.bus_factor = cii->codec->bus_factor;
+- break;
+- }
+- if (!bi.bus_factor)
+- return -ENODEV;
++ bi.bus_factor = cii->codec->bus_factor;
+ input_16bit = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S32_BE:
+@@ -438,10 +435,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ return -EINVAL;
+ }
+ /* we assume all sysclocks are the same! */
+- list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
+- bi.sysclock_factor = cii->codec->sysclock_factor;
+- break;
+- }
++ bi.sysclock_factor = cii->codec->sysclock_factor;
+
+ if (clock_and_divisors(bi.sysclock_factor,
+ bi.bus_factor,
--- /dev/null
+From stable+bounces-242458-greg=kroah.com@vger.kernel.org Fri May 1 17:56:08 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 11:54:20 -0400
+Subject: ALSA: aoa: Use guard() for mutex locks
+To: stable@vger.kernel.org
+Cc: Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501155421.3610801-1-sashal@kernel.org>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1cb6ecbb372002ef9e531c5377e5f60122411e40 ]
+
+Replace the manual mutex lock/unlock pairs with guard() for code
+simplification.
+
+Only code refactoring, and no behavior change.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20250829151335.7342-14-tiwai@suse.de
+Stable-dep-of: 5ed060d54915 ("ALSA: aoa: i2sbus: clear stale prepared state")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/aoa/codecs/onyx.c | 104 +++++++++++-------------------------
+ sound/aoa/codecs/tas.c | 113 +++++++++++++---------------------------
+ sound/aoa/core/gpio-feature.c | 20 ++-----
+ sound/aoa/core/gpio-pmf.c | 26 +++------
+ sound/aoa/soundbus/i2sbus/pcm.c | 76 ++++++++------------------
+ 5 files changed, 112 insertions(+), 227 deletions(-)
+
+--- a/sound/aoa/codecs/onyx.c
++++ b/sound/aoa/codecs/onyx.c
+@@ -121,10 +121,9 @@ static int onyx_snd_vol_get(struct snd_k
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ s8 l, r;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT, &r);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] = l + VOLUME_RANGE_SHIFT;
+ ucontrol->value.integer.value[1] = r + VOLUME_RANGE_SHIFT;
+@@ -145,15 +144,13 @@ static int onyx_snd_vol_put(struct snd_k
+ ucontrol->value.integer.value[1] > -1 + VOLUME_RANGE_SHIFT)
+ return -EINVAL;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l);
+ onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT, &r);
+
+ if (l + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[0] &&
+- r + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[1]) {
+- mutex_unlock(&onyx->mutex);
++ r + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ onyx_write_register(onyx, ONYX_REG_DAC_ATTEN_LEFT,
+ ucontrol->value.integer.value[0]
+@@ -161,7 +158,6 @@ static int onyx_snd_vol_put(struct snd_k
+ onyx_write_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT,
+ ucontrol->value.integer.value[1]
+ - VOLUME_RANGE_SHIFT);
+- mutex_unlock(&onyx->mutex);
+
+ return 1;
+ }
+@@ -197,9 +193,8 @@ static int onyx_snd_inputgain_get(struct
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 ig;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &ig);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] =
+ (ig & ONYX_ADC_PGA_GAIN_MASK) + INPUTGAIN_RANGE_SHIFT;
+@@ -216,14 +211,13 @@ static int onyx_snd_inputgain_put(struct
+ if (ucontrol->value.integer.value[0] < 3 + INPUTGAIN_RANGE_SHIFT ||
+ ucontrol->value.integer.value[0] > 28 + INPUTGAIN_RANGE_SHIFT)
+ return -EINVAL;
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
+ n = v;
+ n &= ~ONYX_ADC_PGA_GAIN_MASK;
+ n |= (ucontrol->value.integer.value[0] - INPUTGAIN_RANGE_SHIFT)
+ & ONYX_ADC_PGA_GAIN_MASK;
+ onyx_write_register(onyx, ONYX_REG_ADC_CONTROL, n);
+- mutex_unlock(&onyx->mutex);
+
+ return n != v;
+ }
+@@ -251,9 +245,8 @@ static int onyx_snd_capture_source_get(s
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ s8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.enumerated.item[0] = !!(v&ONYX_ADC_INPUT_MIC);
+
+@@ -264,13 +257,12 @@ static void onyx_set_capture_source(stru
+ {
+ s8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
+ v &= ~ONYX_ADC_INPUT_MIC;
+ if (mic)
+ v |= ONYX_ADC_INPUT_MIC;
+ onyx_write_register(onyx, ONYX_REG_ADC_CONTROL, v);
+- mutex_unlock(&onyx->mutex);
+ }
+
+ static int onyx_snd_capture_source_put(struct snd_kcontrol *kcontrol,
+@@ -311,9 +303,8 @@ static int onyx_snd_mute_get(struct snd_
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 c;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &c);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] = !(c & ONYX_MUTE_LEFT);
+ ucontrol->value.integer.value[1] = !(c & ONYX_MUTE_RIGHT);
+@@ -328,9 +319,9 @@ static int onyx_snd_mute_put(struct snd_
+ u8 v = 0, c = 0;
+ int err = -EBUSY;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ if (onyx->analog_locked)
+- goto out_unlock;
++ return -EBUSY;
+
+ onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &v);
+ c = v;
+@@ -341,9 +332,6 @@ static int onyx_snd_mute_put(struct snd_
+ c |= ONYX_MUTE_RIGHT;
+ err = onyx_write_register(onyx, ONYX_REG_DAC_CONTROL, c);
+
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+ return !err ? (v != c) : err;
+ }
+
+@@ -372,9 +360,8 @@ static int onyx_snd_single_bit_get(struc
+ u8 address = (pv >> 8) & 0xff;
+ u8 mask = pv & 0xff;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, address, &c);
+- mutex_unlock(&onyx->mutex);
+
+ ucontrol->value.integer.value[0] = !!(c & mask) ^ polarity;
+
+@@ -393,11 +380,10 @@ static int onyx_snd_single_bit_put(struc
+ u8 address = (pv >> 8) & 0xff;
+ u8 mask = pv & 0xff;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ if (spdiflock && onyx->spdif_locked) {
+ /* even if alsamixer doesn't care.. */
+- err = -EBUSY;
+- goto out_unlock;
++ return -EBUSY;
+ }
+ onyx_read_register(onyx, address, &v);
+ c = v;
+@@ -406,9 +392,6 @@ static int onyx_snd_single_bit_put(struc
+ c |= mask;
+ err = onyx_write_register(onyx, address, c);
+
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+ return !err ? (v != c) : err;
+ }
+
+@@ -489,7 +472,7 @@ static int onyx_spdif_get(struct snd_kco
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO1, &v);
+ ucontrol->value.iec958.status[0] = v & 0x3e;
+
+@@ -501,7 +484,6 @@ static int onyx_spdif_get(struct snd_kco
+
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
+ ucontrol->value.iec958.status[4] = v & 0x0f;
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -512,7 +494,7 @@ static int onyx_spdif_put(struct snd_kco
+ struct onyx *onyx = snd_kcontrol_chip(kcontrol);
+ u8 v;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO1, &v);
+ v = (v & ~0x3e) | (ucontrol->value.iec958.status[0] & 0x3e);
+ onyx_write_register(onyx, ONYX_REG_DIG_INFO1, v);
+@@ -527,7 +509,6 @@ static int onyx_spdif_put(struct snd_kco
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
+ v = (v & ~0x0f) | (ucontrol->value.iec958.status[4] & 0x0f);
+ onyx_write_register(onyx, ONYX_REG_DIG_INFO4, v);
+- mutex_unlock(&onyx->mutex);
+
+ return 1;
+ }
+@@ -672,14 +653,13 @@ static int onyx_usable(struct codec_info
+ struct onyx *onyx = cii->codec_data;
+ int spdif_enabled, analog_enabled;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
+ spdif_enabled = !!(v & ONYX_SPDIF_ENABLE);
+ onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &v);
+ analog_enabled =
+ (v & (ONYX_MUTE_RIGHT|ONYX_MUTE_LEFT))
+ != (ONYX_MUTE_RIGHT|ONYX_MUTE_LEFT);
+- mutex_unlock(&onyx->mutex);
+
+ switch (ti->tag) {
+ case 0: return 1;
+@@ -695,9 +675,8 @@ static int onyx_prepare(struct codec_inf
+ {
+ u8 v;
+ struct onyx *onyx = cii->codec_data;
+- int err = -EBUSY;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+
+ #ifdef SNDRV_PCM_FMTBIT_COMPRESSED_16BE
+ if (substream->runtime->format == SNDRV_PCM_FMTBIT_COMPRESSED_16BE) {
+@@ -706,10 +685,9 @@ static int onyx_prepare(struct codec_inf
+ if (onyx_write_register(onyx,
+ ONYX_REG_DAC_CONTROL,
+ v | ONYX_MUTE_RIGHT | ONYX_MUTE_LEFT))
+- goto out_unlock;
++ return -EBUSY;
+ onyx->analog_locked = 1;
+- err = 0;
+- goto out_unlock;
++ return 0;
+ }
+ #endif
+ switch (substream->runtime->rate) {
+@@ -719,8 +697,7 @@ static int onyx_prepare(struct codec_inf
+ /* these rates are ok for all outputs */
+ /* FIXME: program spdif channel control bits here so that
+ * userspace doesn't have to if it only plays pcm! */
+- err = 0;
+- goto out_unlock;
++ return 0;
+ default:
+ /* got some rate that the digital output can't do,
+ * so disable and lock it */
+@@ -728,16 +705,12 @@ static int onyx_prepare(struct codec_inf
+ if (onyx_write_register(onyx,
+ ONYX_REG_DIG_INFO4,
+ v & ~ONYX_SPDIF_ENABLE))
+- goto out_unlock;
++ return -EBUSY;
+ onyx->spdif_locked = 1;
+- err = 0;
+- goto out_unlock;
++ return 0;
+ }
+
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+- return err;
++ return -EBUSY;
+ }
+
+ static int onyx_open(struct codec_info_item *cii,
+@@ -745,9 +718,8 @@ static int onyx_open(struct codec_info_i
+ {
+ struct onyx *onyx = cii->codec_data;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx->open_count++;
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -757,11 +729,10 @@ static int onyx_close(struct codec_info_
+ {
+ struct onyx *onyx = cii->codec_data;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ onyx->open_count--;
+ if (!onyx->open_count)
+ onyx->spdif_locked = onyx->analog_locked = 0;
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -771,7 +742,7 @@ static int onyx_switch_clock(struct code
+ {
+ struct onyx *onyx = cii->codec_data;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ /* this *MUST* be more elaborate later... */
+ switch (what) {
+ case CLOCK_SWITCH_PREPARE_SLAVE:
+@@ -783,7 +754,6 @@ static int onyx_switch_clock(struct code
+ default: /* silence warning */
+ break;
+ }
+- mutex_unlock(&onyx->mutex);
+
+ return 0;
+ }
+@@ -794,27 +764,21 @@ static int onyx_suspend(struct codec_inf
+ {
+ struct onyx *onyx = cii->codec_data;
+ u8 v;
+- int err = -ENXIO;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+ if (onyx_read_register(onyx, ONYX_REG_CONTROL, &v))
+- goto out_unlock;
++ return -ENXIO;
+ onyx_write_register(onyx, ONYX_REG_CONTROL, v | ONYX_ADPSV | ONYX_DAPSV);
+ /* Apple does a sleep here but the datasheet says to do it on resume */
+- err = 0;
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+- return err;
++ return 0;
+ }
+
+ static int onyx_resume(struct codec_info_item *cii)
+ {
+ struct onyx *onyx = cii->codec_data;
+ u8 v;
+- int err = -ENXIO;
+
+- mutex_lock(&onyx->mutex);
++ guard(mutex)(&onyx->mutex);
+
+ /* reset codec */
+ onyx->codec.gpio->methods->set_hw_reset(onyx->codec.gpio, 0);
+@@ -826,17 +790,13 @@ static int onyx_resume(struct codec_info
+
+ /* take codec out of suspend (if it still is after reset) */
+ if (onyx_read_register(onyx, ONYX_REG_CONTROL, &v))
+- goto out_unlock;
++ return -ENXIO;
+ onyx_write_register(onyx, ONYX_REG_CONTROL, v & ~(ONYX_ADPSV | ONYX_DAPSV));
+ /* FIXME: should divide by sample rate, but 8k is the lowest we go */
+ msleep(2205000/8000);
+ /* reset all values */
+ onyx_register_init(onyx);
+- err = 0;
+- out_unlock:
+- mutex_unlock(&onyx->mutex);
+-
+- return err;
++ return 0;
+ }
+
+ #endif /* CONFIG_PM */
+--- a/sound/aoa/codecs/tas.c
++++ b/sound/aoa/codecs/tas.c
+@@ -235,10 +235,9 @@ static int tas_snd_vol_get(struct snd_kc
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->cached_volume_l;
+ ucontrol->value.integer.value[1] = tas->cached_volume_r;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -254,18 +253,15 @@ static int tas_snd_vol_put(struct snd_kc
+ ucontrol->value.integer.value[1] > 177)
+ return -EINVAL;
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ if (tas->cached_volume_l == ucontrol->value.integer.value[0]
+- && tas->cached_volume_r == ucontrol->value.integer.value[1]) {
+- mutex_unlock(&tas->mtx);
++ && tas->cached_volume_r == ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ tas->cached_volume_l = ucontrol->value.integer.value[0];
+ tas->cached_volume_r = ucontrol->value.integer.value[1];
+ if (tas->hw_enabled)
+ tas_set_volume(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -285,10 +281,9 @@ static int tas_snd_mute_get(struct snd_k
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = !tas->mute_l;
+ ucontrol->value.integer.value[1] = !tas->mute_r;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -297,18 +292,15 @@ static int tas_snd_mute_put(struct snd_k
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ if (tas->mute_l == !ucontrol->value.integer.value[0]
+- && tas->mute_r == !ucontrol->value.integer.value[1]) {
+- mutex_unlock(&tas->mtx);
++ && tas->mute_r == !ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ tas->mute_l = !ucontrol->value.integer.value[0];
+ tas->mute_r = !ucontrol->value.integer.value[1];
+ if (tas->hw_enabled)
+ tas_set_volume(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -337,10 +329,9 @@ static int tas_snd_mixer_get(struct snd_
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+ int idx = kcontrol->private_value;
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->mixer_l[idx];
+ ucontrol->value.integer.value[1] = tas->mixer_r[idx];
+- mutex_unlock(&tas->mtx);
+
+ return 0;
+ }
+@@ -351,19 +342,16 @@ static int tas_snd_mixer_put(struct snd_
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+ int idx = kcontrol->private_value;
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ if (tas->mixer_l[idx] == ucontrol->value.integer.value[0]
+- && tas->mixer_r[idx] == ucontrol->value.integer.value[1]) {
+- mutex_unlock(&tas->mtx);
++ && tas->mixer_r[idx] == ucontrol->value.integer.value[1])
+ return 0;
+- }
+
+ tas->mixer_l[idx] = ucontrol->value.integer.value[0];
+ tas->mixer_r[idx] = ucontrol->value.integer.value[1];
+
+ if (tas->hw_enabled)
+ tas_set_mixer(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -396,9 +384,8 @@ static int tas_snd_drc_range_get(struct
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->drc_range;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -411,16 +398,13 @@ static int tas_snd_drc_range_put(struct
+ ucontrol->value.integer.value[0] > TAS3004_DRC_MAX)
+ return -EINVAL;
+
+- mutex_lock(&tas->mtx);
+- if (tas->drc_range == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->drc_range == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->drc_range = ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas3004_set_drc(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -440,9 +424,8 @@ static int tas_snd_drc_switch_get(struct
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->drc_enabled;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -451,16 +434,13 @@ static int tas_snd_drc_switch_put(struct
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
+- if (tas->drc_enabled == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->drc_enabled == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->drc_enabled = !!ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas3004_set_drc(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -486,9 +466,8 @@ static int tas_snd_capture_source_get(st
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.enumerated.item[0] = !!(tas->acr & TAS_ACR_INPUT_B);
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -500,7 +479,7 @@ static int tas_snd_capture_source_put(st
+
+ if (ucontrol->value.enumerated.item[0] > 1)
+ return -EINVAL;
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ oldacr = tas->acr;
+
+ /*
+@@ -512,13 +491,10 @@ static int tas_snd_capture_source_put(st
+ if (ucontrol->value.enumerated.item[0])
+ tas->acr |= TAS_ACR_INPUT_B | TAS_ACR_B_MONAUREAL |
+ TAS_ACR_B_MON_SEL_RIGHT;
+- if (oldacr == tas->acr) {
+- mutex_unlock(&tas->mtx);
++ if (oldacr == tas->acr)
+ return 0;
+- }
+ if (tas->hw_enabled)
+ tas_write_reg(tas, TAS_REG_ACR, 1, &tas->acr);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -557,9 +533,8 @@ static int tas_snd_treble_get(struct snd
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->treble;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -571,16 +546,13 @@ static int tas_snd_treble_put(struct snd
+ if (ucontrol->value.integer.value[0] < TAS3004_TREBLE_MIN ||
+ ucontrol->value.integer.value[0] > TAS3004_TREBLE_MAX)
+ return -EINVAL;
+- mutex_lock(&tas->mtx);
+- if (tas->treble == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->treble == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->treble = ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas_set_treble(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -608,9 +580,8 @@ static int tas_snd_bass_get(struct snd_k
+ {
+ struct tas *tas = snd_kcontrol_chip(kcontrol);
+
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ ucontrol->value.integer.value[0] = tas->bass;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -622,16 +593,13 @@ static int tas_snd_bass_put(struct snd_k
+ if (ucontrol->value.integer.value[0] < TAS3004_BASS_MIN ||
+ ucontrol->value.integer.value[0] > TAS3004_BASS_MAX)
+ return -EINVAL;
+- mutex_lock(&tas->mtx);
+- if (tas->bass == ucontrol->value.integer.value[0]) {
+- mutex_unlock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
++ if (tas->bass == ucontrol->value.integer.value[0])
+ return 0;
+- }
+
+ tas->bass = ucontrol->value.integer.value[0];
+ if (tas->hw_enabled)
+ tas_set_bass(tas);
+- mutex_unlock(&tas->mtx);
+ return 1;
+ }
+
+@@ -722,13 +690,13 @@ static int tas_switch_clock(struct codec
+ break;
+ case CLOCK_SWITCH_SLAVE:
+ /* Clocks are back, re-init the codec */
+- mutex_lock(&tas->mtx);
+- tas_reset_init(tas);
+- tas_set_volume(tas);
+- tas_set_mixer(tas);
+- tas->hw_enabled = 1;
+- tas->codec.gpio->methods->all_amps_restore(tas->codec.gpio);
+- mutex_unlock(&tas->mtx);
++ scoped_guard(mutex, &tas->mtx) {
++ tas_reset_init(tas);
++ tas_set_volume(tas);
++ tas_set_mixer(tas);
++ tas->hw_enabled = 1;
++ tas->codec.gpio->methods->all_amps_restore(tas->codec.gpio);
++ }
+ break;
+ default:
+ /* doesn't happen as of now */
+@@ -743,23 +711,21 @@ static int tas_switch_clock(struct codec
+ * our i2c device is suspended, and then take note of that! */
+ static int tas_suspend(struct tas *tas)
+ {
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ tas->hw_enabled = 0;
+ tas->acr |= TAS_ACR_ANALOG_PDOWN;
+ tas_write_reg(tas, TAS_REG_ACR, 1, &tas->acr);
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+ static int tas_resume(struct tas *tas)
+ {
+ /* reset codec */
+- mutex_lock(&tas->mtx);
++ guard(mutex)(&tas->mtx);
+ tas_reset_init(tas);
+ tas_set_volume(tas);
+ tas_set_mixer(tas);
+ tas->hw_enabled = 1;
+- mutex_unlock(&tas->mtx);
+ return 0;
+ }
+
+@@ -802,14 +768,13 @@ static int tas_init_codec(struct aoa_cod
+ return -EINVAL;
+ }
+
+- mutex_lock(&tas->mtx);
+- if (tas_reset_init(tas)) {
+- printk(KERN_ERR PFX "tas failed to initialise\n");
+- mutex_unlock(&tas->mtx);
+- return -ENXIO;
++ scoped_guard(mutex, &tas->mtx) {
++ if (tas_reset_init(tas)) {
++ printk(KERN_ERR PFX "tas failed to initialise\n");
++ return -ENXIO;
++ }
++ tas->hw_enabled = 1;
+ }
+- tas->hw_enabled = 1;
+- mutex_unlock(&tas->mtx);
+
+ if (tas->codec.soundbus_dev->attach_codec(tas->codec.soundbus_dev,
+ aoa_get_card(),
+--- a/sound/aoa/core/gpio-feature.c
++++ b/sound/aoa/core/gpio-feature.c
+@@ -212,10 +212,9 @@ static void ftr_handle_notify(struct wor
+ struct gpio_notification *notif =
+ container_of(work, struct gpio_notification, work.work);
+
+- mutex_lock(¬if->mutex);
++ guard(mutex)(¬if->mutex);
+ if (notif->notify)
+ notif->notify(notif->data);
+- mutex_unlock(¬if->mutex);
+ }
+
+ static void gpio_enable_dual_edge(int gpio)
+@@ -341,19 +340,17 @@ static int ftr_set_notify(struct gpio_ru
+ if (!irq)
+ return -ENODEV;
+
+- mutex_lock(¬if->mutex);
++ guard(mutex)(¬if->mutex);
+
+ old = notif->notify;
+
+- if (!old && !notify) {
+- err = 0;
+- goto out_unlock;
+- }
++ if (!old && !notify)
++ return 0;
+
+ if (old && notify) {
+ if (old == notify && notif->data == data)
+ err = 0;
+- goto out_unlock;
++ return err;
+ }
+
+ if (old && !notify)
+@@ -362,16 +359,13 @@ static int ftr_set_notify(struct gpio_ru
+ if (!old && notify) {
+ err = request_irq(irq, ftr_handle_notify_irq, 0, name, notif);
+ if (err)
+- goto out_unlock;
++ return err;
+ }
+
+ notif->notify = notify;
+ notif->data = data;
+
+- err = 0;
+- out_unlock:
+- mutex_unlock(¬if->mutex);
+- return err;
++ return 0;
+ }
+
+ static int ftr_get_detect(struct gpio_runtime *rt,
+--- a/sound/aoa/core/gpio-pmf.c
++++ b/sound/aoa/core/gpio-pmf.c
+@@ -74,10 +74,9 @@ static void pmf_handle_notify(struct wor
+ struct gpio_notification *notif =
+ container_of(work, struct gpio_notification, work.work);
+
+- mutex_lock(¬if->mutex);
++ guard(mutex)(¬if->mutex);
+ if (notif->notify)
+ notif->notify(notif->data);
+- mutex_unlock(¬if->mutex);
+ }
+
+ static void pmf_gpio_init(struct gpio_runtime *rt)
+@@ -154,19 +153,17 @@ static int pmf_set_notify(struct gpio_ru
+ return -EINVAL;
+ }
+
+- mutex_lock(¬if->mutex);
++ guard(mutex)(¬if->mutex);
+
+ old = notif->notify;
+
+- if (!old && !notify) {
+- err = 0;
+- goto out_unlock;
+- }
++ if (!old && !notify)
++ return 0;
+
+ if (old && notify) {
+ if (old == notify && notif->data == data)
+ err = 0;
+- goto out_unlock;
++ return err;
+ }
+
+ if (old && !notify) {
+@@ -178,10 +175,8 @@ static int pmf_set_notify(struct gpio_ru
+ if (!old && notify) {
+ irq_client = kzalloc(sizeof(struct pmf_irq_client),
+ GFP_KERNEL);
+- if (!irq_client) {
+- err = -ENOMEM;
+- goto out_unlock;
+- }
++ if (!irq_client)
++ return -ENOMEM;
+ irq_client->data = notif;
+ irq_client->handler = pmf_handle_notify_irq;
+ irq_client->owner = THIS_MODULE;
+@@ -192,17 +187,14 @@ static int pmf_set_notify(struct gpio_ru
+ printk(KERN_ERR "snd-aoa: gpio layer failed to"
+ " register %s irq (%d)\n", name, err);
+ kfree(irq_client);
+- goto out_unlock;
++ return err;
+ }
+ notif->gpio_private = irq_client;
+ }
+ notif->notify = notify;
+ notif->data = data;
+
+- err = 0;
+- out_unlock:
+- mutex_unlock(&notif->mutex);
+- return err;
++ return 0;
+ }
+
+ static int pmf_get_detect(struct gpio_runtime *rt,
+--- a/sound/aoa/soundbus/i2sbus/pcm.c
++++ b/sound/aoa/soundbus/i2sbus/pcm.c
+@@ -79,11 +79,10 @@ static int i2sbus_pcm_open(struct i2sbus
+ u64 formats = 0;
+ unsigned int rates = 0;
+ struct transfer_info v;
+- int result = 0;
+ int bus_factor = 0, sysclock_factor = 0;
+ int found_this;
+
+- mutex_lock(&i2sdev->lock);
++ guard(mutex)(&i2sdev->lock);
+
+ get_pcm_info(i2sdev, in, &pi, &other);
+
+@@ -92,8 +91,7 @@ static int i2sbus_pcm_open(struct i2sbus
+
+ if (pi->active) {
+ /* alsa messed up */
+- result = -EBUSY;
+- goto out_unlock;
++ return -EBUSY;
+ }
+
+ /* we now need to assign the hw */
+@@ -117,10 +115,8 @@ static int i2sbus_pcm_open(struct i2sbus
+ ti++;
+ }
+ }
+- if (!masks_inited || !bus_factor || !sysclock_factor) {
+- result = -ENODEV;
+- goto out_unlock;
+- }
++ if (!masks_inited || !bus_factor || !sysclock_factor)
++ return -ENODEV;
+ /* bus dependent stuff */
+ hw->info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME |
+@@ -194,15 +190,12 @@ static int i2sbus_pcm_open(struct i2sbus
+ hw->periods_max = MAX_DBDMA_COMMANDS;
+ err = snd_pcm_hw_constraint_integer(pi->substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+- if (err < 0) {
+- result = err;
+- goto out_unlock;
+- }
++ if (err < 0)
++ return err;
+ list_for_each_entry(cii, &sdev->codec_list, list) {
+ if (cii->codec->open) {
+ err = cii->codec->open(cii, pi->substream);
+ if (err) {
+- result = err;
+ /* unwind */
+ found_this = 0;
+ list_for_each_entry_reverse(rev,
+@@ -214,14 +207,12 @@ static int i2sbus_pcm_open(struct i2sbus
+ if (rev == cii)
+ found_this = 1;
+ }
+- goto out_unlock;
++ return err;
+ }
+ }
+ }
+
+- out_unlock:
+- mutex_unlock(&i2sdev->lock);
+- return result;
++ return 0;
+ }
+
+ #undef CHECK_RATE
+@@ -232,7 +223,7 @@ static int i2sbus_pcm_close(struct i2sbu
+ struct pcm_info *pi;
+ int err = 0, tmp;
+
+- mutex_lock(&i2sdev->lock);
++ guard(mutex)(&i2sdev->lock);
+
+ get_pcm_info(i2sdev, in, &pi, NULL);
+
+@@ -246,7 +237,6 @@ static int i2sbus_pcm_close(struct i2sbu
+
+ pi->substream = NULL;
+ pi->active = 0;
+- mutex_unlock(&i2sdev->lock);
+ return err;
+ }
+
+@@ -330,33 +320,26 @@ static int i2sbus_pcm_prepare(struct i2s
+ int input_16bit;
+ struct pcm_info *pi, *other;
+ int cnt;
+- int result = 0;
+ unsigned int cmd, stopaddr;
+
+- mutex_lock(&i2sdev->lock);
++ guard(mutex)(&i2sdev->lock);
+
+ get_pcm_info(i2sdev, in, &pi, &other);
+
+- if (pi->dbdma_ring.running) {
+- result = -EBUSY;
+- goto out_unlock;
+- }
++ if (pi->dbdma_ring.running)
++ return -EBUSY;
+ if (pi->dbdma_ring.stopping)
+ i2sbus_wait_for_stop(i2sdev, pi);
+
+- if (!pi->substream || !pi->substream->runtime) {
+- result = -EINVAL;
+- goto out_unlock;
+- }
++ if (!pi->substream || !pi->substream->runtime)
++ return -EINVAL;
+
+ runtime = pi->substream->runtime;
+ pi->active = 1;
+ if (other->active &&
+ ((i2sdev->format != runtime->format)
+- || (i2sdev->rate != runtime->rate))) {
+- result = -EINVAL;
+- goto out_unlock;
+- }
++ || (i2sdev->rate != runtime->rate)))
++ return -EINVAL;
+
+ i2sdev->format = runtime->format;
+ i2sdev->rate = runtime->rate;
+@@ -412,10 +395,8 @@ static int i2sbus_pcm_prepare(struct i2s
+ bi.bus_factor = cii->codec->bus_factor;
+ break;
+ }
+- if (!bi.bus_factor) {
+- result = -ENODEV;
+- goto out_unlock;
+- }
++ if (!bi.bus_factor)
++ return -ENODEV;
+ input_16bit = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S32_BE:
+@@ -426,8 +407,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ input_16bit = 0;
+ break;
+ default:
+- result = -EINVAL;
+- goto out_unlock;
++ return -EINVAL;
+ }
+ /* we assume all sysclocks are the same! */
+ list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
+@@ -438,10 +418,8 @@ static int i2sbus_pcm_prepare(struct i2s
+ if (clock_and_divisors(bi.sysclock_factor,
+ bi.bus_factor,
+ runtime->rate,
+- &sfr) < 0) {
+- result = -EINVAL;
+- goto out_unlock;
+- }
++ &sfr) < 0)
++ return -EINVAL;
+ switch (bi.bus_factor) {
+ case 32:
+ sfr |= I2S_SF_SERIAL_FORMAT_I2S_32X;
+@@ -457,10 +435,8 @@ static int i2sbus_pcm_prepare(struct i2s
+ int err = 0;
+ if (cii->codec->prepare)
+ err = cii->codec->prepare(cii, &bi, pi->substream);
+- if (err) {
+- result = err;
+- goto out_unlock;
+- }
++ if (err)
++ return err;
+ }
+ /* codecs are fine with it, so set our clocks */
+ if (input_16bit)
+@@ -476,7 +452,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ /* not locking these is fine since we touch them only in this function */
+ if (in_le32(&i2sdev->intfregs->serial_format) == sfr
+ && in_le32(&i2sdev->intfregs->data_word_sizes) == dws)
+- goto out_unlock;
++ return 0;
+
+ /* let's notify the codecs about clocks going away.
+ * For now we only do mastering on the i2s cell... */
+@@ -514,9 +490,7 @@ static int i2sbus_pcm_prepare(struct i2s
+ if (cii->codec->switch_clock)
+ cii->codec->switch_clock(cii, CLOCK_SWITCH_SLAVE);
+
+- out_unlock:
+- mutex_unlock(&i2sdev->lock);
+- return result;
++ return 0;
+ }
+
+ #ifdef CONFIG_PM
--- /dev/null
+From stable+bounces-241691-greg=kroah.com@vger.kernel.org Tue Apr 28 17:25:10 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2026 11:24:00 -0400
+Subject: arm64/mm: Enable batched TLB flush in unmap_hotplug_range()
+To: stable@vger.kernel.org
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>, Will Deacon <will@kernel.org>, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, "David Hildenbrand (Arm)" <david@kernel.org>, Ryan Roberts <ryan.roberts@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260428152400.3033637-1-sashal@kernel.org>
+
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+
+[ Upstream commit 48478b9f791376b4b89018d7afdfd06865498f65 ]
+
+During a memory hot remove operation, both linear and vmemmap mappings for
+the memory range being removed, get unmapped via unmap_hotplug_range() but
+mapped pages get freed only for vmemmap mapping. This is just a sequential
+operation where each table entry gets cleared, followed by a leaf specific
+TLB flush, and then followed by memory free operation when applicable.
+
+This approach was simple and uniform both for vmemmap and linear mappings.
+But linear mapping might contain CONT marked block memory where it becomes
+necessary to first clear out all entire in the range before a TLB flush.
+This is as per the architecture requirement. Hence batch all TLB flushes
+during the table tear down walk and finally do it in unmap_hotplug_range().
+
+Prior to this fix, it was hypothetically possible for a speculative access
+to a higher address in the contiguous block to fill the TLB with shattered
+entries for the entire contiguous range after a lower address had already
+been cleared and invalidated. Due to the table entries being shattered, the
+subsequent TLB invalidation for the higher address would not then clear the
+TLB entries for the lower address, meaning stale TLB entries could persist.
+
+Besides it also helps in improving the performance via TLBI range operation
+along with reduced synchronization instructions. The time spent executing
+unmap_hotplug_range() improved 97% measured over a 2GB memory hot removal
+in KVM guest.
+
+This scheme is not applicable during vmemmap mapping tear down where memory
+needs to be freed and hence a TLB flush is required after clearing out page
+table entry.
+
+Cc: Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-kernel@vger.kernel.org
+Closes: https://lore.kernel.org/all/aWZYXhrT6D2M-7-N@willie-the-truck/
+Fixes: bbd6ec605c0f ("arm64/mm: Enable memory hot remove")
+Cc: stable@vger.kernel.org
+Reviewed-by: David Hildenbrand (Arm) <david@kernel.org>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ replaced `__pte_clear()` with `pte_clear()` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c | 36 ++++++++++++++++++++----------------
+ 1 file changed, 20 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -870,10 +870,14 @@ static void unmap_hotplug_pte_range(pmd_
+
+ WARN_ON(!pte_present(pte));
+ pte_clear(&init_mm, addr, ptep);
+- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+- if (free_mapped)
++ if (free_mapped) {
++ /* CONT blocks are not supported in the vmemmap */
++ WARN_ON(pte_cont(pte));
++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ free_hotplug_page_range(pte_page(pte),
+ PAGE_SIZE, altmap);
++ }
++ /* unmap_hotplug_range() flushes TLB for !free_mapped */
+ } while (addr += PAGE_SIZE, addr < end);
+ }
+
+@@ -894,15 +898,14 @@ static void unmap_hotplug_pmd_range(pud_
+ WARN_ON(!pmd_present(pmd));
+ if (pmd_sect(pmd)) {
+ pmd_clear(pmdp);
+-
+- /*
+- * One TLBI should be sufficient here as the PMD_SIZE
+- * range is mapped with a single block entry.
+- */
+- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+- if (free_mapped)
++ if (free_mapped) {
++ /* CONT blocks are not supported in the vmemmap */
++ WARN_ON(pmd_cont(pmd));
++ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ free_hotplug_page_range(pmd_page(pmd),
+ PMD_SIZE, altmap);
++ }
++ /* unmap_hotplug_range() flushes TLB for !free_mapped */
+ continue;
+ }
+ WARN_ON(!pmd_table(pmd));
+@@ -927,15 +930,12 @@ static void unmap_hotplug_pud_range(p4d_
+ WARN_ON(!pud_present(pud));
+ if (pud_sect(pud)) {
+ pud_clear(pudp);
+-
+- /*
+- * One TLBI should be sufficient here as the PUD_SIZE
+- * range is mapped with a single block entry.
+- */
+- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+- if (free_mapped)
++ if (free_mapped) {
++ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+ free_hotplug_page_range(pud_page(pud),
+ PUD_SIZE, altmap);
++ }
++ /* unmap_hotplug_range() flushes TLB for !free_mapped */
+ continue;
+ }
+ WARN_ON(!pud_table(pud));
+@@ -965,6 +965,7 @@ static void unmap_hotplug_p4d_range(pgd_
+ static void unmap_hotplug_range(unsigned long addr, unsigned long end,
+ bool free_mapped, struct vmem_altmap *altmap)
+ {
++ unsigned long start = addr;
+ unsigned long next;
+ pgd_t *pgdp, pgd;
+
+@@ -986,6 +987,9 @@ static void unmap_hotplug_range(unsigned
+ WARN_ON(!pgd_present(pgd));
+ unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
+ } while (addr = next, addr < end);
++
++ if (!free_mapped)
++ flush_tlb_kernel_range(start, end);
+ }
+
+ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
--- /dev/null
+From stable+bounces-242626-greg=kroah.com@vger.kernel.org Sun May 3 01:58:16 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 May 2026 19:57:59 -0400
+Subject: block: relax pgmap check in bio_add_page for compatible zone device pages
+To: stable@vger.kernel.org
+Cc: Naman Jain <namjain@linux.microsoft.com>, Christoph Hellwig <hch@lst.de>, Jens Axboe <axboe@kernel.dk>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260502235759.921576-1-sashal@kernel.org>
+
+From: Naman Jain <namjain@linux.microsoft.com>
+
+[ Upstream commit 41c665aae2b5dbecddddcc8ace344caf630cc7a4 ]
+
+bio_add_page() and bio_integrity_add_page() reject pages from different
+dev_pagemaps entirely, returning 0 even when those pages have compatible
+DMA mapping requirements. This forces callers to start a new bio when
+buffers span pgmap boundaries, even though the pages could safely coexist
+as separate bvec entries.
+
+This matters for guests where memory is registered through
+devm_memremap_pages() with MEMORY_DEVICE_GENERIC in multiple calls,
+creating separate dev_pagemaps for each chunk. When a direct I/O buffer
+spans two such chunks, bio_add_page() rejects the second page, forcing an
+unnecessary bio split or I/O failure.
+
+Introduce zone_device_pages_compatible() in blk.h to check whether two
+pages can coexist in the same bio as separate bvec entries. The block DMA
+iterator (blk_dma_map_iter_start) caches the P2PDMA mapping state from the
+first segment and applies it to all others, so P2PDMA pages from different
+pgmaps must not be mixed, and neither must P2PDMA and non-P2PDMA pages.
+All other combinations (MEMORY_DEVICE_GENERIC pages from different pgmaps,
+or MEMORY_DEVICE_GENERIC with normal RAM) use the same dma_map_phys path
+and are safe.
+
+Replace the blanket zone_device_pages_have_same_pgmap() rejection with
+zone_device_pages_compatible(), while keeping
+zone_device_pages_have_same_pgmap() as a merge guard.
+Pages from different pgmaps can be added as separate bvec entries but
+must not be coalesced into the same segment, as that would make
+it impossible to recover the correct pgmap via page_pgmap().
+
+Fixes: 49580e690755 ("block: add check when merging zone device pages")
+Cc: stable@vger.kernel.org
+Signed-off-by: Naman Jain <namjain@linux.microsoft.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://patch.msgid.link/20260410153414.4159050-3-namjain@linux.microsoft.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ restructured combined `if` into explicit `bv` block ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bio-integrity.c | 2 ++
+ block/bio.c | 14 +++++++++-----
+ block/blk.h | 19 +++++++++++++++++++
+ 3 files changed, 30 insertions(+), 5 deletions(-)
+
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -134,6 +134,8 @@ int bio_integrity_add_page(struct bio *b
+ struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
+ bool same_page = false;
+
++ if (!zone_device_pages_compatible(bv->bv_page, page))
++ return 0;
+ if (bvec_try_merge_hw_page(q, bv, page, len, offset,
+ &same_page)) {
+ bip->bip_iter.bi_size += len;
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1098,11 +1098,15 @@ int bio_add_page(struct bio *bio, struct
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return 0;
+
+- if (bio->bi_vcnt > 0 &&
+- bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
+- page, len, offset, &same_page)) {
+- bio->bi_iter.bi_size += len;
+- return len;
++ if (bio->bi_vcnt > 0) {
++ struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
++
++ if (!zone_device_pages_compatible(bv->bv_page, page))
++ return 0;
++ if (bvec_try_merge_page(bv, page, len, offset, &same_page)) {
++ bio->bi_iter.bi_size += len;
++ return len;
++ }
+ }
+
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -104,6 +104,25 @@ static inline bool biovec_phys_mergeable
+ return true;
+ }
+
++/*
++ * Check if two pages from potentially different zone device pgmaps can
++ * coexist as separate bvec entries in the same bio.
++ *
++ * The block DMA iterator (blk_dma_map_iter_start) caches the P2PDMA mapping
++ * state from the first segment and applies it to all subsequent segments, so
++ * P2PDMA pages from different pgmaps must not be mixed in the same bio.
++ *
++ * Other zone device types (FS_DAX, GENERIC) use the same dma_map_phys() path
++ * as normal RAM. PRIVATE and COHERENT pages never appear in bios.
++ */
++static inline bool zone_device_pages_compatible(const struct page *a,
++ const struct page *b)
++{
++ if (is_pci_p2pdma_page(a) || is_pci_p2pdma_page(b))
++ return zone_device_pages_have_same_pgmap(a, b);
++ return true;
++}
++
+ static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
+ struct bio_vec *bprv, unsigned int offset)
+ {
--- /dev/null
+From stable+bounces-241123-greg=kroah.com@vger.kernel.org Sat Apr 25 13:18:11 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 07:18:05 -0400
+Subject: f2fs: fix to do sanity check on dcc->discard_cmd_cnt conditionally
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, stable@kernel.org, syzbot+62538b67389ee582837a@syzkaller.appspotmail.com, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425111805.3708792-1-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 6af249c996f7d73a3435f9e577956fa259347d18 ]
+
+Syzbot reported a f2fs bug as below:
+
+------------[ cut here ]------------
+kernel BUG at fs/f2fs/segment.c:1900!
+Oops: invalid opcode: 0000 [#1] SMP KASAN PTI
+CPU: 1 UID: 0 PID: 6527 Comm: syz.5.110 Not tainted syzkaller #0 PREEMPT_{RT,(full)}
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2026
+RIP: 0010:f2fs_issue_discard_timeout+0x59b/0x5a0 fs/f2fs/segment.c:1900
+Code: d9 80 e1 07 80 c1 03 38 c1 0f 8c d6 fe ff ff 48 89 df e8 a8 5e fa fd e9 c9 fe ff ff e8 4e 46 94 fd 90 0f 0b e8 46 46 94 fd 90 <0f> 0b 0f 1f 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3
+RSP: 0018:ffffc9000494f940 EFLAGS: 00010283
+RAX: ffffffff843009ca RBX: 0000000000000001 RCX: 0000000000080000
+RDX: ffffc9001ca78000 RSI: 00000000000029f3 RDI: 00000000000029f4
+RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
+R10: dffffc0000000000 R11: ffffed100893a431 R12: 1ffff1100893a430
+R13: 1ffff1100c2b702c R14: dffffc0000000000 R15: ffff8880449d2160
+FS: 00007ffa35fed6c0(0000) GS:ffff88812643d000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f2b68634000 CR3: 0000000039f62000 CR4: 00000000003526f0
+Call Trace:
+ <TASK>
+ __f2fs_remount fs/f2fs/super.c:2960 [inline]
+ f2fs_reconfigure+0x108a/0x1710 fs/f2fs/super.c:5443
+ reconfigure_super+0x227/0x8a0 fs/super.c:1080
+ do_remount fs/namespace.c:3391 [inline]
+ path_mount+0xdc5/0x10e0 fs/namespace.c:4151
+ do_mount fs/namespace.c:4172 [inline]
+ __do_sys_mount fs/namespace.c:4361 [inline]
+ __se_sys_mount+0x31d/0x420 fs/namespace.c:4338
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x14d/0xf80 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7ffa37dbda0a
+
+The root cause is there will be race condition in between f2fs_ioc_fitrim()
+and f2fs_remount():
+
+- f2fs_remount - f2fs_ioc_fitrim
+ - f2fs_issue_discard_timeout
+ - __issue_discard_cmd
+ - __drop_discard_cmd
+ - __wait_all_discard_cmd
+ - f2fs_trim_fs
+ - f2fs_write_checkpoint
+ - f2fs_clear_prefree_segments
+ - f2fs_issue_discard
+ - __issue_discard_async
+ - __queue_discard_cmd
+ - __update_discard_tree_range
+ - __insert_discard_cmd
+ - __create_discard_cmd
+ : atomic_inc(&dcc->discard_cmd_cnt);
+ - sanity check on dcc->discard_cmd_cnt (expect discard_cmd_cnt to be zero)
+
+This will only happen when fitrim races w/ remount rw, if we remount to
+readonly filesystem, remount will wait until mnt_pcp.mnt_writers to zero,
+that means fitrim is not in process at that time.
+
+Cc: stable@kernel.org
+Fixes: 2482c4325dfe ("f2fs: detect bug_on in f2fs_wait_discard_bios")
+Reported-by: syzbot+62538b67389ee582837a@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-f2fs-devel/69b07d7c.050a0220.8df7.09a1.GAE@google.com
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ dereferenced flags pointer (`*flags & SB_RDONLY`) to match `int *flags` remount signature ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/f2fs.h | 2 +-
+ fs/f2fs/segment.c | 6 +++---
+ fs/f2fs/super.c | 11 ++++++++---
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3722,7 +3722,7 @@ bool f2fs_is_checkpointed_data(struct f2
+ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
+ void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
+ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
+-bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
++bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi, bool need_check);
+ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+ struct cp_control *cpc);
+ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1873,7 +1873,7 @@ void f2fs_stop_discard_thread(struct f2f
+ *
+ * Return true if issued all discard cmd or no discard cmd need issue, otherwise return false.
+ */
+-bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
++bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi, bool need_check)
+ {
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_policy dpolicy;
+@@ -1890,7 +1890,7 @@ bool f2fs_issue_discard_timeout(struct f
+ /* just to make sure there is no pending discard commands */
+ __wait_all_discard_cmd(sbi, NULL);
+
+- f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
++ f2fs_bug_on(sbi, need_check && atomic_read(&dcc->discard_cmd_cnt));
+ return !dropped;
+ }
+
+@@ -2349,7 +2349,7 @@ static void destroy_discard_cmd_control(
+ * Recovery can cache discard commands, so in error path of
+ * fill_super(), it needs to give a chance to handle them.
+ */
+- f2fs_issue_discard_timeout(sbi);
++ f2fs_issue_discard_timeout(sbi, true);
+
+ kfree(dcc);
+ SM_I(sbi)->dcc_info = NULL;
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1612,7 +1612,7 @@ static void f2fs_put_super(struct super_
+ }
+
+ /* be sure to wait for any on-going discard commands */
+- done = f2fs_issue_discard_timeout(sbi);
++ done = f2fs_issue_discard_timeout(sbi, true);
+ if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT | CP_TRIMMED,
+@@ -1754,7 +1754,7 @@ static int f2fs_unfreeze(struct super_bl
+ * will recover after removal of snapshot.
+ */
+ if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
+- f2fs_issue_discard_timeout(sbi);
++ f2fs_issue_discard_timeout(sbi, true);
+
+ clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
+ return 0;
+@@ -2515,7 +2515,12 @@ static int f2fs_remount(struct super_blo
+ need_stop_discard = true;
+ } else {
+ f2fs_stop_discard_thread(sbi);
+- f2fs_issue_discard_timeout(sbi);
++ /*
++ * f2fs_ioc_fitrim() won't race w/ "remount ro"
++ * so it's safe to check discard_cmd_cnt in
++ * f2fs_issue_discard_timeout().
++ */
++ f2fs_issue_discard_timeout(sbi, *flags & SB_RDONLY);
+ need_restart_discard = true;
+ }
+ }
--- /dev/null
+From stable+bounces-240662-greg=kroah.com@vger.kernel.org Fri Apr 24 15:29:00 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2026 09:28:50 -0400
+Subject: f2fs: fix UAF caused by decrementing sbi->nr_pages[] in f2fs_write_end_io()
+To: stable@vger.kernel.org
+Cc: Yongpeng Yang <yangyongpeng@xiaomi.com>, stable@kernel.org, syzbot+6e4cb1cac5efc96ea0ca@syzkaller.appspotmail.com, Chao Yu <chao@kernel.org>, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260424132850.1947809-1-sashal@kernel.org>
+
+From: Yongpeng Yang <yangyongpeng@xiaomi.com>
+
+[ Upstream commit 2d9c4a4ed4eef1f82c5b16b037aee8bad819fd53 ]
+
+The xfstests case "generic/107" and syzbot have both reported a NULL
+pointer dereference.
+
+The concurrent scenario that triggers the panic is as follows:
+
+F2FS_WB_CP_DATA write callback umount
+ - f2fs_write_checkpoint
+ - f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA)
+- blk_mq_end_request
+ - bio_endio
+ - f2fs_write_end_io
+ : dec_page_count(sbi, F2FS_WB_CP_DATA)
+ : wake_up(&sbi->cp_wait)
+ - kill_f2fs_super
+ - kill_block_super
+ - f2fs_put_super
+ : iput(sbi->node_inode)
+ : sbi->node_inode = NULL
+ : f2fs_in_warm_node_list
+ - is_node_folio // sbi->node_inode is NULL and panic
+
+The root cause is that f2fs_put_super() calls iput(sbi->node_inode) and
+sets sbi->node_inode to NULL after sbi->nr_pages[F2FS_WB_CP_DATA] is
+decremented to zero. As a result, f2fs_in_warm_node_list() may
+dereference a NULL node_inode when checking whether a folio belongs to
+the node inode, leading to a panic.
+
+This patch fixes the issue by calling f2fs_in_warm_node_list() before
+decrementing sbi->nr_pages[F2FS_WB_CP_DATA], thus preventing the
+use-after-free condition.
+
+Cc: stable@kernel.org
+Fixes: 50fa53eccf9f ("f2fs: fix to avoid broken of dnode block list")
+Reported-by: syzbot+6e4cb1cac5efc96ea0ca@syzkaller.appspotmail.com
+Signed-off-by: Yongpeng Yang <yangyongpeng@xiaomi.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ folio => page ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/data.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -356,6 +356,8 @@ static void f2fs_write_end_io(struct bio
+
+ f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
+ page->index != nid_of_node(page));
++ if (f2fs_in_warm_node_list(sbi, page))
++ f2fs_del_fsync_node_entry(sbi, page);
+
+ dec_page_count(sbi, type);
+
+@@ -367,8 +369,6 @@ static void f2fs_write_end_io(struct bio
+ wq_has_sleeper(&sbi->cp_wait))
+ wake_up(&sbi->cp_wait);
+
+- if (f2fs_in_warm_node_list(sbi, page))
+- f2fs_del_fsync_node_entry(sbi, page);
+ clear_page_private_gcing(page);
+ end_page_writeback(page);
+ }
--- /dev/null
+From stable+bounces-243987-greg=kroah.com@vger.kernel.org Tue May 5 08:00:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 May 2026 02:00:00 -0400
+Subject: fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info
+To: stable@vger.kernel.org
+Cc: Thomas Zimmermann <tzimmermann@suse.de>, Helge Deller <deller@gmx.de>, linux-fbdev@vger.kernel.org, dri-devel@lists.freedesktop.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505060001.225157-1-sashal@kernel.org>
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 9ded47ad003f09a94b6a710b5c47f4aa5ceb7429 ]
+
+Hold state of deferred I/O in struct fb_deferred_io_state. Allocate an
+instance as part of initializing deferred I/O and remove it only after
+the final mapping has been closed. If the fb_info and the contained
+deferred I/O meanwhile goes away, clear struct fb_deferred_io_state.info
+to invalidate the mapping. Any access will then result in a SIGBUS
+signal.
+
+Fixes a long-standing problem, where a device hot-unplug happens while
+user space still has an active mapping of the graphics memory. The hot-
+unplug frees the instance of struct fb_info. Accessing the memory will
+operate on undefined state.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 60b59beafba8 ("fbdev: mm: Deferred IO support")
+Cc: Helge Deller <deller@gmx.de>
+Cc: linux-fbdev@vger.kernel.org
+Cc: dri-devel@lists.freedesktop.org
+Cc: stable@vger.kernel.org # v2.6.22+
+Signed-off-by: Helge Deller <deller@gmx.de>
+[ replaced `kzalloc_obj` with `kzalloc`, and dropped `mutex_destroy(&fbdefio->lock)` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fb_defio.c | 179 ++++++++++++++++++++++++++++--------
+ include/linux/fb.h | 4
+ 2 files changed, 145 insertions(+), 38 deletions(-)
+
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -23,6 +23,75 @@
+ #include <linux/rmap.h>
+ #include <linux/pagemap.h>
+
++/*
++ * struct fb_deferred_io_state
++ */
++
++struct fb_deferred_io_state {
++ struct kref ref;
++
++ struct mutex lock; /* mutex that protects the pageref list */
++ /* fields protected by lock */
++ struct fb_info *info;
++};
++
++static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
++{
++ struct fb_deferred_io_state *fbdefio_state;
++
++ fbdefio_state = kzalloc(sizeof(*fbdefio_state), GFP_KERNEL);
++ if (!fbdefio_state)
++ return NULL;
++
++ kref_init(&fbdefio_state->ref);
++ mutex_init(&fbdefio_state->lock);
++
++ return fbdefio_state;
++}
++
++static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
++{
++ mutex_destroy(&fbdefio_state->lock);
++
++ kfree(fbdefio_state);
++}
++
++static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
++{
++ kref_get(&fbdefio_state->ref);
++}
++
++static void __fb_deferred_io_state_release(struct kref *ref)
++{
++ struct fb_deferred_io_state *fbdefio_state =
++ container_of(ref, struct fb_deferred_io_state, ref);
++
++ fb_deferred_io_state_release(fbdefio_state);
++}
++
++static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
++{
++ kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
++}
++
++/*
++ * struct vm_operations_struct
++ */
++
++static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
++{
++ struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
++
++ fb_deferred_io_state_get(fbdefio_state);
++}
++
++static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
++{
++ struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
++
++ fb_deferred_io_state_put(fbdefio_state);
++}
++
+ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
+ {
+ void *screen_base = (void __force *) info->screen_base;
+@@ -93,17 +162,31 @@ static void fb_deferred_io_pageref_put(s
+ /* this is to find and return the vmalloc-ed fb pages */
+ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
+ {
++ struct fb_info *info;
+ unsigned long offset;
+ struct page *page;
+- struct fb_info *info = vmf->vma->vm_private_data;
++ vm_fault_t ret;
++ struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
++
++ mutex_lock(&fbdefio_state->lock);
++
++ info = fbdefio_state->info;
++ if (!info) {
++ ret = VM_FAULT_SIGBUS; /* our device is gone */
++ goto err_mutex_unlock;
++ }
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+- if (offset >= info->fix.smem_len)
+- return VM_FAULT_SIGBUS;
++ if (offset >= info->fix.smem_len) {
++ ret = VM_FAULT_SIGBUS;
++ goto err_mutex_unlock;
++ }
+
+ page = fb_deferred_io_page(info, offset);
+- if (!page)
+- return VM_FAULT_SIGBUS;
++ if (!page) {
++ ret = VM_FAULT_SIGBUS;
++ goto err_mutex_unlock;
++ }
+
+ get_page(page);
+
+@@ -115,8 +198,15 @@ static vm_fault_t fb_deferred_io_fault(s
+ BUG_ON(!page->mapping);
+ page->index = vmf->pgoff; /* for page_mkclean() */
+
++ mutex_unlock(&fbdefio_state->lock);
++
+ vmf->page = page;
++
+ return 0;
++
++err_mutex_unlock:
++ mutex_unlock(&fbdefio_state->lock);
++ return ret;
+ }
+
+ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+@@ -143,15 +233,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
+ * Adds a page to the dirty list. Call this from struct
+ * vm_operations_struct.page_mkwrite.
+ */
+-static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
+- struct page *page)
++static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
++ unsigned long offset, struct page *page)
+ {
+- struct fb_deferred_io *fbdefio = info->fbdefio;
++ struct fb_info *info;
++ struct fb_deferred_io *fbdefio;
+ struct fb_deferred_io_pageref *pageref;
+ vm_fault_t ret;
+
+ /* protect against the workqueue changing the page list */
+- mutex_lock(&fbdefio->lock);
++ mutex_lock(&fbdefio_state->lock);
++
++ info = fbdefio_state->info;
++ if (!info) {
++ ret = VM_FAULT_SIGBUS; /* our device is gone */
++ goto err_mutex_unlock;
++ }
++
++ fbdefio = info->fbdefio;
+
+ pageref = fb_deferred_io_pageref_get(info, offset, page);
+ if (WARN_ON_ONCE(!pageref)) {
+@@ -169,50 +268,38 @@ static vm_fault_t fb_deferred_io_track_p
+ */
+ lock_page(pageref->page);
+
+- mutex_unlock(&fbdefio->lock);
++ mutex_unlock(&fbdefio_state->lock);
+
+ /* come back after delay to process the deferred IO */
+ schedule_delayed_work(&info->deferred_work, fbdefio->delay);
+ return VM_FAULT_LOCKED;
+
+ err_mutex_unlock:
+- mutex_unlock(&fbdefio->lock);
++ mutex_unlock(&fbdefio_state->lock);
+ return ret;
+ }
+
+-/*
+- * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
+- * @fb_info: The fbdev info structure
+- * @vmf: The VM fault
+- *
+- * This is a callback we get when userspace first tries to
+- * write to the page. We schedule a workqueue. That workqueue
+- * will eventually mkclean the touched pages and execute the
+- * deferred framebuffer IO. Then if userspace touches a page
+- * again, we repeat the same scheme.
+- *
+- * Returns:
+- * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
+- */
+-static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
++static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
++ struct vm_fault *vmf)
+ {
+ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
+ struct page *page = vmf->page;
+
+ file_update_time(vmf->vma->vm_file);
+
+- return fb_deferred_io_track_page(info, offset, page);
++ return fb_deferred_io_track_page(fbdefio_state, offset, page);
+ }
+
+-/* vm_ops->page_mkwrite handler */
+ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+ {
+- struct fb_info *info = vmf->vma->vm_private_data;
++ struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
+
+- return fb_deferred_io_page_mkwrite(info, vmf);
++ return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
+ }
+
+ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
++ .open = fb_deferred_io_vm_open,
++ .close = fb_deferred_io_vm_close,
+ .fault = fb_deferred_io_fault,
+ .page_mkwrite = fb_deferred_io_mkwrite,
+ };
+@@ -227,7 +314,10 @@ int fb_deferred_io_mmap(struct fb_info *
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
+ if (!(info->flags & FBINFO_VIRTFB))
+ vm_flags_set(vma, VM_IO);
+- vma->vm_private_data = info;
++ vma->vm_private_data = info->fbdefio_state;
++
++ fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
+@@ -238,9 +328,10 @@ static void fb_deferred_io_work(struct w
+ struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
+ struct fb_deferred_io_pageref *pageref, *next;
+ struct fb_deferred_io *fbdefio = info->fbdefio;
++ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
+
+ /* here we mkclean the pages, then do all deferred IO */
+- mutex_lock(&fbdefio->lock);
++ mutex_lock(&fbdefio_state->lock);
+ list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
+ struct page *cur = pageref->page;
+ lock_page(cur);
+@@ -255,12 +346,13 @@ static void fb_deferred_io_work(struct w
+ list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
+ fb_deferred_io_pageref_put(pageref, info);
+
+- mutex_unlock(&fbdefio->lock);
++ mutex_unlock(&fbdefio_state->lock);
+ }
+
+ int fb_deferred_io_init(struct fb_info *info)
+ {
+ struct fb_deferred_io *fbdefio = info->fbdefio;
++ struct fb_deferred_io_state *fbdefio_state;
+ struct fb_deferred_io_pageref *pagerefs;
+ unsigned long npagerefs, i;
+ int ret;
+@@ -270,7 +362,11 @@ int fb_deferred_io_init(struct fb_info *
+ if (WARN_ON(!info->fix.smem_len))
+ return -EINVAL;
+
+- mutex_init(&fbdefio->lock);
++ fbdefio_state = fb_deferred_io_state_alloc();
++ if (!fbdefio_state)
++ return -ENOMEM;
++ fbdefio_state->info = info;
++
+ INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
+ INIT_LIST_HEAD(&fbdefio->pagereflist);
+ if (fbdefio->delay == 0) /* set a default of 1 s */
+@@ -289,10 +385,12 @@ int fb_deferred_io_init(struct fb_info *
+ info->npagerefs = npagerefs;
+ info->pagerefs = pagerefs;
+
++ info->fbdefio_state = fbdefio_state;
++
+ return 0;
+
+ err:
+- mutex_destroy(&fbdefio->lock);
++ fb_deferred_io_state_release(fbdefio_state);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_init);
+@@ -333,11 +431,18 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release
+
+ void fb_deferred_io_cleanup(struct fb_info *info)
+ {
+- struct fb_deferred_io *fbdefio = info->fbdefio;
++ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
+
+ fb_deferred_io_lastclose(info);
+
++ info->fbdefio_state = NULL;
++
++ mutex_lock(&fbdefio_state->lock);
++ fbdefio_state->info = NULL;
++ mutex_unlock(&fbdefio_state->lock);
++
++ fb_deferred_io_state_put(fbdefio_state);
++
+ kvfree(info->pagerefs);
+- mutex_destroy(&fbdefio->lock);
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -214,11 +214,12 @@ struct fb_deferred_io {
+ unsigned long delay;
+ bool sort_pagereflist; /* sort pagelist by offset */
+ int open_count; /* number of opened files; protected by fb_info lock */
+- struct mutex lock; /* mutex that protects the pageref list */
+ struct list_head pagereflist; /* list of pagerefs for touched pages */
+ /* callback */
+ void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
+ };
++
++struct fb_deferred_io_state;
+ #endif
+
+ /*
+@@ -476,6 +477,7 @@ struct fb_info {
+ unsigned long npagerefs;
+ struct fb_deferred_io_pageref *pagerefs;
+ struct fb_deferred_io *fbdefio;
++ struct fb_deferred_io_state *fbdefio_state;
+ #endif
+
+ const struct fb_ops *fbops;
--- /dev/null
+From stable+bounces-244118-greg=kroah.com@vger.kernel.org Tue May 5 14:06:19 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 May 2026 08:01:31 -0400
+Subject: firmware: google: framebuffer: Do not unregister platform device
+To: stable@vger.kernel.org
+Cc: Thomas Zimmermann <tzimmermann@suse.de>, Tzung-Bi Shih <tzungbi@kernel.org>, Julius Werner <jwerner@chromium.org>, Javier Martinez Canillas <javierm@redhat.com>, Hans de Goede <hansg@kernel.org>, linux-fbdev@vger.kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505120131.663403-1-sashal@kernel.org>
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 5cd28bd28c8ce426b56ce4230dbd17537181d5ad ]
+
+The native driver takes over the framebuffer aperture by removing the
+system-framebuffer platform device. Afterwards the pointer in drvdata
+is dangling. Remove the entire logic around drvdata and let the kernel's
+aperture helpers handle this. The platform device depends on the native
+hardware device instead of the coreboot device anyway.
+
+When commit 851b4c14532d ("firmware: coreboot: Add coreboot framebuffer
+driver") added the coreboot framebuffer code, the kernel did not support
+device-based aperture management. Instead native drivers only removed
+the conflicting fbdev device. At that point, unregistering the framebuffer
+device most likely worked correctly. It was definitely broken after
+commit d9702b2a2171 ("fbdev/simplefb: Do not use struct
+fb_info.apertures"). So take this commit for the Fixes tag. Earlier
+releases might work depending on the native hardware driver.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: d9702b2a2171 ("fbdev/simplefb: Do not use struct fb_info.apertures")
+Acked-by: Tzung-Bi Shih <tzungbi@kernel.org>
+Acked-by: Julius Werner <jwerner@chromium.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Javier Martinez Canillas <javierm@redhat.com>
+Cc: Hans de Goede <hansg@kernel.org>
+Cc: linux-fbdev@vger.kernel.org
+Cc: <stable@vger.kernel.org> # v6.3+
+Link: https://patch.msgid.link/20260217155836.96267-2-tzimmermann@suse.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/google/framebuffer-coreboot.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/drivers/firmware/google/framebuffer-coreboot.c
++++ b/drivers/firmware/google/framebuffer-coreboot.c
+@@ -64,22 +64,12 @@ static int framebuffer_probe(struct core
+ sizeof(pdata));
+ if (IS_ERR(pdev))
+ pr_warn("coreboot: could not register framebuffer\n");
+- else
+- dev_set_drvdata(&dev->dev, pdev);
+
+ return PTR_ERR_OR_ZERO(pdev);
+ }
+
+-static void framebuffer_remove(struct coreboot_device *dev)
+-{
+- struct platform_device *pdev = dev_get_drvdata(&dev->dev);
+-
+- platform_device_unregister(pdev);
+-}
+-
+ static struct coreboot_driver framebuffer_driver = {
+ .probe = framebuffer_probe,
+- .remove = framebuffer_remove,
+ .drv = {
+ .name = "framebuffer",
+ },
--- /dev/null
+From stable+bounces-244863-greg=kroah.com@vger.kernel.org Sat May 9 02:49:09 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 20:48:54 -0400
+Subject: hfsplus: fix held lock freed on hfsplus_fill_super()
+To: stable@vger.kernel.org
+Cc: Zilin Guan <zilin@seu.edu.cn>, Viacheslav Dubeyko <slava@dubeyko.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509004855.2446851-2-sashal@kernel.org>
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit 90c500e4fd83fa33c09bc7ee23b6d9cc487ac733 ]
+
+hfsplus_fill_super() calls hfs_find_init() to initialize a search
+structure, which acquires tree->tree_lock. If the subsequent call to
+hfsplus_cat_build_key() fails, the function jumps to the out_put_root
+error label without releasing the lock. The later cleanup path then
+frees the tree data structure with the lock still held, triggering a
+held lock freed warning.
+
+Fix this by adding the missing hfs_find_exit(&fd) call before jumping
+to the out_put_root error label. This ensures that tree->tree_lock is
+properly released on the error path.
+
+The bug was originally detected on v6.13-rc1 using an experimental
+static analysis tool we are developing, and we have verified that the
+issue persists in the latest mainline kernel. The tool is specifically
+designed to detect memory management issues. It is currently under active
+development and not yet publicly available.
+
+We confirmed the bug by runtime testing under QEMU with x86_64 defconfig,
+lockdep enabled, and CONFIG_HFSPLUS_FS=y. To trigger the error path, we
+used GDB to dynamically shrink the max_unistr_len parameter to 1 before
+hfsplus_asc2uni() is called. This forces hfsplus_asc2uni() to naturally
+return -ENAMETOOLONG, which propagates to hfsplus_cat_build_key() and
+exercises the faulty error path. The following warning was observed
+during mount:
+
+ =========================
+ WARNING: held lock freed!
+ 7.0.0-rc3-00016-gb4f0dd314b39 #4 Not tainted
+ -------------------------
+ mount/174 is freeing memory ffff888103f92000-ffff888103f92fff, with a lock still held there!
+ ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0
+ 2 locks held by mount/174:
+ #0: ffff888103f960e0 (&type->s_umount_key#42/1){+.+.}-{4:4}, at: alloc_super.constprop.0+0x167/0xa40
+ #1: ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0
+
+ stack backtrace:
+ CPU: 2 UID: 0 PID: 174 Comm: mount Not tainted 7.0.0-rc3-00016-gb4f0dd314b39 #4 PREEMPT(lazy)
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x82/0xd0
+ debug_check_no_locks_freed+0x13a/0x180
+ kfree+0x16b/0x510
+ ? hfsplus_fill_super+0xcb4/0x18a0
+ hfsplus_fill_super+0xcb4/0x18a0
+ ? __pfx_hfsplus_fill_super+0x10/0x10
+ ? srso_return_thunk+0x5/0x5f
+ ? bdev_open+0x65f/0xc30
+ ? srso_return_thunk+0x5/0x5f
+ ? pointer+0x4ce/0xbf0
+ ? trace_contention_end+0x11c/0x150
+ ? __pfx_pointer+0x10/0x10
+ ? srso_return_thunk+0x5/0x5f
+ ? bdev_open+0x79b/0xc30
+ ? srso_return_thunk+0x5/0x5f
+ ? srso_return_thunk+0x5/0x5f
+ ? vsnprintf+0x6da/0x1270
+ ? srso_return_thunk+0x5/0x5f
+ ? __mutex_unlock_slowpath+0x157/0x740
+ ? __pfx_vsnprintf+0x10/0x10
+ ? srso_return_thunk+0x5/0x5f
+ ? srso_return_thunk+0x5/0x5f
+ ? mark_held_locks+0x49/0x80
+ ? srso_return_thunk+0x5/0x5f
+ ? srso_return_thunk+0x5/0x5f
+ ? irqentry_exit+0x17b/0x5e0
+ ? trace_irq_disable.constprop.0+0x116/0x150
+ ? __pfx_hfsplus_fill_super+0x10/0x10
+ ? __pfx_hfsplus_fill_super+0x10/0x10
+ get_tree_bdev_flags+0x302/0x580
+ ? __pfx_get_tree_bdev_flags+0x10/0x10
+ ? vfs_parse_fs_qstr+0x129/0x1a0
+ ? __pfx_vfs_parse_fs_qstr+0x3/0x10
+ vfs_get_tree+0x89/0x320
+ fc_mount+0x10/0x1d0
+ path_mount+0x5c5/0x21c0
+ ? __pfx_path_mount+0x10/0x10
+ ? trace_irq_enable.constprop.0+0x116/0x150
+ ? trace_irq_enable.constprop.0+0x116/0x150
+ ? srso_return_thunk+0x5/0x5f
+ ? srso_return_thunk+0x5/0x5f
+ ? kmem_cache_free+0x307/0x540
+ ? user_path_at+0x51/0x60
+ ? __x64_sys_mount+0x212/0x280
+ ? srso_return_thunk+0x5/0x5f
+ __x64_sys_mount+0x212/0x280
+ ? __pfx___x64_sys_mount+0x10/0x10
+ ? srso_return_thunk+0x5/0x5f
+ ? trace_irq_enable.constprop.0+0x116/0x150
+ ? srso_return_thunk+0x5/0x5f
+ do_syscall_64+0x111/0x680
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ RIP: 0033:0x7ffacad55eae
+ Code: 48 8b 0d 85 1f 0f 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 49 89 ca b8 a5 00 00 8
+ RSP: 002b:00007fff1ab55718 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
+ RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007ffacad55eae
+ RDX: 000055740c64e5b0 RSI: 000055740c64e630 RDI: 000055740c651ab0
+ RBP: 000055740c64e380 R08: 0000000000000000 R09: 0000000000000001
+ R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+ R13: 000055740c64e5b0 R14: 000055740c651ab0 R15: 000055740c64e380
+ </TASK>
+
+After applying this patch, the warning no longer appears.
+
+Fixes: 89ac9b4d3d1a ("hfsplus: fix longname handling")
+CC: stable@vger.kernel.org
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Tested-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/super.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -539,8 +539,10 @@ static int hfsplus_fill_super(struct sup
+ if (err)
+ goto out_put_root;
+ err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+- if (unlikely(err < 0))
++ if (unlikely(err < 0)) {
++ hfs_find_exit(&fd);
+ goto out_put_root;
++ }
+ if (!hfsplus_brec_read_cat(&fd, &entry)) {
+ hfs_find_exit(&fd);
+ if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
--- /dev/null
+From stable+bounces-244862-greg=kroah.com@vger.kernel.org Sat May 9 02:49:03 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 20:48:53 -0400
+Subject: hfsplus: fix uninit-value by validating catalog record size
+To: stable@vger.kernel.org
+Cc: Deepanshu Kartikey <kartikey406@gmail.com>, syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com, Viacheslav Dubeyko <slava@dubeyko.com>, Charalampos Mitrodimas <charmitro@posteo.net>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509004855.2446851-1-sashal@kernel.org>
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit b6b592275aeff184aa82fcf6abccd833fb71b393 ]
+
+Syzbot reported a KMSAN uninit-value issue in hfsplus_strcasecmp(). The
+root cause is that hfs_brec_read() doesn't validate that the on-disk
+record size matches the expected size for the record type being read.
+
+When mounting a corrupted filesystem, hfs_brec_read() may read less data
+than expected. For example, when reading a catalog thread record, the
+debug output showed:
+
+ HFSPLUS_BREC_READ: rec_len=520, fd->entrylength=26
+ HFSPLUS_BREC_READ: WARNING - entrylength (26) < rec_len (520) - PARTIAL READ!
+
+hfs_brec_read() only validates that entrylength is not greater than the
+buffer size, but doesn't check if it's less than expected. It successfully
+reads 26 bytes into a 520-byte structure and returns success, leaving 494
+bytes uninitialized.
+
+This uninitialized data in tmp.thread.nodeName then gets copied by
+hfsplus_cat_build_key_uni() and used by hfsplus_strcasecmp(), triggering
+the KMSAN warning when the uninitialized bytes are used as array indices
+in case_fold().
+
+Fix by introducing hfsplus_brec_read_cat() wrapper that:
+1. Calls hfs_brec_read() to read the data
+2. Validates the record size based on the type field:
+ - Fixed size for folder and file records
+ - Variable size for thread records (depends on string length)
+3. Returns -EIO if size doesn't match expected
+
+For thread records, check against HFSPLUS_MIN_THREAD_SZ before reading
+nodeName.length to avoid reading uninitialized data at call sites that
+don't zero-initialize the entry structure.
+
+Also initialize the tmp variable in hfsplus_find_cat() as defensive
+programming to ensure no uninitialized data even if validation is
+bypassed.
+
+Reported-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=d80abb5b890d39261e72
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
+Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Tested-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Suggested-by: Charalampos Mitrodimas <charmitro@posteo.net>
+Link: https://lore.kernel.org/all/20260120051114.1281285-1-kartikey406@gmail.com/ [v1]
+Link: https://lore.kernel.org/all/20260121063109.1830263-1-kartikey406@gmail.com/ [v2]
+Link: https://lore.kernel.org/all/20260212014233.2422046-1-kartikey406@gmail.com/ [v3]
+Link: https://lore.kernel.org/all/20260214002100.436125-1-kartikey406@gmail.com/T/ [v4]
+Link: https://lore.kernel.org/all/20260221061626.15853-1-kartikey406@gmail.com/T/ [v5]
+Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Link: https://lore.kernel.org/r/20260307010302.41547-1-kartikey406@gmail.com
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Stable-dep-of: 90c500e4fd83 ("hfsplus: fix held lock freed on hfsplus_fill_super()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/bfind.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/hfsplus/catalog.c | 4 +--
+ fs/hfsplus/dir.c | 2 -
+ fs/hfsplus/hfsplus_fs.h | 9 ++++++++
+ fs/hfsplus/super.c | 2 -
+ 5 files changed, 64 insertions(+), 4 deletions(-)
+
+--- a/fs/hfsplus/bfind.c
++++ b/fs/hfsplus/bfind.c
+@@ -287,3 +287,54 @@ out:
+ fd->bnode = bnode;
+ return res;
+ }
++
++/**
++ * hfsplus_brec_read_cat - read and validate a catalog record
++ * @fd: find data structure
++ * @entry: pointer to catalog entry to read into
++ *
++ * Reads a catalog record and validates its size matches the expected
++ * size based on the record type.
++ *
++ * Returns 0 on success, or negative error code on failure.
++ */
++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry)
++{
++ int res;
++ u32 expected_size;
++
++ res = hfs_brec_read(fd, entry, sizeof(hfsplus_cat_entry));
++ if (res)
++ return res;
++
++ /* Validate catalog record size based on type */
++ switch (be16_to_cpu(entry->type)) {
++ case HFSPLUS_FOLDER:
++ expected_size = sizeof(struct hfsplus_cat_folder);
++ break;
++ case HFSPLUS_FILE:
++ expected_size = sizeof(struct hfsplus_cat_file);
++ break;
++ case HFSPLUS_FOLDER_THREAD:
++ case HFSPLUS_FILE_THREAD:
++ /* Ensure we have at least the fixed fields before reading nodeName.length */
++ if (fd->entrylength < HFSPLUS_MIN_THREAD_SZ) {
++ pr_err("thread record too short (got %u)\n", fd->entrylength);
++ return -EIO;
++ }
++ expected_size = hfsplus_cat_thread_size(&entry->thread);
++ break;
++ default:
++ pr_err("unknown catalog record type %d\n",
++ be16_to_cpu(entry->type));
++ return -EIO;
++ }
++
++ if (fd->entrylength != expected_size) {
++ pr_err("catalog record size mismatch (type %d, got %u, expected %u)\n",
++ be16_to_cpu(entry->type), fd->entrylength, expected_size);
++ return -EIO;
++ }
++
++ return 0;
++}
+--- a/fs/hfsplus/catalog.c
++++ b/fs/hfsplus/catalog.c
+@@ -194,12 +194,12 @@ static int hfsplus_fill_cat_thread(struc
+ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
+ struct hfs_find_data *fd)
+ {
+- hfsplus_cat_entry tmp;
++ hfsplus_cat_entry tmp = {0};
+ int err;
+ u16 type;
+
+ hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
+- err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
++ err = hfsplus_brec_read_cat(fd, &tmp);
+ if (err)
+ return err;
+
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -49,7 +49,7 @@ static struct dentry *hfsplus_lookup(str
+ if (unlikely(err < 0))
+ goto fail;
+ again:
+- err = hfs_brec_read(&fd, &entry, sizeof(entry));
++ err = hfsplus_brec_read_cat(&fd, &entry);
+ if (err) {
+ if (err == -ENOENT) {
+ hfs_find_exit(&fd);
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -535,6 +535,15 @@ int hfsplus_submit_bio(struct super_bloc
+ void **data, blk_opf_t opf);
+ int hfsplus_read_wrapper(struct super_block *sb);
+
++static inline u32 hfsplus_cat_thread_size(const struct hfsplus_cat_thread *thread)
++{
++ return offsetof(struct hfsplus_cat_thread, nodeName) +
++ offsetof(struct hfsplus_unistr, unicode) +
++ be16_to_cpu(thread->nodeName.length) * sizeof(hfsplus_unichr);
++}
++
++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry);
++
+ /*
+ * time helpers: convert between 1904-base and 1970-base timestamps
+ *
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -541,7 +541,7 @@ static int hfsplus_fill_super(struct sup
+ err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+ if (unlikely(err < 0))
+ goto out_put_root;
+- if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
++ if (!hfsplus_brec_read_cat(&fd, &entry)) {
+ hfs_find_exit(&fd);
+ if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
+ err = -EIO;
--- /dev/null
+From stable+bounces-242438-greg=kroah.com@vger.kernel.org Fri May 1 16:07:17 2026
+From: Corey Minyard <corey@minyard.net>
+Date: Fri, 1 May 2026 09:06:58 -0500
+Subject: ipmi:ssif: Clean up kthread on errors
+To: stable@vger.kernel.org
+Cc: Corey Minyard <corey@minyard.net>, Li Xiao <252270051@hdu.edu.cn>
+Message-ID: <20260501140658.707484-2-corey@minyard.net>
+
+From: Corey Minyard <corey@minyard.net>
+
+If an error occurs after the ssif kthread is created, but before the
+main IPMI code starts the ssif interface, the ssif kthread will not
+be stopped.
+
+So make sure the kthread is stopped on an error condition if it is
+running.
+
+Fixes: 259307074bfc ("ipmi: Add SMBus interface driver (SSIF)")
+Reported-by: Li Xiao <252270051@hdu.edu.cn>
+Cc: stable@vger.kernel.org
+Reviewed-by: Li Xiao <252270051@hdu.edu.cn>
+Signed-off-by: Corey Minyard <corey@minyard.net>
+(cherry picked from commit 75c486cb1bcaa1a3ec3a6438498176a3a4998ae4)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1287,8 +1287,10 @@ static void shutdown_ssif(void *send_inf
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->watch_timer);
+ del_timer_sync(&ssif_info->retry_timer);
+- if (ssif_info->thread)
++ if (ssif_info->thread) {
+ kthread_stop(ssif_info->thread);
++ ssif_info->thread = NULL;
++ }
+ }
+
+ static void ssif_remove(struct i2c_client *client)
+@@ -1913,6 +1915,15 @@ static int ssif_probe(struct i2c_client
+
+ out:
+ if (rv) {
++ /*
++ * If ipmi_register_smi() starts the interface, it will
++ * call shutdown and that will free the thread and set
++ * it to NULL. Otherwise it must be freed here.
++ */
++ if (ssif_info->thread) {
++ kthread_stop(ssif_info->thread);
++ ssif_info->thread = NULL;
++ }
+ if (addr_info)
+ addr_info->client = NULL;
+
--- /dev/null
+From stable+bounces-242437-greg=kroah.com@vger.kernel.org Fri May 1 16:08:23 2026
+From: Corey Minyard <corey@minyard.net>
+Date: Fri, 1 May 2026 09:06:57 -0500
+Subject: ipmi:ssif: Fix a shutdown race
+To: stable@vger.kernel.org
+Cc: Corey Minyard <corey@minyard.net>, Corey Minyard <cminyard@mvista.com>
+Message-ID: <20260501140658.707484-1-corey@minyard.net>
+
+From: Corey Minyard <corey@minyard.net>
+
+It was possible for the SSIF thread to stop and quit before the
+kthread_stop() call because ssif->stopping was set before the
+stop. So only exit the SSIF thread if kthread_should_stop()
+returns true.
+
+There is no need to wake the thread, as the wait will be interrupted
+by kthread_stop().
+
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+(cherry picked from commit 6bd0eb6d759b9a22c5509ea04e19c2e8407ba418)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -490,8 +490,6 @@ static int ipmi_ssif_thread(void *data)
+ /* Wait for something to do */
+ result = wait_for_completion_interruptible(
+ &ssif_info->wake_thread);
+- if (ssif_info->stopping)
+- break;
+ if (result == -ERESTARTSYS)
+ continue;
+ init_completion(&ssif_info->wake_thread);
+@@ -1289,10 +1287,8 @@ static void shutdown_ssif(void *send_inf
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->watch_timer);
+ del_timer_sync(&ssif_info->retry_timer);
+- if (ssif_info->thread) {
+- complete(&ssif_info->wake_thread);
++ if (ssif_info->thread)
+ kthread_stop(ssif_info->thread);
+- }
+ }
+
+ static void ssif_remove(struct i2c_client *client)
--- /dev/null
+From stable+bounces-241112-greg=kroah.com@vger.kernel.org Sat Apr 25 11:11:42 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 05:11:29 -0400
+Subject: ksmbd: replace connection list with hash table
+To: stable@vger.kernel.org
+Cc: Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425091130.3330505-2-sashal@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 0bcc831be535269556f59cb70396f7e34f03a276 ]
+
+Replace connection list with hash table to improve lookup performance.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: def036ef87f8 ("ksmbd: reset rcount per connection in ksmbd_conn_wait_idle_sess_id()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 23 +++++++++++------------
+ fs/smb/server/connection.h | 6 ++++--
+ fs/smb/server/smb2pdu.c | 4 ++--
+ fs/smb/server/transport_rdma.c | 5 +++++
+ fs/smb/server/transport_tcp.c | 25 +++++++++++++++++++++----
+ 5 files changed, 43 insertions(+), 20 deletions(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -19,7 +19,7 @@ static DEFINE_MUTEX(init_lock);
+
+ static struct ksmbd_conn_ops default_conn_ops;
+
+-LIST_HEAD(conn_list);
++DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
+ DECLARE_RWSEM(conn_list_lock);
+
+ /**
+@@ -33,7 +33,7 @@ DECLARE_RWSEM(conn_list_lock);
+ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ {
+ down_write(&conn_list_lock);
+- list_del(&conn->conns_list);
++ hash_del(&conn->hlist);
+ up_write(&conn_list_lock);
+
+ xa_destroy(&conn->sessions);
+@@ -78,7 +78,6 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+
+ init_waitqueue_head(&conn->req_running_q);
+ init_waitqueue_head(&conn->r_count_q);
+- INIT_LIST_HEAD(&conn->conns_list);
+ INIT_LIST_HEAD(&conn->requests);
+ INIT_LIST_HEAD(&conn->async_requests);
+ spin_lock_init(&conn->request_lock);
+@@ -91,19 +90,17 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+
+ init_rwsem(&conn->session_lock);
+
+- down_write(&conn_list_lock);
+- list_add(&conn->conns_list, &conn_list);
+- up_write(&conn_list_lock);
+ return conn;
+ }
+
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ {
+ struct ksmbd_conn *t;
++ int bkt;
+ bool ret = false;
+
+ down_read(&conn_list_lock);
+- list_for_each_entry(t, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, t, hlist) {
+ if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+ continue;
+
+@@ -164,9 +161,10 @@ void ksmbd_conn_unlock(struct ksmbd_conn
+ void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ {
+ struct ksmbd_conn *conn;
++ int bkt;
+
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id))
+ WRITE_ONCE(conn->status, status);
+ }
+@@ -182,14 +180,14 @@ int ksmbd_conn_wait_idle_sess_id(struct
+ {
+ struct ksmbd_conn *conn;
+ int rc, retry_count = 0, max_timeout = 120;
+- int rcount = 1;
++ int rcount = 1, bkt;
+
+ retry_idle:
+ if (retry_count >= max_timeout)
+ return -EIO;
+
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id)) {
+ if (conn == curr_conn)
+ rcount = 2;
+@@ -480,10 +478,11 @@ static void stop_sessions(void)
+ {
+ struct ksmbd_conn *conn;
+ struct ksmbd_transport *t;
++ int bkt;
+
+ again:
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ t = conn->transport;
+ ksmbd_conn_set_exiting(conn);
+ if (t->ops->shutdown) {
+@@ -494,7 +493,7 @@ again:
+ }
+ up_read(&conn_list_lock);
+
+- if (!list_empty(&conn_list)) {
++ if (!hash_empty(conn_list)) {
+ msleep(100);
+ goto again;
+ }
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -52,11 +52,12 @@ struct ksmbd_conn {
+ u8 inet6_addr[16];
+ #endif
+ };
++ unsigned int inet_hash;
+ char *request_buf;
+ struct ksmbd_transport *transport;
+ struct nls_table *local_nls;
+ struct unicode_map *um;
+- struct list_head conns_list;
++ struct hlist_node hlist;
+ struct rw_semaphore session_lock;
+ /* smb session 1 per user */
+ struct xarray sessions;
+@@ -151,7 +152,8 @@ struct ksmbd_transport {
+ #define KSMBD_TCP_SEND_TIMEOUT (5 * HZ)
+ #define KSMBD_TCP_PEER_SOCKADDR(c) ((struct sockaddr *)&((c)->peer_addr))
+
+-extern struct list_head conn_list;
++#define CONN_HASH_BITS 12
++extern DECLARE_HASHTABLE(conn_list, CONN_HASH_BITS);
+ extern struct rw_semaphore conn_list_lock;
+
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7348,7 +7348,7 @@ int smb2_lock(struct ksmbd_work *work)
+ int nolock = 0;
+ LIST_HEAD(lock_list);
+ LIST_HEAD(rollback_list);
+- int prior_lock = 0;
++ int prior_lock = 0, bkt;
+
+ WORK_BUFFERS(work, req, rsp);
+
+@@ -7458,7 +7458,7 @@ int smb2_lock(struct ksmbd_work *work)
+ nolock = 1;
+ /* check locks in connection list */
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each(conn_list, bkt, conn, hlist) {
+ spin_lock(&conn->llist_lock);
+ list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+ if (file_inode(cmp_lock->fl->fl_file) !=
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -381,6 +381,11 @@ static struct smb_direct_transport *allo
+ conn = ksmbd_conn_alloc();
+ if (!conn)
+ goto err;
++
++ down_write(&conn_list_lock);
++ hash_add(conn_list, &conn->hlist, 0);
++ up_write(&conn_list_lock);
++
+ conn->transport = KSMBD_TRANS(t);
+ KSMBD_TRANS(t)->conn = conn;
+ KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -89,13 +89,21 @@ static struct tcp_transport *alloc_trans
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (client_sk->sk->sk_family == AF_INET6)
++ if (client_sk->sk->sk_family == AF_INET6) {
+ memcpy(&conn->inet6_addr, &client_sk->sk->sk_v6_daddr, 16);
+- else
++ conn->inet_hash = ipv6_addr_hash(&client_sk->sk->sk_v6_daddr);
++ } else {
+ conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
++ conn->inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
++ }
+ #else
+ conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
++ conn->inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
+ #endif
++ down_write(&conn_list_lock);
++ hash_add(conn_list, &conn->hlist, conn->inet_hash);
++ up_write(&conn_list_lock);
++
+ conn->transport = KSMBD_TRANS(t);
+ KSMBD_TRANS(t)->conn = conn;
+ KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
+@@ -242,7 +250,7 @@ static int ksmbd_kthread_fn(void *p)
+ struct socket *client_sk = NULL;
+ struct interface *iface = (struct interface *)p;
+ struct ksmbd_conn *conn;
+- int ret;
++ int ret, inet_hash;
+ unsigned int max_ip_conns;
+
+ while (!kthread_should_stop()) {
+@@ -267,9 +275,18 @@ static int ksmbd_kthread_fn(void *p)
+ /*
+ * Limits repeated connections from clients with the same IP.
+ */
++#if IS_ENABLED(CONFIG_IPV6)
++ if (client_sk->sk->sk_family == AF_INET6)
++ inet_hash = ipv6_addr_hash(&client_sk->sk->sk_v6_daddr);
++ else
++ inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
++#else
++ inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
++#endif
++
+ max_ip_conns = 0;
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list) {
++ hash_for_each_possible(conn_list, conn, hlist, inet_hash) {
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (client_sk->sk->sk_family == AF_INET6) {
+ if (memcmp(&client_sk->sk->sk_v6_daddr,
--- /dev/null
+From stable+bounces-240658-greg=kroah.com@vger.kernel.org Fri Apr 24 15:05:17 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2026 09:04:01 -0400
+Subject: ksmbd: require minimum ACE size in smb_check_perm_dacl()
+To: stable@vger.kernel.org
+Cc: Michael Bommarito <michael.bommarito@gmail.com>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260424130401.1917926-3-sashal@kernel.org>
+
+From: Michael Bommarito <michael.bommarito@gmail.com>
+
+[ Upstream commit d07b26f39246a82399661936dd0c853983cfade7 ]
+
+Both ACE-walk loops in smb_check_perm_dacl() only guard against an
+under-sized remaining buffer, not against an ACE whose declared
+`ace->size` is smaller than the struct it claims to describe:
+
+ if (offsetof(struct smb_ace, access_req) > aces_size)
+ break;
+ ace_size = le16_to_cpu(ace->size);
+ if (ace_size > aces_size)
+ break;
+
+The first check only requires the 4-byte ACE header to be in bounds;
+it does not require access_req (4 bytes at offset 4) to be readable.
+An attacker who has set a crafted DACL on a file they own can declare
+ace->size == 4 with aces_size == 4, pass both checks, and then
+
+ granted |= le32_to_cpu(ace->access_req); /* upper loop */
+ compare_sids(&sid, &ace->sid); /* lower loop */
+
+reads access_req at offset 4 (OOB by up to 4 bytes) and ace->sid at
+offset 8 (OOB by up to CIFS_SID_BASE_SIZE + SID_MAX_SUB_AUTHORITIES
+* 4 bytes).
+
+Tighten both loops to require
+
+ ace_size >= offsetof(struct smb_ace, sid) + CIFS_SID_BASE_SIZE
+
+which is the smallest valid on-wire ACE layout (4-byte header +
+4-byte access_req + 8-byte sid base with zero sub-auths). Also
+reject ACEs whose sid.num_subauth exceeds SID_MAX_SUB_AUTHORITIES
+before letting compare_sids() dereference sub_auth[] entries.
+
+parse_sec_desc() already enforces an equivalent check (lines 441-448);
+smb_check_perm_dacl() simply grew weaker validation over time.
+
+Reachability: authenticated SMB client with permission to set an ACL
+on a file. On a subsequent CREATE against that file, the kernel
+walks the stored DACL via smb_check_perm_dacl() and triggers the
+OOB read. Not pre-auth, and the OOB read is not reflected to the
+attacker, but KASAN reports and kernel state corruption are
+possible.
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-6
+Assisted-by: Codex:gpt-5-4
+Signed-off-by: Michael Bommarito <michael.bommarito@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smbacl.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -1298,10 +1298,13 @@ int smb_check_perm_dacl(struct ksmbd_con
+ ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ aces_size = acl_size - sizeof(struct smb_acl);
+ for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
+- if (offsetof(struct smb_ace, access_req) > aces_size)
++ if (offsetof(struct smb_ace, sid) +
++ CIFS_SID_BASE_SIZE > aces_size)
+ break;
+ ace_size = le16_to_cpu(ace->size);
+- if (ace_size > aces_size)
++ if (ace_size > aces_size ||
++ ace_size < offsetof(struct smb_ace, sid) +
++ CIFS_SID_BASE_SIZE)
+ break;
+ aces_size -= ace_size;
+ granted |= le32_to_cpu(ace->access_req);
+@@ -1319,13 +1322,19 @@ int smb_check_perm_dacl(struct ksmbd_con
+ ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ aces_size = acl_size - sizeof(struct smb_acl);
+ for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
+- if (offsetof(struct smb_ace, access_req) > aces_size)
++ if (offsetof(struct smb_ace, sid) +
++ CIFS_SID_BASE_SIZE > aces_size)
+ break;
+ ace_size = le16_to_cpu(ace->size);
+- if (ace_size > aces_size)
++ if (ace_size > aces_size ||
++ ace_size < offsetof(struct smb_ace, sid) +
++ CIFS_SID_BASE_SIZE)
+ break;
+ aces_size -= ace_size;
+
++ if (ace->sid.num_subauth > SID_MAX_SUB_AUTHORITIES)
++ break;
++
+ if (!compare_sids(&sid, &ace->sid) ||
+ !compare_sids(&sid_unix_NFS_mode, &ace->sid)) {
+ found = 1;
--- /dev/null
+From stable+bounces-241113-greg=kroah.com@vger.kernel.org Sat Apr 25 11:11:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 05:11:30 -0400
+Subject: ksmbd: reset rcount per connection in ksmbd_conn_wait_idle_sess_id()
+To: stable@vger.kernel.org
+Cc: DaeMyung Kang <charsyam@gmail.com>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425091130.3330505-3-sashal@kernel.org>
+
+From: DaeMyung Kang <charsyam@gmail.com>
+
+[ Upstream commit def036ef87f8641c1c525d5ae17438d7a1006491 ]
+
+rcount is intended to be connection-specific: 2 for curr_conn, 1 for
+every other connection sharing the same session. However, it is
+initialised only once before the hash iteration and is never reset.
+After the loop visits curr_conn, later sibling connections are also
+checked against rcount == 2, so a sibling with req_running == 1 is
+incorrectly treated as idle. This makes the outcome depend on the
+hash iteration order: whether a given sibling is checked against the
+loose (< 2) or the strict (< 1) threshold is decided by whether it
+happens to be visited before or after curr_conn.
+
+The function's contract is "wait until every connection sharing this
+session is idle" so that destroy_previous_session() can safely tear
+the session down. The latched rcount violates that contract and
+reopens the teardown race window the wait logic was meant to close:
+destroy_previous_session() may proceed before sibling channels have
+actually quiesced, overlapping session teardown with in-flight work
+on those connections.
+
+Recompute rcount inside the loop so each connection is compared
+against its own threshold regardless of iteration order.
+
+This is a code-inspection fix for an iteration-order-dependent logic
+error; a targeted reproducer would require SMB3 multichannel with
+in-flight work on a sibling channel landing after curr_conn in hash
+order, which is not something that can be triggered reliably.
+
+Fixes: 76e98a158b20 ("ksmbd: fix race condition between destroy_previous_session() and smb2 operations()")
+Cc: stable@vger.kernel.org
+Signed-off-by: DaeMyung Kang <charsyam@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -180,7 +180,7 @@ int ksmbd_conn_wait_idle_sess_id(struct
+ {
+ struct ksmbd_conn *conn;
+ int rc, retry_count = 0, max_timeout = 120;
+- int rcount = 1, bkt;
++ int rcount, bkt;
+
+ retry_idle:
+ if (retry_count >= max_timeout)
+@@ -189,8 +189,7 @@ retry_idle:
+ down_read(&conn_list_lock);
+ hash_for_each(conn_list, bkt, conn, hlist) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id)) {
+- if (conn == curr_conn)
+- rcount = 2;
++ rcount = (conn == curr_conn) ? 2 : 1;
+ if (atomic_read(&conn->req_running) >= rcount) {
+ rc = wait_event_timeout(conn->req_running_q,
+ atomic_read(&conn->req_running) < rcount,
--- /dev/null
+From stable+bounces-241111-greg=kroah.com@vger.kernel.org Sat Apr 25 11:11:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 05:11:28 -0400
+Subject: ksmbd: use msleep instaed of schedule_timeout_interruptible()
+To: stable@vger.kernel.org
+Cc: Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260425091130.3330505-1-sashal@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit f75f8bdd4ff4830abe31a1b94892eb12b85b9535 ]
+
+use msleep instaed of schedule_timeout_interruptible()
+to guarantee the task delays as expected.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: def036ef87f8 ("ksmbd: reset rcount per connection in ksmbd_conn_wait_idle_sess_id()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -495,7 +495,7 @@ again:
+ up_read(&conn_list_lock);
+
+ if (!list_empty(&conn_list)) {
+- schedule_timeout_interruptible(HZ / 10); /* 100ms */
++ msleep(100);
+ goto again;
+ }
+ }
--- /dev/null
+From stable+bounces-241755-greg=kroah.com@vger.kernel.org Tue Apr 28 20:20:17 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2026 14:19:44 -0400
+Subject: lib: test_hmm: evict device pages on file close to avoid use-after-free
+To: stable@vger.kernel.org
+Cc: Alistair Popple <apopple@nvidia.com>, Zenghui Yu <zenghui.yu@linux.dev>, Balbir Singh <balbirs@nvidia.com>, David Hildenbrand <david@kernel.org>, Jason Gunthorpe <jgg@ziepe.ca>, Leon Romanovsky <leon@kernel.org>, Liam Howlett <liam.howlett@oracle.com>, "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>, Michal Hocko <mhocko@suse.com>, Mike Rapoport <rppt@kernel.org>, Suren Baghdasaryan <surenb@google.com>, Matthew Brost <matthew.brost@intel.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260428181944.3126951-1-sashal@kernel.org>
+
+From: Alistair Popple <apopple@nvidia.com>
+
+[ Upstream commit 744dd97752ef1076a8d8672bb0d8aa2c7abc1144 ]
+
+Patch series "Minor hmm_test fixes and cleanups".
+
+Two bugfixes a cleanup for the HMM kernel selftests. These were mostly
+reported by Zenghui Yu with special thanks to Lorenzo for analysing and
+pointing out the problems.
+
+This patch (of 3):
+
+When dmirror_fops_release() is called it frees the dmirror struct but
+doesn't migrate device private pages back to system memory first. This
+leaves those pages with a dangling zone_device_data pointer to the freed
+dmirror.
+
+If a subsequent fault occurs on those pages (eg. during coredump) the
+dmirror_devmem_fault() callback dereferences the stale pointer causing a
+kernel panic. This was reported [1] when running mm/ksft_hmm.sh on arm64,
+where a test failure triggered SIGABRT and the resulting coredump walked
+the VMAs faulting in the stale device private pages.
+
+Fix this by calling dmirror_device_evict_chunk() for each devmem chunk in
+dmirror_fops_release() to migrate all device private pages back to system
+memory before freeing the dmirror struct. The function is moved earlier
+in the file to avoid a forward declaration.
+
+Link: https://lore.kernel.org/20260331063445.3551404-1-apopple@nvidia.com
+Link: https://lore.kernel.org/20260331063445.3551404-2-apopple@nvidia.com
+Fixes: b2ef9f5a5cb3 ("mm/hmm/test: add selftest driver for HMM")
+Signed-off-by: Alistair Popple <apopple@nvidia.com>
+Reported-by: Zenghui Yu <zenghui.yu@linux.dev>
+Closes: https://lore.kernel.org/linux-mm/8bd0396a-8997-4d2e-a13f-5aac033083d7@linux.dev/
+Reviewed-by: Balbir Singh <balbirs@nvidia.com>
+Tested-by: Zenghui Yu <zenghui.yu@linux.dev>
+Cc: David Hildenbrand <david@kernel.org>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Leon Romanovsky <leon@kernel.org>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Zenghui Yu <zenghui.yu@linux.dev>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ kept the existing simpler `dmirror_device_evict_chunk()` body instead of the upstream compound-folio version ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/test_hmm.c | 86 ++++++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 49 insertions(+), 37 deletions(-)
+
+--- a/lib/test_hmm.c
++++ b/lib/test_hmm.c
+@@ -183,11 +183,60 @@ static int dmirror_fops_open(struct inod
+ return 0;
+ }
+
++static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
++{
++ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
++ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
++ unsigned long npages = end_pfn - start_pfn + 1;
++ unsigned long i;
++ unsigned long *src_pfns;
++ unsigned long *dst_pfns;
++
++ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
++ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
++
++ migrate_device_range(src_pfns, start_pfn, npages);
++ for (i = 0; i < npages; i++) {
++ struct page *dpage, *spage;
++
++ spage = migrate_pfn_to_page(src_pfns[i]);
++ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
++ continue;
++
++ if (WARN_ON(!is_device_private_page(spage) &&
++ !is_device_coherent_page(spage)))
++ continue;
++ spage = BACKING_PAGE(spage);
++ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
++ lock_page(dpage);
++ copy_highpage(dpage, spage);
++ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
++ if (src_pfns[i] & MIGRATE_PFN_WRITE)
++ dst_pfns[i] |= MIGRATE_PFN_WRITE;
++ }
++ migrate_device_pages(src_pfns, dst_pfns, npages);
++ migrate_device_finalize(src_pfns, dst_pfns, npages);
++ kvfree(src_pfns);
++ kvfree(dst_pfns);
++}
++
+ static int dmirror_fops_release(struct inode *inode, struct file *filp)
+ {
+ struct dmirror *dmirror = filp->private_data;
++ struct dmirror_device *mdevice = dmirror->mdevice;
++ int i;
+
+ mmu_interval_notifier_remove(&dmirror->notifier);
++
++ if (mdevice->devmem_chunks) {
++ for (i = 0; i < mdevice->devmem_count; i++) {
++ struct dmirror_chunk *devmem =
++ mdevice->devmem_chunks[i];
++
++ dmirror_device_evict_chunk(devmem);
++ }
++ }
++
+ xa_destroy(&dmirror->pt);
+ kfree(dmirror);
+ return 0;
+@@ -1217,43 +1266,6 @@ static int dmirror_snapshot(struct dmirr
+ return ret;
+ }
+
+-static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+-{
+- unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+- unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+- unsigned long npages = end_pfn - start_pfn + 1;
+- unsigned long i;
+- unsigned long *src_pfns;
+- unsigned long *dst_pfns;
+-
+- src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+- dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+-
+- migrate_device_range(src_pfns, start_pfn, npages);
+- for (i = 0; i < npages; i++) {
+- struct page *dpage, *spage;
+-
+- spage = migrate_pfn_to_page(src_pfns[i]);
+- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+- continue;
+-
+- if (WARN_ON(!is_device_private_page(spage) &&
+- !is_device_coherent_page(spage)))
+- continue;
+- spage = BACKING_PAGE(spage);
+- dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+- lock_page(dpage);
+- copy_highpage(dpage, spage);
+- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+- if (src_pfns[i] & MIGRATE_PFN_WRITE)
+- dst_pfns[i] |= MIGRATE_PFN_WRITE;
+- }
+- migrate_device_pages(src_pfns, dst_pfns, npages);
+- migrate_device_finalize(src_pfns, dst_pfns, npages);
+- kvfree(src_pfns);
+- kvfree(dst_pfns);
+-}
+-
+ /* Removes free pages from the free list so they can't be re-allocated */
+ static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
+ {
--- /dev/null
+From stable+bounces-242523-greg=kroah.com@vger.kernel.org Fri May 1 23:07:36 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 17:06:21 -0400
+Subject: media: rc: igorplugusb: heed coherency rules
+To: stable@vger.kernel.org
+Cc: Oliver Neukum <oneukum@suse.com>, Sean Young <sean@mess.org>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501210621.4038633-1-sashal@kernel.org>
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit eac69475b01fe1e861dfe3960b57fa95671c132e ]
+
+In a control request, the USB request structure
+can be subject to DMA on some HCs. Hence it must obey
+the rules for DMA coherency. Allocate it separately.
+
+Fixes: b1c97193c6437 ("[media] rc: port IgorPlug-USB to rc-core")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ replaced kzalloc_obj(*ir->request, GFP_KERNEL) with kzalloc(sizeof(*ir->request), GFP_KERNEL) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/rc/igorplugusb.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/rc/igorplugusb.c
++++ b/drivers/media/rc/igorplugusb.c
+@@ -34,7 +34,7 @@ struct igorplugusb {
+ struct device *dev;
+
+ struct urb *urb;
+- struct usb_ctrlrequest request;
++ struct usb_ctrlrequest *request;
+
+ struct timer_list timer;
+
+@@ -122,7 +122,7 @@ static void igorplugusb_cmd(struct igorp
+ {
+ int ret;
+
+- ir->request.bRequest = cmd;
++ ir->request->bRequest = cmd;
+ ir->urb->transfer_flags = 0;
+ ret = usb_submit_urb(ir->urb, GFP_ATOMIC);
+ if (ret && ret != -EPERM)
+@@ -164,13 +164,17 @@ static int igorplugusb_probe(struct usb_
+ if (!ir)
+ return -ENOMEM;
+
++ ir->request = kzalloc(sizeof(*ir->request), GFP_KERNEL);
++ if (!ir->request)
++ goto fail;
++
+ ir->dev = &intf->dev;
+
+ timer_setup(&ir->timer, igorplugusb_timer, 0);
+
+- ir->request.bRequest = GET_INFRACODE;
+- ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+- ir->request.wLength = cpu_to_le16(MAX_PACKET);
++ ir->request->bRequest = GET_INFRACODE;
++ ir->request->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
++ ir->request->wLength = cpu_to_le16(MAX_PACKET);
+
+ ir->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ir->urb)
+@@ -228,6 +232,7 @@ fail:
+ usb_free_urb(ir->urb);
+ rc_free_device(ir->rc);
+ kfree(ir->buf_in);
++ kfree(ir->request);
+
+ return ret;
+ }
+@@ -243,6 +248,7 @@ static void igorplugusb_disconnect(struc
+ usb_unpoison_urb(ir->urb);
+ usb_free_urb(ir->urb);
+ kfree(ir->buf_in);
++ kfree(ir->request);
+ }
+
+ static const struct usb_device_id igorplugusb_table[] = {
--- /dev/null
+From stable+bounces-242470-greg=kroah.com@vger.kernel.org Fri May 1 18:59:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 12:59:04 -0400
+Subject: media: rc: ttusbir: respect DMA coherency rules
+To: stable@vger.kernel.org
+Cc: Oliver Neukum <oneukum@suse.com>, Sean Young <sean@mess.org>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501165904.3629254-1-sashal@kernel.org>
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit 50acaad3d202c064779db8dc3d010007347f59c7 ]
+
+Buffers must not share a cache line with other data structures.
+Allocate separately.
+
+Fixes: 0938069fa0897 ("[media] rc: Add support for the TechnoTrend USB IR Receiver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ kept kzalloc(sizeof(*tt), GFP_KERNEL) instead of kzalloc_obj() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/rc/ttusbir.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/media/rc/ttusbir.c
++++ b/drivers/media/rc/ttusbir.c
+@@ -32,7 +32,7 @@ struct ttusbir {
+
+ struct led_classdev led;
+ struct urb *bulk_urb;
+- uint8_t bulk_buffer[5];
++ u8 *bulk_buffer;
+ int bulk_out_endp, iso_in_endp;
+ bool led_on, is_led_on;
+ atomic_t led_complete;
+@@ -186,13 +186,16 @@ static int ttusbir_probe(struct usb_inte
+ struct rc_dev *rc;
+ int i, j, ret;
+ int altsetting = -1;
++ u8 *buffer;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
++ buffer = kzalloc(5, GFP_KERNEL);
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
+- if (!tt || !rc) {
++ if (!tt || !rc || !buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
++ tt->bulk_buffer = buffer;
+
+ /* find the correct alt setting */
+ for (i = 0; i < intf->num_altsetting && altsetting == -1; i++) {
+@@ -281,8 +284,8 @@ static int ttusbir_probe(struct usb_inte
+ tt->bulk_buffer[3] = 0x01;
+
+ usb_fill_bulk_urb(tt->bulk_urb, tt->udev, usb_sndbulkpipe(tt->udev,
+- tt->bulk_out_endp), tt->bulk_buffer, sizeof(tt->bulk_buffer),
+- ttusbir_bulk_complete, tt);
++ tt->bulk_out_endp), tt->bulk_buffer, 5,
++ ttusbir_bulk_complete, tt);
+
+ tt->led.name = "ttusbir:green:power";
+ tt->led.default_trigger = "rc-feedback";
+@@ -351,6 +354,7 @@ out:
+ kfree(tt);
+ }
+ rc_free_device(rc);
++ kfree(buffer);
+
+ return ret;
+ }
+@@ -373,6 +377,7 @@ static void ttusbir_disconnect(struct us
+ }
+ usb_kill_urb(tt->bulk_urb);
+ usb_free_urb(tt->bulk_urb);
++ kfree(tt->bulk_buffer);
+ usb_set_intfdata(intf, NULL);
+ kfree(tt);
+ }
--- /dev/null
+From stable+bounces-244779-greg=kroah.com@vger.kernel.org Fri May 8 16:57:18 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 10:52:22 -0400
+Subject: mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs
+To: stable@vger.kernel.org
+Cc: Luke Wang <ziniu.wang_1@nxp.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260508145222.1512925-1-sashal@kernel.org>
+
+From: Luke Wang <ziniu.wang_1@nxp.com>
+
+[ Upstream commit d6bf2e64dec87322f2b11565ddb59c0e967f96e3 ]
+
+Kingston eMMC IY2964 and IB2932 takes a fixed ~2 seconds for each secure
+erase/trim operation regardless of size - that is, a single secure
+erase/trim operation of 1MB takes the same time as 1GB. With default
+calculated 3.5MB max discard size, secure erase 1GB requires ~300 separate
+operations taking ~10 minutes total.
+
+Add a card quirk, MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME, to set maximum
+secure erase size for those devices. This allows 1GB secure erase to
+complete in a single operation, reducing time from 10 minutes to just 2
+seconds.
+
+Signed-off-by: Luke Wang <ziniu.wang_1@nxp.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+[ adapted `lim->max_secure_erase_sectors =` assignment to `blk_queue_max_secure_erase_sectors(q, ...)` setter and used pre-rename `mmc_can_secure_erase_trim`/`mmc_can_trim` helpers ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/card.h | 5 +++++
+ drivers/mmc/core/queue.c | 8 ++++++--
+ drivers/mmc/core/quirks.h | 9 +++++++++
+ include/linux/mmc/card.h | 1 +
+ 4 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -297,4 +297,9 @@ static inline int mmc_card_no_uhs_ddr50_
+ return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
+ }
+
++static inline int mmc_card_fixed_secure_erase_trim_time(const struct mmc_card *c)
++{
++ return c->quirks & MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME;
++}
++
+ #endif
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -188,8 +188,12 @@ static void mmc_queue_setup_discard(stru
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = SECTOR_SIZE;
+- if (mmc_can_secure_erase_trim(card))
+- blk_queue_max_secure_erase_sectors(q, max_discard);
++ if (mmc_can_secure_erase_trim(card)) {
++ if (mmc_card_fixed_secure_erase_trim_time(card))
++ blk_queue_max_secure_erase_sectors(q, UINT_MAX >> card->erase_shift);
++ else
++ blk_queue_max_secure_erase_sectors(q, max_discard);
++ }
+ if (mmc_can_trim(card) && card->erased_byte == 0)
+ blk_queue_max_write_zeroes_sectors(q, max_discard);
+ }
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -153,6 +153,15 @@ static const struct mmc_fixup __maybe_un
+ MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
++ /*
++ * On Some Kingston eMMCs, secure erase/trim time is independent
++ * of erase size, fixed at approximately 2 seconds.
++ */
++ MMC_FIXUP("IY2964", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
++ MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME),
++ MMC_FIXUP("IB2932", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
++ MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME),
++
+ END_FIXUP
+ };
+
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -298,6 +298,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+ #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */
+ #define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
++#define MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME (1<<20) /* Secure erase/trim time is fixed regardless of size */
+
+ bool written_flag; /* Indicates eMMC has been written since power on */
+ bool reenable_cmdq; /* Re-enable Command Queue */
--- /dev/null
+From stable+bounces-242864-greg=kroah.com@vger.kernel.org Mon May 4 09:47:30 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 03:47:18 -0400
+Subject: net: bridge: use a stable FDB dst snapshot in RCU readers
+To: stable@vger.kernel.org
+Cc: Zhengchuan Liang <zcliangcn@gmail.com>, stable@kernel.org, Yifan Wu <yifanwucs@gmail.com>, Juefei Pu <tomapufckgml@gmail.com>, Yuan Tan <yuantan098@gmail.com>, Xin Liu <bird@lzu.edu.cn>, Ren Wei <enjou1224z@gmail.com>, Ren Wei <n05ec@lzu.edu.cn>, Ido Schimmel <idosch@nvidia.com>, Nikolay Aleksandrov <razor@blackwall.org>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504074719.1856971-1-sashal@kernel.org>
+
+From: Zhengchuan Liang <zcliangcn@gmail.com>
+
+[ Upstream commit df4601653201de21b487c3e7fffd464790cab808 ]
+
+Local FDB entries can be rewritten in place by `fdb_delete_local()`, which
+updates `f->dst` to another port or to `NULL` while keeping the entry
+alive. Several bridge RCU readers inspect `f->dst`, including
+`br_fdb_fillbuf()` through the `brforward_read()` sysfs path.
+
+These readers currently load `f->dst` multiple times and can therefore
+observe inconsistent values across the check and later dereference.
+In `br_fdb_fillbuf()`, this means a concurrent local-FDB update can change
+`f->dst` after the NULL check and before the `port_no` dereference,
+leading to a NULL-ptr-deref.
+
+Fix this by taking a single `READ_ONCE()` snapshot of `f->dst` in each
+affected RCU reader and using that snapshot for the rest of the access
+sequence. Also publish the in-place `f->dst` updates in `fdb_delete_local()`
+with `WRITE_ONCE()` so the readers and writer use matching access patterns.
+
+Fixes: 960b589f86c7 ("bridge: Properly check if local fdb entry can be deleted in br_fdb_change_mac_address")
+Cc: stable@kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Tested-by: Ren Wei <enjou1224z@gmail.com>
+Signed-off-by: Zhengchuan Liang <zcliangcn@gmail.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/6570fabb85ecadb8baaf019efe856f407711c7b9.1776043229.git.zcliangcn@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ kept `*idx < cb->args[2]` instead of `*idx < ctx->fdb_idx` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_arp_nd_proxy.c | 8 +++++---
+ net/bridge/br_fdb.c | 28 ++++++++++++++++++----------
+ 2 files changed, 23 insertions(+), 13 deletions(-)
+
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -199,11 +199,12 @@ void br_do_proxy_suppress_arp(struct sk_
+
+ f = br_fdb_find_rcu(br, n->ha, vid);
+ if (f) {
++ const struct net_bridge_port *dst = READ_ONCE(f->dst);
+ bool replied = false;
+
+ if ((p && (p->flags & BR_PROXYARP)) ||
+- (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) ||
+- br_is_neigh_suppress_enabled(f->dst, vid)) {
++ (dst && (dst->flags & BR_PROXYARP_WIFI)) ||
++ br_is_neigh_suppress_enabled(dst, vid)) {
+ if (!vid)
+ br_arp_send(br, p, skb->dev, sip, tip,
+ sha, n->ha, sha, 0, 0);
+@@ -463,9 +464,10 @@ void br_do_suppress_nd(struct sk_buff *s
+
+ f = br_fdb_find_rcu(br, n->ha, vid);
+ if (f) {
++ const struct net_bridge_port *dst = READ_ONCE(f->dst);
+ bool replied = false;
+
+- if (br_is_neigh_suppress_enabled(f->dst, vid)) {
++ if (br_is_neigh_suppress_enabled(dst, vid)) {
+ if (vid != 0)
+ br_nd_send(br, p, skb, n,
+ skb->vlan_proto,
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -246,6 +246,7 @@ struct net_device *br_fdb_find_port(cons
+ const unsigned char *addr,
+ __u16 vid)
+ {
++ const struct net_bridge_port *dst;
+ struct net_bridge_fdb_entry *f;
+ struct net_device *dev = NULL;
+ struct net_bridge *br;
+@@ -258,8 +259,11 @@ struct net_device *br_fdb_find_port(cons
+ br = netdev_priv(br_dev);
+ rcu_read_lock();
+ f = br_fdb_find_rcu(br, addr, vid);
+- if (f && f->dst)
+- dev = f->dst->dev;
++ if (f) {
++ dst = READ_ONCE(f->dst);
++ if (dst)
++ dev = dst->dev;
++ }
+ rcu_read_unlock();
+
+ return dev;
+@@ -349,7 +353,7 @@ static void fdb_delete_local(struct net_
+ vg = nbp_vlan_group(op);
+ if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+ (!vid || br_vlan_find(vg, vid))) {
+- f->dst = op;
++ WRITE_ONCE(f->dst, op);
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+ return;
+ }
+@@ -360,7 +364,7 @@ static void fdb_delete_local(struct net_
+ /* Maybe bridge device has same hw addr? */
+ if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+ (!vid || (v && br_vlan_should_use(v)))) {
+- f->dst = NULL;
++ WRITE_ONCE(f->dst, NULL);
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+ return;
+ }
+@@ -790,6 +794,7 @@ int br_fdb_test_addr(struct net_device *
+ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
+ unsigned long maxnum, unsigned long skip)
+ {
++ const struct net_bridge_port *dst;
+ struct net_bridge_fdb_entry *f;
+ struct __fdb_entry *fe = buf;
+ unsigned long delta;
+@@ -806,7 +811,8 @@ int br_fdb_fillbuf(struct net_bridge *br
+ continue;
+
+ /* ignore pseudo entry for local MAC address */
+- if (!f->dst)
++ dst = READ_ONCE(f->dst);
++ if (!dst)
+ continue;
+
+ if (skip) {
+@@ -818,8 +824,8 @@ int br_fdb_fillbuf(struct net_bridge *br
+ memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
+
+ /* due to ABI compat need to split into hi/lo */
+- fe->port_no = f->dst->port_no;
+- fe->port_hi = f->dst->port_no >> 8;
++ fe->port_no = dst->port_no;
++ fe->port_hi = dst->port_no >> 8;
+
+ fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+ if (!test_bit(BR_FDB_STATIC, &f->flags)) {
+@@ -940,9 +946,11 @@ int br_fdb_dump(struct sk_buff *skb,
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
++ const struct net_bridge_port *dst = READ_ONCE(f->dst);
++
+ if (*idx < cb->args[2])
+ goto skip;
+- if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
++ if (filter_dev && (!dst || dst->dev != filter_dev)) {
+ if (filter_dev != dev)
+ goto skip;
+ /* !f->dst is a special case for bridge
+@@ -950,10 +958,10 @@ int br_fdb_dump(struct sk_buff *skb,
+ * Therefore need a little more filtering
+ * we only want to dump the !f->dst case
+ */
+- if (f->dst)
++ if (dst)
+ goto skip;
+ }
+- if (!filter_dev && f->dst)
++ if (!filter_dev && dst)
+ goto skip;
+
+ err = fdb_fill_info(skb, br, f,
--- /dev/null
+From stable+bounces-242867-greg=kroah.com@vger.kernel.org Mon May 4 10:14:25 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 04:08:48 -0400
+Subject: net: mctp: fix don't require received header reserved bits to be zero
+To: stable@vger.kernel.org
+Cc: Yuan Zhaoming <yuanzm2@lenovo.com>, Jeremy Kerr <jk@codeconstruct.com.au>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504080848.1870640-1-sashal@kernel.org>
+
+From: Yuan Zhaoming <yuanzm2@lenovo.com>
+
+[ Upstream commit a663bac71a2f0b3ac6c373168ca57b2a6e6381aa ]
+
+>From the MCTP Base specification (DSP0236 v1.2.1), the first byte of
+the MCTP header contains a 4 bit reserved field, and 4 bit version.
+
+On our current receive path, we require those 4 reserved bits to be
+zero, but the 9500-8i card is non-conformant, and may set these
+reserved bits.
+
+DSP0236 states that the reserved bits must be written as zero, and
+ignored when read. While the device might not conform to the former,
+we should accept these message to conform to the latter.
+
+Relax our check on the MCTP version byte to allow non-zero bits in the
+reserved field.
+
+Fixes: 889b7da23abf ("mctp: Add initial routing framework")
+Signed-off-by: Yuan Zhaoming <yuanzm2@lenovo.com>
+Cc: stable@vger.kernel.org
+Acked-by: Jeremy Kerr <jk@codeconstruct.com.au>
+Link: https://patch.msgid.link/20260417141340.5306-1-yuanzhaoming901030@126.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/mctp.h | 3 +++
+ net/mctp/route.c | 8 ++++++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/include/net/mctp.h
++++ b/include/net/mctp.h
+@@ -26,6 +26,9 @@ struct mctp_hdr {
+ #define MCTP_VER_MIN 1
+ #define MCTP_VER_MAX 1
+
++/* Definitions for ver field */
++#define MCTP_HDR_VER_MASK GENMASK(3, 0)
++
+ /* Definitions for flags_seq_tag field */
+ #define MCTP_HDR_FLAG_SOM BIT(7)
+ #define MCTP_HDR_FLAG_EOM BIT(6)
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -335,6 +335,7 @@ static int mctp_route_input(struct mctp_
+ unsigned long f;
+ u8 tag, flags;
+ int rc;
++ u8 ver;
+
+ msk = NULL;
+ rc = -EINVAL;
+@@ -357,7 +358,8 @@ static int mctp_route_input(struct mctp_
+ mh = mctp_hdr(skb);
+ skb_pull(skb, sizeof(struct mctp_hdr));
+
+- if (mh->ver != 1)
++ ver = mh->ver & MCTP_HDR_VER_MASK;
++ if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
+ goto out;
+
+ flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
+@@ -1124,6 +1126,7 @@ static int mctp_pkttype_receive(struct s
+ struct mctp_skb_cb *cb;
+ struct mctp_route *rt;
+ struct mctp_hdr *mh;
++ u8 ver;
+
+ rcu_read_lock();
+ mdev = __mctp_dev_get(dev);
+@@ -1141,7 +1144,8 @@ static int mctp_pkttype_receive(struct s
+
+ /* We have enough for a header; decode and route */
+ mh = mctp_hdr(skb);
+- if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
++ ver = mh->ver & MCTP_HDR_VER_MASK;
++ if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
+ goto err_drop;
+
+ /* source must be valid unicast or null; drop reserved ranges and
--- /dev/null
+From stable+bounces-242830-greg=kroah.com@vger.kernel.org Mon May 4 06:11:20 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 00:11:11 -0400
+Subject: net: qrtr: ns: Limit the maximum number of lookups
+To: stable@vger.kernel.org
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504041111.1425615-1-sashal@kernel.org>
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+[ Upstream commit 5640227d9a21c6a8be249a10677b832e7f40dc55 ]
+
+Current code does no bound checking on the number of lookups a client can
+perform. Though the code restricts the lookups to local clients, there is
+still a possibility of a malicious local client sending a flood of
+NEW_LOOKUP messages over the same socket.
+
+Fix this issue by limiting the maximum number of lookups to 64 globally.
+Since the nameserver allows only at most one local observer, this global
+lookup count will ensure that the lookups stay within the limit.
+
+Note that, limit of 64 is chosen based on the current platform
+requirements. If requirement changes in the future, this limit can be
+increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-2-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ adapted comment block to only mention QRTR_NS_MAX_LOOKUPS and kept kzalloc() instead of kzalloc_obj() due to missing prerequisite commits ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -22,6 +22,7 @@ static struct {
+ struct socket *sock;
+ struct sockaddr_qrtr bcast_sq;
+ struct list_head lookups;
++ u32 lookup_count;
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ void (*saved_data_ready)(struct sock *sk);
+@@ -76,6 +77,11 @@ struct qrtr_node {
+ */
+ #define QRTR_NS_MAX_SERVERS 256
+
++/* Max lookup limit is chosen based on the current platform requirements. If the
++ * requirement changes in the future, this value can be increased.
++ */
++#define QRTR_NS_MAX_LOOKUPS 64
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+ struct qrtr_node *node;
+@@ -441,6 +447,7 @@ static int ctrl_cmd_del_client(struct so
+
+ list_del(&lookup->li);
+ kfree(lookup);
++ qrtr_ns.lookup_count--;
+ }
+
+ /* Remove the server belonging to this port but don't broadcast
+@@ -558,6 +565,11 @@ static int ctrl_cmd_new_lookup(struct so
+ if (from->sq_node != qrtr_ns.local_node)
+ return -EINVAL;
+
++ if (qrtr_ns.lookup_count >= QRTR_NS_MAX_LOOKUPS) {
++ pr_err_ratelimited("QRTR client node exceeds max lookup limit!\n");
++ return -ENOSPC;
++ }
++
+ lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+@@ -566,6 +578,7 @@ static int ctrl_cmd_new_lookup(struct so
+ lookup->service = service;
+ lookup->instance = instance;
+ list_add_tail(&lookup->li, &qrtr_ns.lookups);
++ qrtr_ns.lookup_count++;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.service = service;
+@@ -606,6 +619,7 @@ static void ctrl_cmd_del_lookup(struct s
+
+ list_del(&lookup->li);
+ kfree(lookup);
++ qrtr_ns.lookup_count--;
+ }
+ }
+
--- /dev/null
+From stable+bounces-242831-greg=kroah.com@vger.kernel.org Mon May 4 06:11:25 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 00:11:15 -0400
+Subject: net: qrtr: ns: Limit the maximum server registration per node
+To: stable@vger.kernel.org
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>, Yiming Qian <yimingqian591@gmail.com>, Simon Horman <horms@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504041115.1425803-1-sashal@kernel.org>
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+[ Upstream commit d5ee2ff98322337951c56398e79d51815acbf955 ]
+
+Current code does no bound checking on the number of servers added per
+node. A malicious client can flood NEW_SERVER messages and exhaust memory.
+
+Fix this issue by limiting the maximum number of server registrations to
+256 per node. If the NEW_SERVER message is received for an old port, then
+don't restrict it as it will get replaced. While at it, also rate limit
+the error messages in the failure path of qrtr_ns_worker().
+
+Note that the limit of 256 is chosen based on the current platform
+requirements. If requirement changes in the future, this limit can be
+increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Reported-by: Yiming Qian <yimingqian591@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-1-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c | 26 +++++++++++++++++++++-----
+ 1 file changed, 21 insertions(+), 5 deletions(-)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -68,8 +68,14 @@ struct qrtr_server {
+ struct qrtr_node {
+ unsigned int id;
+ struct xarray servers;
++ u32 server_count;
+ };
+
++/* Max server limit is chosen based on the current platform requirements. If the
++ * requirement changes in the future, this value can be increased.
++ */
++#define QRTR_NS_MAX_SERVERS 256
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+ struct qrtr_node *node;
+@@ -227,6 +233,17 @@ static struct qrtr_server *server_add(un
+ if (!service || !port)
+ return NULL;
+
++ node = node_get(node_id);
++ if (!node)
++ return NULL;
++
++ /* Make sure the new servers per port are capped at the maximum value */
++ old = xa_load(&node->servers, port);
++ if (!old && node->server_count >= QRTR_NS_MAX_SERVERS) {
++ pr_err_ratelimited("QRTR client node %u exceeds max server limit!\n", node_id);
++ return NULL;
++ }
++
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return NULL;
+@@ -236,10 +253,6 @@ static struct qrtr_server *server_add(un
+ srv->node = node_id;
+ srv->port = port;
+
+- node = node_get(node_id);
+- if (!node)
+- goto err;
+-
+ /* Delete the old server on the same port */
+ old = xa_store(&node->servers, port, srv, GFP_KERNEL);
+ if (old) {
+@@ -250,6 +263,8 @@ static struct qrtr_server *server_add(un
+ } else {
+ kfree(old);
+ }
++ } else {
++ node->server_count++;
+ }
+
+ trace_qrtr_ns_server_add(srv->service, srv->instance,
+@@ -290,6 +305,7 @@ static int server_del(struct qrtr_node *
+ }
+
+ kfree(srv);
++ node->server_count--;
+
+ return 0;
+ }
+@@ -678,7 +694,7 @@ static void qrtr_ns_worker(struct work_s
+ }
+
+ if (ret < 0)
+- pr_err("failed while handling packet from %d:%d",
++ pr_err_ratelimited("failed while handling packet from %d:%d",
+ sq.sq_node, sq.sq_port);
+ }
+
--- /dev/null
+From stable+bounces-242870-greg=kroah.com@vger.kernel.org Mon May 4 10:22:50 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 04:22:33 -0400
+Subject: net: qrtr: ns: Limit the total number of nodes
+To: stable@vger.kernel.org
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504082233.1875710-2-sashal@kernel.org>
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+[ Upstream commit 27d5e84e810b0849d08b9aec68e48570461ce313 ]
+
+Currently, the nameserver doesn't limit the number of nodes it handles.
+This can be an attack vector if a malicious client starts registering
+random nodes, leading to memory exhaustion.
+
+Hence, limit the maximum number of nodes to 64. Note that, limit of 64 is
+chosen based on the current platform requirements. If requirement changes
+in the future, this limit can be increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-4-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ dropped comment/define changes for missing QRTR_NS_MAX_SERVERS/LOOKUPS prereqs and kept plain kzalloc instead of kzalloc_obj ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -82,6 +82,13 @@ struct qrtr_node {
+ */
+ #define QRTR_NS_MAX_LOOKUPS 64
+
++/* Max nodes limit is chosen based on the current platform requirements.
++ * If the requirement changes in the future, this value can be increased.
++ */
++#define QRTR_NS_MAX_NODES 64
++
++static u8 node_count;
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+ struct qrtr_node *node;
+@@ -90,6 +97,11 @@ static struct qrtr_node *node_get(unsign
+ if (node)
+ return node;
+
++ if (node_count >= QRTR_NS_MAX_NODES) {
++ pr_err_ratelimited("QRTR clients exceed max node limit!\n");
++ return NULL;
++ }
++
+ /* If node didn't exist, allocate and insert it to the tree */
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+@@ -103,6 +115,8 @@ static struct qrtr_node *node_get(unsign
+ return NULL;
+ }
+
++ node_count++;
++
+ return node;
+ }
+
+@@ -406,6 +420,7 @@ static int ctrl_cmd_bye(struct sockaddr_
+ delete_node:
+ xa_erase(&nodes, from->sq_node);
+ kfree(node);
++ node_count--;
+
+ return ret;
+ }
--- /dev/null
+From stable+bounces-242425-greg=kroah.com@vger.kernel.org Fri May 1 15:13:53 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 09:13:44 -0400
+Subject: PCI: epf-mhi: Return 0, not remaining timeout, when eDMA ops complete
+To: stable@vger.kernel.org
+Cc: Daniel Hodges <git@danielhodges.dev>, Manivannan Sadhasivam <mani@kernel.org>, Krishna Chaitanya Chundru <krishna.chundru@oss.qualcomm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260501131344.3233285-1-sashal@kernel.org>
+
+From: Daniel Hodges <git@danielhodges.dev>
+
+[ Upstream commit 36bfc3642b19a98f1302aed4437c331df9b481f0 ]
+
+pci_epf_mhi_edma_read() and pci_epf_mhi_edma_write() start DMA
+operations and wait for completion with a timeout.
+
+On successful completion, they previously returned the remaining
+timeout, which callers may treat as an error. In particular,
+mhi_ep_ring_add_element(), which calls pci_epf_mhi_edma_write() via
+mhi_cntrl->write_sync(), interprets any non-zero return value as
+failure.
+
+Return 0 on success instead of the remaining timeout to prevent
+mhi_ep_ring_add_element() from treating successful completion as an
+error.
+
+Fixes: 7b99aaaddabb ("PCI: epf-mhi: Add eDMA support")
+Signed-off-by: Daniel Hodges <git@danielhodges.dev>
+[mani: changed commit log as per https://lore.kernel.org/linux-pci/20260227191510.GA3904799@bhelgaas]
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Reviewed-by: Krishna Chaitanya Chundru <krishna.chundru@oss.qualcomm.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260206200529.10784-1-git@danielhodges.dev
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/endpoint/functions/pci-epf-mhi.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
++++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
+@@ -331,6 +331,8 @@ static int pci_epf_mhi_edma_read(struct
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
++ } else {
++ ret = 0;
+ }
+
+ err_unmap:
+@@ -402,6 +404,8 @@ static int pci_epf_mhi_edma_write(struct
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
++ } else {
++ ret = 0;
+ }
+
+ err_unmap:
--- /dev/null
+From stable+bounces-242579-greg=kroah.com@vger.kernel.org Sat May 2 04:59:36 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2026 22:59:23 -0400
+Subject: RDMA/mana_ib: Disable RX steering on RSS QP destroy
+To: stable@vger.kernel.org
+Cc: Long Li <longli@microsoft.com>, Leon Romanovsky <leon@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260502025930.238469-1-sashal@kernel.org>
+
+From: Long Li <longli@microsoft.com>
+
+[ Upstream commit dbeb256e8dd87233d891b170c0b32a6466467036 ]
+
+When an RSS QP is destroyed (e.g. DPDK exit), mana_ib_destroy_qp_rss()
+destroys the RX WQ objects but does not disable vPort RX steering in
+firmware. This leaves stale steering configuration that still points to
+the destroyed RX objects.
+
+If traffic continues to arrive (e.g. peer VM is still transmitting) and
+the VF interface is subsequently brought up (mana_open), the firmware
+may deliver completions using stale CQ IDs from the old RX objects.
+These CQ IDs can be reused by the ethernet driver for new TX CQs,
+causing RX completions to land on TX CQs:
+
+ WARNING: mana_poll_tx_cq+0x1b8/0x220 [mana] (is_sq == false)
+ WARNING: mana_gd_process_eq_events+0x209/0x290 (cq_table lookup fails)
+
+Fix this by disabling vPort RX steering before destroying RX WQ objects.
+Note that mana_fence_rqs() cannot be used here because the fence
+completion is delivered on the CQ, which is polled by user-mode (e.g.
+DPDK) and not visible to the kernel driver.
+
+Refactor the disable logic into a shared mana_disable_vport_rx() in
+mana_en, exported for use by mana_ib, replacing the duplicate code.
+The ethernet driver's mana_dealloc_queues() is also updated to call
+this common function.
+
+Fixes: 0266a177631d ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
+Cc: stable@vger.kernel.org
+Signed-off-by: Long Li <longli@microsoft.com>
+Link: https://patch.msgid.link/20260325194100.1929056-1-longli@microsoft.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+[ kept early-return error handling and used unquoted NET_MANA namespace in EXPORT_SYMBOL_NS ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mana/qp.c | 15 +++++++++++++++
+ drivers/net/ethernet/microsoft/mana/mana_en.c | 11 ++++++++++-
+ include/net/mana/mana.h | 1 +
+ 3 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mana/qp.c
++++ b/drivers/infiniband/hw/mana/qp.c
+@@ -449,6 +449,21 @@ static int mana_ib_destroy_qp_rss(struct
+ ndev = mc->ports[qp->port - 1];
+ mpc = netdev_priv(ndev);
+
++ /* Disable vPort RX steering before destroying RX WQ objects.
++ * Otherwise firmware still routes traffic to the destroyed queues,
++ * which can cause bogus completions on reused CQ IDs when the
++ * ethernet driver later creates new queues on mana_open().
++ *
++ * Unlike the ethernet teardown path, mana_fence_rqs() cannot be
++ * used here because the fence completion CQE is delivered on the
++ * CQ which is polled by userspace (e.g. DPDK), so there is no way
++ * for the kernel to wait for fence completion.
++ *
++ * This is best effort — if it fails there is not much we can do,
++ * and mana_cfg_vport_steering() already logs the error.
++ */
++ mana_disable_vport_rx(mpc);
++
+ for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
+ ibwq = ind_tbl->ind_tbl[i];
+ wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -2380,6 +2380,13 @@ static void mana_rss_table_init(struct m
+ ethtool_rxfh_indir_default(i, apc->num_queues);
+ }
+
++int mana_disable_vport_rx(struct mana_port_context *apc)
++{
++ return mana_cfg_vport_steering(apc, TRI_STATE_FALSE, false, false,
++ false);
++}
++EXPORT_SYMBOL_NS(mana_disable_vport_rx, NET_MANA);
++
+ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
+ bool update_hash, bool update_tab)
+ {
+@@ -2620,12 +2627,14 @@ static int mana_dealloc_queues(struct ne
+ */
+
+ apc->rss_state = TRI_STATE_FALSE;
+- err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
++ err = mana_disable_vport_rx(apc);
+ if (err) {
+ netdev_err(ndev, "Failed to disable vPort: %d\n", err);
+ return err;
+ }
+
++ mana_fence_rqs(apc);
++
+ mana_destroy_vport(apc);
+
+ return 0;
+--- a/include/net/mana/mana.h
++++ b/include/net/mana/mana.h
+@@ -437,6 +437,7 @@ struct mana_port_context {
+ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+ int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
+ bool update_hash, bool update_tab);
++int mana_disable_vport_rx(struct mana_port_context *apc);
+
+ int mana_alloc_queues(struct net_device *ndev);
+ int mana_attach(struct net_device *ndev);
--- /dev/null
+From stable+bounces-242625-greg=kroah.com@vger.kernel.org Sun May 3 01:30:39 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 May 2026 19:30:19 -0400
+Subject: sched: Use u64 for bandwidth ratio calculations
+To: stable@vger.kernel.org
+Cc: Joseph Salisbury <joseph.salisbury@oracle.com>, "Peter Zijlstra (Intel)" <peterz@infradead.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260502233019.915219-1-sashal@kernel.org>
+
+From: Joseph Salisbury <joseph.salisbury@oracle.com>
+
+[ Upstream commit c6e80201e057dfb7253385e60bf541121bf5dc33 ]
+
+to_ratio() computes BW_SHIFT-scaled bandwidth ratios from u64 period and
+runtime values, but it returns unsigned long. tg_rt_schedulable() also
+stores the current group limit and the accumulated child sum in unsigned
+long.
+
+On 32-bit builds, large bandwidth ratios can be truncated and the RT
+group sum can wrap when enough siblings are present. That can let an
+overcommitted RT hierarchy pass the schedulability check, and it also
+narrows the helper result for other callers.
+
+Return u64 from to_ratio() and use u64 for the RT group totals so
+bandwidth ratios are preserved and compared at full width on both 32-bit
+and 64-bit builds.
+
+Fixes: b40b2e8eb521 ("sched: rt: multi level group constraints")
+Assisted-by: Codex:GPT-5
+Signed-off-by: Joseph Salisbury <joseph.salisbury@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260403210014.2713404-1-joseph.salisbury@oracle.com
+[ dropped `extern` keyword from `to_ratio()` declaration ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 2 +-
+ kernel/sched/rt.c | 2 +-
+ kernel/sched/sched.h | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4823,7 +4823,7 @@ void sched_post_fork(struct task_struct
+ uclamp_post_fork(p);
+ }
+
+-unsigned long to_ratio(u64 period, u64 runtime)
++u64 to_ratio(u64 period, u64 runtime)
+ {
+ if (runtime == RUNTIME_INF)
+ return BW_UNIT;
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2776,7 +2776,7 @@ static int tg_rt_schedulable(struct task
+ {
+ struct rt_schedulable_data *d = data;
+ struct task_group *child;
+- unsigned long total, sum = 0;
++ u64 total, sum = 0;
+ u64 period, runtime;
+
+ period = ktime_to_ns(tg->rt_bandwidth.rt_period);
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2463,7 +2463,7 @@ extern void init_dl_entity(struct sched_
+ #define RATIO_SHIFT 8
+ #define MAX_BW_BITS (64 - BW_SHIFT)
+ #define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
+-unsigned long to_ratio(u64 period, u64 runtime);
++u64 to_ratio(u64 period, u64 runtime);
+
+ extern void init_entity_runnable_average(struct sched_entity *se);
+ extern void post_init_entity_util_avg(struct task_struct *p);
batman-adv-bla-prevent-use-after-free-when-deleting-claims.patch
batman-adv-bla-only-purge-non-released-claims.patch
batman-adv-bla-put-backbone-reference-on-failed-claim-hash-insert.patch
+smb-move-some-duplicate-definitions-to-common-smbacl.h.patch
+smb-common-change-the-data-type-of-num_aces-to-le16.patch
+ksmbd-require-minimum-ace-size-in-smb_check_perm_dacl.patch
+smb-client-validate-the-whole-dacl-before-rewriting-it-in-cifsacl.patch
+f2fs-fix-uaf-caused-by-decrementing-sbi-nr_pages-in-f2fs_write_end_io.patch
+ksmbd-use-msleep-instaed-of-schedule_timeout_interruptible.patch
+ksmbd-replace-connection-list-with-hash-table.patch
+f2fs-fix-to-do-sanity-check-on-dcc-discard_cmd_cnt-conditionally.patch
+wifi-mwifiex-fix-use-after-free-in-mwifiex_adapter_cleanup.patch
+lib-test_hmm-evict-device-pages-on-file-close-to-avoid-use-after-free.patch
+arm64-mm-enable-batched-tlb-flush-in-unmap_hotplug_range.patch
+wifi-mt76-connac-introduce-helper-for-mt7925-chipset.patch
+wifi-mt76-mt792x-describe-usb-wfsys-reset-with-a-descriptor.patch
+wifi-mt76-mt792x-fix-mt7925u-usb-wfsys-reset-handling.patch
+pci-epf-mhi-return-0-not-remaining-timeout-when-edma-ops-complete.patch
+thermal-core-fix-thermal-zone-governor-cleanup-issues.patch
+ipmi-ssif-fix-a-shutdown-race.patch
+ipmi-ssif-clean-up-kthread-on-errors.patch
+alsa-aoa-use-guard-for-mutex-locks.patch
+alsa-aoa-i2sbus-clear-stale-prepared-state.patch
+media-rc-ttusbir-respect-dma-coherency-rules.patch
+alsa-aoa-skip-devices-with-no-codecs-in-i2sbus_resume.patch
+media-rc-igorplugusb-heed-coherency-rules.patch
+block-relax-pgmap-check-in-bio_add_page-for-compatible-zone-device-pages.patch
+sched-use-u64-for-bandwidth-ratio-calculations.patch
+rdma-mana_ib-disable-rx-steering-on-rss-qp-destroy.patch
+net-mctp-fix-don-t-require-received-header-reserved-bits-to-be-zero.patch
+net-bridge-use-a-stable-fdb-dst-snapshot-in-rcu-readers.patch
+net-qrtr-ns-limit-the-maximum-server-registration-per-node.patch
+net-qrtr-ns-limit-the-maximum-number-of-lookups.patch
+net-qrtr-ns-limit-the-total-number-of-nodes.patch
+spi-fix-resource-leaks-on-device-setup-failure.patch
+fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch
+firmware-google-framebuffer-do-not-unregister-platform-device.patch
+udf-fix-partition-descriptor-append-bookkeeping.patch
+mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch
+xfs-fix-a-resource-leak-in-xfs_alloc_buftarg.patch
+hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch
+hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch
+wifi-rtl8xxxu-fix-potential-use-of-uninitialized-value.patch
+ksmbd-reset-rcount-per-connection-in-ksmbd_conn_wait_idle_sess_id.patch
--- /dev/null
+From stable+bounces-240988-greg=kroah.com@vger.kernel.org Fri Apr 24 16:55:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2026 10:55:13 -0400
+Subject: smb: client: validate the whole DACL before rewriting it in cifsacl
+To: stable@vger.kernel.org
+Cc: Michael Bommarito <michael.bommarito@gmail.com>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260424145513.2190638-1-sashal@kernel.org>
+
+From: Michael Bommarito <michael.bommarito@gmail.com>
+
+[ Upstream commit 0a8cf165566ba55a39fd0f4de172119dd646d39a ]
+
+build_sec_desc() and id_mode_to_cifs_acl() derive a DACL pointer from a
+server-supplied dacloffset and then use the incoming ACL to rebuild the
+chmod/chown security descriptor.
+
+The original fix only checked that the struct smb_acl header fits before
+reading dacl_ptr->size or dacl_ptr->num_aces. That avoids the immediate
+header-field OOB read, but the rewrite helpers still walk ACEs based on
+pdacl->num_aces with no structural validation of the incoming DACL body.
+
+A malicious server can return a truncated DACL that still contains a
+header, claims one or more ACEs, and then drive
+replace_sids_and_copy_aces() or set_chmod_dacl() past the validated
+extent while they compare or copy attacker-controlled ACEs.
+
+Factor the DACL structural checks into validate_dacl(), extend them to
+validate each ACE against the DACL bounds, and use the shared validator
+before the chmod/chown rebuild paths. parse_dacl() reuses the same
+validator so the read-side parser and write-side rewrite paths agree on
+what constitutes a well-formed incoming DACL.
+
+Fixes: bc3e9dd9d104 ("cifs: Change SIDs in ACEs while transferring file ownership.")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-6
+Assisted-by: Codex:gpt-5-4
+Signed-off-by: Michael Bommarito <michael.bommarito@gmail.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ no kmalloc_objs ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsacl.c | 116 +++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 85 insertions(+), 31 deletions(-)
+
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -758,6 +758,77 @@ static void dump_ace(struct smb_ace *pac
+ }
+ #endif
+
++static int validate_dacl(struct smb_acl *pdacl, char *end_of_acl)
++{
++ int i, ace_hdr_size, ace_size, min_ace_size;
++ u16 dacl_size, num_aces;
++ char *acl_base, *end_of_dacl;
++ struct smb_ace *pace;
++
++ if (!pdacl)
++ return 0;
++
++ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl)) {
++ cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ return -EINVAL;
++ }
++
++ dacl_size = le16_to_cpu(pdacl->size);
++ if (dacl_size < sizeof(struct smb_acl) ||
++ end_of_acl < (char *)pdacl + dacl_size) {
++ cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ return -EINVAL;
++ }
++
++ num_aces = le16_to_cpu(pdacl->num_aces);
++ if (!num_aces)
++ return 0;
++
++ ace_hdr_size = offsetof(struct smb_ace, sid) +
++ offsetof(struct smb_sid, sub_auth);
++ min_ace_size = ace_hdr_size + sizeof(__le32);
++ if (num_aces > (dacl_size - sizeof(struct smb_acl)) / min_ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ return -EINVAL;
++ }
++
++ end_of_dacl = (char *)pdacl + dacl_size;
++ acl_base = (char *)pdacl;
++ ace_size = sizeof(struct smb_acl);
++
++ for (i = 0; i < num_aces; ++i) {
++ if (end_of_dacl - acl_base < ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++
++ pace = (struct smb_ace *)(acl_base + ace_size);
++ acl_base = (char *)pace;
++
++ if (end_of_dacl - acl_base < ace_hdr_size ||
++ pace->sid.num_subauth == 0 ||
++ pace->sid.num_subauth > SID_MAX_SUB_AUTHORITIES) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++
++ ace_size = ace_hdr_size + sizeof(__le32) * pace->sid.num_subauth;
++ if (end_of_dacl - acl_base < ace_size ||
++ le16_to_cpu(pace->size) < ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++
++ ace_size = le16_to_cpu(pace->size);
++ if (end_of_dacl - acl_base < ace_size) {
++ cifs_dbg(VFS, "ACL too small to parse ACE\n");
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
++
+ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
+ struct cifs_fattr *fattr, bool mode_from_special_sid)
+@@ -765,7 +836,7 @@ static void parse_dacl(struct smb_acl *p
+ int i;
+ u16 num_aces = 0;
+ int acl_size;
+- char *acl_base;
++ char *acl_base, *end_of_dacl;
+ struct smb_ace **ppace;
+
+ /* BB need to add parm so we can store the SID BB */
+@@ -777,12 +848,8 @@ static void parse_dacl(struct smb_acl *p
+ return;
+ }
+
+- /* validate that we do not go past end of acl */
+- if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
+- end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
+- cifs_dbg(VFS, "ACL too small to parse DACL\n");
++ if (validate_dacl(pdacl, end_of_acl))
+ return;
+- }
+
+ cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
+ le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+@@ -793,6 +860,7 @@ static void parse_dacl(struct smb_acl *p
+ user/group/other have no permissions */
+ fattr->cf_mode &= ~(0777);
+
++ end_of_dacl = (char *)pdacl + le16_to_cpu(pdacl->size);
+ acl_base = (char *)pdacl;
+ acl_size = sizeof(struct smb_acl);
+
+@@ -800,36 +868,16 @@ static void parse_dacl(struct smb_acl *p
+ if (num_aces > 0) {
+ umode_t denied_mode = 0;
+
+- if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) /
+- (offsetof(struct smb_ace, sid) +
+- offsetof(struct smb_sid, sub_auth) + sizeof(__le16)))
+- return;
+-
+ ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *),
+ GFP_KERNEL);
+ if (!ppace)
+ return;
+
+ for (i = 0; i < num_aces; ++i) {
+- if (end_of_acl - acl_base < acl_size)
+- break;
+-
+ ppace[i] = (struct smb_ace *) (acl_base + acl_size);
+- acl_base = (char *)ppace[i];
+- acl_size = offsetof(struct smb_ace, sid) +
+- offsetof(struct smb_sid, sub_auth);
+-
+- if (end_of_acl - acl_base < acl_size ||
+- ppace[i]->sid.num_subauth == 0 ||
+- ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+- (end_of_acl - acl_base <
+- acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+- (le16_to_cpu(ppace[i]->size) <
+- acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+- break;
+
+ #ifdef CONFIG_CIFS_DEBUG2
+- dump_ace(ppace[i], end_of_acl);
++ dump_ace(ppace[i], end_of_dacl);
+ #endif
+ if (mode_from_special_sid &&
+ ppace[i]->sid.num_subauth >= 3 &&
+@@ -872,6 +920,7 @@ static void parse_dacl(struct smb_acl *p
+ (void *)ppace[i],
+ sizeof(struct smb_ace)); */
+
++ acl_base = (char *)ppace[i];
+ acl_size = le16_to_cpu(ppace[i]->size);
+ }
+
+@@ -1317,10 +1366,9 @@ static int build_sec_desc(struct smb_nts
+ }
+
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+- if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
+- cifs_dbg(VFS, "Server returned illegal ACL size\n");
+- return -EINVAL;
+- }
++ rc = validate_dacl(dacl_ptr, end_of_acl);
++ if (rc)
++ return rc;
+ }
+
+ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
+@@ -1697,6 +1745,12 @@ id_mode_to_cifs_acl(struct inode *inode,
+ }
+
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
++ rc = validate_dacl(dacl_ptr, (char *)pntsd + secdesclen);
++ if (rc) {
++ kfree(pntsd);
++ cifs_put_tlink(tlink);
++ return rc;
++ }
+ if (mode_from_sid)
+ nsecdesclen +=
+ le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
--- /dev/null
+From stable+bounces-240657-greg=kroah.com@vger.kernel.org Fri Apr 24 15:05:11 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2026 09:04:00 -0400
+Subject: smb: common: change the data type of num_aces to le16
+To: stable@vger.kernel.org
+Cc: Namjae Jeon <linkinjeon@kernel.org>, Igor Leite Ladessa <igor-ladessa@hotmail.com>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260424130401.1917926-2-sashal@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 62e7dd0a39c2d0d7ff03274c36df971f1b3d2d0d ]
+
+2.4.5 in [MS-DTYP].pdf describe the data type of num_aces as le16.
+
+AceCount (2 bytes): An unsigned 16-bit integer that specifies the count
+of the number of ACE records in the ACL.
+
+Change it to le16 and add reserved field to smb_acl struct.
+
+Reported-by: Igor Leite Ladessa <igor-ladessa@hotmail.com>
+Tested-by: Igor Leite Ladessa <igor-ladessa@hotmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: d07b26f39246 ("ksmbd: require minimum ACE size in smb_check_perm_dacl()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsacl.c | 26 +++++++++++++-------------
+ fs/smb/common/smbacl.h | 3 ++-
+ fs/smb/server/smbacl.c | 31 ++++++++++++++++---------------
+ fs/smb/server/smbacl.h | 2 +-
+ 4 files changed, 32 insertions(+), 30 deletions(-)
+
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -763,7 +763,7 @@ static void parse_dacl(struct smb_acl *p
+ struct cifs_fattr *fattr, bool mode_from_special_sid)
+ {
+ int i;
+- int num_aces = 0;
++ u16 num_aces = 0;
+ int acl_size;
+ char *acl_base;
+ struct smb_ace **ppace;
+@@ -786,7 +786,7 @@ static void parse_dacl(struct smb_acl *p
+
+ cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
+ le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+- le32_to_cpu(pdacl->num_aces));
++ le16_to_cpu(pdacl->num_aces));
+
+ /* reset rwx permissions for user/group/other.
+ Also, if num_aces is 0 i.e. DACL has no ACEs,
+@@ -796,7 +796,7 @@ static void parse_dacl(struct smb_acl *p
+ acl_base = (char *)pdacl;
+ acl_size = sizeof(struct smb_acl);
+
+- num_aces = le32_to_cpu(pdacl->num_aces);
++ num_aces = le16_to_cpu(pdacl->num_aces);
+ if (num_aces > 0) {
+ umode_t denied_mode = 0;
+
+@@ -957,12 +957,12 @@ unsigned int setup_special_user_owner_AC
+ static void populate_new_aces(char *nacl_base,
+ struct smb_sid *pownersid,
+ struct smb_sid *pgrpsid,
+- __u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
++ __u64 *pnmode, u16 *pnum_aces, u16 *pnsize,
+ bool modefromsid,
+ bool posix)
+ {
+ __u64 nmode;
+- u32 num_aces = 0;
++ u16 num_aces = 0;
+ u16 nsize = 0;
+ __u64 user_mode;
+ __u64 group_mode;
+@@ -1070,7 +1070,7 @@ static __u16 replace_sids_and_copy_aces(
+ u16 size = 0;
+ struct smb_ace *pntace = NULL;
+ char *acl_base = NULL;
+- u32 src_num_aces = 0;
++ u16 src_num_aces = 0;
+ u16 nsize = 0;
+ struct smb_ace *pnntace = NULL;
+ char *nacl_base = NULL;
+@@ -1078,7 +1078,7 @@ static __u16 replace_sids_and_copy_aces(
+
+ acl_base = (char *)pdacl;
+ size = sizeof(struct smb_acl);
+- src_num_aces = le32_to_cpu(pdacl->num_aces);
++ src_num_aces = le16_to_cpu(pdacl->num_aces);
+
+ nacl_base = (char *)pndacl;
+ nsize = sizeof(struct smb_acl);
+@@ -1110,11 +1110,11 @@ static int set_chmod_dacl(struct smb_acl
+ u16 size = 0;
+ struct smb_ace *pntace = NULL;
+ char *acl_base = NULL;
+- u32 src_num_aces = 0;
++ u16 src_num_aces = 0;
+ u16 nsize = 0;
+ struct smb_ace *pnntace = NULL;
+ char *nacl_base = NULL;
+- u32 num_aces = 0;
++ u16 num_aces = 0;
+ bool new_aces_set = false;
+
+ /* Assuming that pndacl and pnmode are never NULL */
+@@ -1132,7 +1132,7 @@ static int set_chmod_dacl(struct smb_acl
+
+ acl_base = (char *)pdacl;
+ size = sizeof(struct smb_acl);
+- src_num_aces = le32_to_cpu(pdacl->num_aces);
++ src_num_aces = le16_to_cpu(pdacl->num_aces);
+
+ /* Retain old ACEs which we can retain */
+ for (i = 0; i < src_num_aces; ++i) {
+@@ -1178,7 +1178,7 @@ next_ace:
+ }
+
+ finalize_dacl:
+- pndacl->num_aces = cpu_to_le32(num_aces);
++ pndacl->num_aces = cpu_to_le16(num_aces);
+ pndacl->size = cpu_to_le16(nsize);
+
+ return 0;
+@@ -1335,7 +1335,7 @@ static int build_sec_desc(struct smb_nts
+ dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+
+ ndacl_ptr->size = cpu_to_le16(0);
+- ndacl_ptr->num_aces = cpu_to_le32(0);
++ ndacl_ptr->num_aces = cpu_to_le16(0);
+
+ rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
+ pnmode, mode_from_sid, posix);
+@@ -1699,7 +1699,7 @@ id_mode_to_cifs_acl(struct inode *inode,
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ if (mode_from_sid)
+ nsecdesclen +=
+- le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
++ le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
+ else /* cifsacl */
+ nsecdesclen += le16_to_cpu(dacl_ptr->size);
+ }
+--- a/fs/smb/common/smbacl.h
++++ b/fs/smb/common/smbacl.h
+@@ -107,7 +107,8 @@ struct smb_sid {
+ struct smb_acl {
+ __le16 revision; /* revision level */
+ __le16 size;
+- __le32 num_aces;
++ __le16 num_aces;
++ __le16 reserved;
+ } __attribute__((packed));
+
+ struct smb_ace {
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -338,7 +338,7 @@ void posix_state_to_acl(struct posix_acl
+ pace->e_perm = state->other.allow;
+ }
+
+-int init_acl_state(struct posix_acl_state *state, int cnt)
++int init_acl_state(struct posix_acl_state *state, u16 cnt)
+ {
+ int alloc;
+
+@@ -373,7 +373,7 @@ static void parse_dacl(struct mnt_idmap
+ struct smb_fattr *fattr)
+ {
+ int i, ret;
+- int num_aces = 0;
++ u16 num_aces = 0;
+ unsigned int acl_size;
+ char *acl_base;
+ struct smb_ace **ppace;
+@@ -394,12 +394,12 @@ static void parse_dacl(struct mnt_idmap
+
+ ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n",
+ le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+- le32_to_cpu(pdacl->num_aces));
++ le16_to_cpu(pdacl->num_aces));
+
+ acl_base = (char *)pdacl;
+ acl_size = sizeof(struct smb_acl);
+
+- num_aces = le32_to_cpu(pdacl->num_aces);
++ num_aces = le16_to_cpu(pdacl->num_aces);
+ if (num_aces <= 0)
+ return;
+
+@@ -589,7 +589,7 @@ static void parse_dacl(struct mnt_idmap
+
+ static void set_posix_acl_entries_dacl(struct mnt_idmap *idmap,
+ struct smb_ace *pndace,
+- struct smb_fattr *fattr, u32 *num_aces,
++ struct smb_fattr *fattr, u16 *num_aces,
+ u16 *size, u32 nt_aces_num)
+ {
+ struct posix_acl_entry *pace;
+@@ -717,7 +717,7 @@ static void set_ntacl_dacl(struct mnt_id
+ struct smb_fattr *fattr)
+ {
+ struct smb_ace *ntace, *pndace;
+- int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0;
++ u16 nt_num_aces = le16_to_cpu(nt_dacl->num_aces), num_aces = 0;
+ unsigned short size = 0;
+ int i;
+
+@@ -745,7 +745,7 @@ static void set_ntacl_dacl(struct mnt_id
+
+ set_posix_acl_entries_dacl(idmap, pndace, fattr,
+ &num_aces, &size, nt_num_aces);
+- pndacl->num_aces = cpu_to_le32(num_aces);
++ pndacl->num_aces = cpu_to_le16(num_aces);
+ pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+ }
+
+@@ -753,7 +753,7 @@ static void set_mode_dacl(struct mnt_idm
+ struct smb_acl *pndacl, struct smb_fattr *fattr)
+ {
+ struct smb_ace *pace, *pndace;
+- u32 num_aces = 0;
++ u16 num_aces = 0;
+ u16 size = 0, ace_size = 0;
+ uid_t uid;
+ const struct smb_sid *sid;
+@@ -809,7 +809,7 @@ static void set_mode_dacl(struct mnt_idm
+ fattr->cf_mode, 0007);
+
+ out:
+- pndacl->num_aces = cpu_to_le32(num_aces);
++ pndacl->num_aces = cpu_to_le16(num_aces);
+ pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+ }
+
+@@ -1039,8 +1039,9 @@ int smb_inherit_dacl(struct ksmbd_conn *
+ struct smb_sid owner_sid, group_sid;
+ struct dentry *parent = path->dentry->d_parent;
+ struct mnt_idmap *idmap = mnt_idmap(path->mnt);
+- int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
+- int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
++ int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size;
++ int rc = 0, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
++ u16 num_aces, ace_cnt = 0;
+ char *aces_base;
+ bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
+
+@@ -1056,7 +1057,7 @@ int smb_inherit_dacl(struct ksmbd_conn *
+
+ parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
+ acl_len = pntsd_size - dacloffset;
+- num_aces = le32_to_cpu(parent_pdacl->num_aces);
++ num_aces = le16_to_cpu(parent_pdacl->num_aces);
+ pntsd_type = le16_to_cpu(parent_pntsd->type);
+ pdacl_size = le16_to_cpu(parent_pdacl->size);
+
+@@ -1215,7 +1216,7 @@ pass:
+ pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
+ pdacl->revision = cpu_to_le16(2);
+ pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size);
+- pdacl->num_aces = cpu_to_le32(ace_cnt);
++ pdacl->num_aces = cpu_to_le16(ace_cnt);
+ pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ memcpy(pace, aces_base, nt_size);
+ pntsd_size += sizeof(struct smb_acl) + nt_size;
+@@ -1296,7 +1297,7 @@ int smb_check_perm_dacl(struct ksmbd_con
+
+ ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ aces_size = acl_size - sizeof(struct smb_acl);
+- for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
++ for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
+ if (offsetof(struct smb_ace, access_req) > aces_size)
+ break;
+ ace_size = le16_to_cpu(ace->size);
+@@ -1317,7 +1318,7 @@ int smb_check_perm_dacl(struct ksmbd_con
+
+ ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ aces_size = acl_size - sizeof(struct smb_acl);
+- for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
++ for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
+ if (offsetof(struct smb_ace, access_req) > aces_size)
+ break;
+ ace_size = le16_to_cpu(ace->size);
+--- a/fs/smb/server/smbacl.h
++++ b/fs/smb/server/smbacl.h
+@@ -86,7 +86,7 @@ int parse_sec_desc(struct mnt_idmap *idm
+ int build_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
+ struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info,
+ __u32 *secdesclen, struct smb_fattr *fattr);
+-int init_acl_state(struct posix_acl_state *state, int cnt);
++int init_acl_state(struct posix_acl_state *state, u16 cnt);
+ void free_acl_state(struct posix_acl_state *state);
+ void posix_state_to_acl(struct posix_acl_state *state,
+ struct posix_acl_entry *pace);
--- /dev/null
+From stable+bounces-240656-greg=kroah.com@vger.kernel.org Fri Apr 24 15:05:09 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2026 09:03:59 -0400
+Subject: smb: move some duplicate definitions to common/smbacl.h
+To: stable@vger.kernel.org
+Cc: ChenXiaoSong <chenxiaosong@kylinos.cn>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260424130401.1917926-1-sashal@kernel.org>
+
+From: ChenXiaoSong <chenxiaosong@kylinos.cn>
+
+[ Upstream commit b51174da743b6b7cd87c02e882ebe60dcb99f8bf ]
+
+In order to maintain the code more easily, move duplicate definitions
+to new common header file.
+
+Signed-off-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: d07b26f39246 ("ksmbd: require minimum ACE size in smb_check_perm_dacl()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsacl.h | 91 ------------------------------------
+ fs/smb/common/smbacl.h | 121 ++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/smb/server/smbacl.h | 111 --------------------------------------------
+ 3 files changed, 123 insertions(+), 200 deletions(-)
+ create mode 100644 fs/smb/common/smbacl.h
+
+--- a/fs/smb/client/cifsacl.h
++++ b/fs/smb/client/cifsacl.h
+@@ -9,8 +9,7 @@
+ #ifndef _CIFSACL_H
+ #define _CIFSACL_H
+
+-#define NUM_AUTHS (6) /* number of authority fields */
+-#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
++#include "../common/smbacl.h"
+
+ #define READ_BIT 0x4
+ #define WRITE_BIT 0x2
+@@ -23,12 +22,6 @@
+ #define UBITSHIFT 6
+ #define GBITSHIFT 3
+
+-#define ACCESS_ALLOWED 0
+-#define ACCESS_DENIED 1
+-
+-#define SIDOWNER 1
+-#define SIDGROUP 2
+-
+ /*
+ * Security Descriptor length containing DACL with 3 ACEs (one each for
+ * owner, group and world).
+@@ -38,88 +31,6 @@
+ (sizeof(struct smb_ace) * 4))
+
+ /*
+- * Maximum size of a string representation of a SID:
+- *
+- * The fields are unsigned values in decimal. So:
+- *
+- * u8: max 3 bytes in decimal
+- * u32: max 10 bytes in decimal
+- *
+- * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
+- *
+- * For authority field, max is when all 6 values are non-zero and it must be
+- * represented in hex. So "-0x" + 12 hex digits.
+- *
+- * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
+- */
+-#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
+-#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
+-
+-struct smb_ntsd {
+- __le16 revision; /* revision level */
+- __le16 type;
+- __le32 osidoffset;
+- __le32 gsidoffset;
+- __le32 sacloffset;
+- __le32 dacloffset;
+-} __attribute__((packed));
+-
+-struct smb_sid {
+- __u8 revision; /* revision level */
+- __u8 num_subauth;
+- __u8 authority[NUM_AUTHS];
+- __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
+-} __attribute__((packed));
+-
+-/* size of a struct smb_sid, sans sub_auth array */
+-#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
+-
+-struct smb_acl {
+- __le16 revision; /* revision level */
+- __le16 size;
+- __le32 num_aces;
+-} __attribute__((packed));
+-
+-/* ACE types - see MS-DTYP 2.4.4.1 */
+-#define ACCESS_ALLOWED_ACE_TYPE 0x00
+-#define ACCESS_DENIED_ACE_TYPE 0x01
+-#define SYSTEM_AUDIT_ACE_TYPE 0x02
+-#define SYSTEM_ALARM_ACE_TYPE 0x03
+-#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+-#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
+-#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
+-#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
+-#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
+-#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+-#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
+-#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+-#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
+-#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
+-#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
+-#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+-#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+-#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
+-#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+-#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+-
+-/* ACE flags */
+-#define OBJECT_INHERIT_ACE 0x01
+-#define CONTAINER_INHERIT_ACE 0x02
+-#define NO_PROPAGATE_INHERIT_ACE 0x04
+-#define INHERIT_ONLY_ACE 0x08
+-#define INHERITED_ACE 0x10
+-#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
+-#define FAILED_ACCESS_ACE_FLAG 0x80
+-
+-struct smb_ace {
+- __u8 type; /* see above and MS-DTYP 2.4.4.1 */
+- __u8 flags;
+- __le16 size;
+- __le32 access_req;
+- struct smb_sid sid; /* ie UUID of user or group who gets these perms */
+-} __attribute__((packed));
+-
+-/*
+ * The current SMB3 form of security descriptor is similar to what was used for
+ * cifs (see above) but some fields are split, and fields in the struct below
+ * matches names of fields to the spec, MS-DTYP (see sections 2.4.5 and
+--- /dev/null
++++ b/fs/smb/common/smbacl.h
+@@ -0,0 +1,121 @@
++/* SPDX-License-Identifier: LGPL-2.1+ */
++/*
++ * Copyright (c) International Business Machines Corp., 2007
++ * Author(s): Steve French (sfrench@us.ibm.com)
++ * Modified by Namjae Jeon (linkinjeon@kernel.org)
++ */
++
++#ifndef _COMMON_SMBACL_H
++#define _COMMON_SMBACL_H
++
++#define NUM_AUTHS (6) /* number of authority fields */
++#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
++
++/* ACE types - see MS-DTYP 2.4.4.1 */
++#define ACCESS_ALLOWED_ACE_TYPE 0x00
++#define ACCESS_DENIED_ACE_TYPE 0x01
++#define SYSTEM_AUDIT_ACE_TYPE 0x02
++#define SYSTEM_ALARM_ACE_TYPE 0x03
++#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
++#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
++#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
++#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
++#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
++#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
++#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
++#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
++#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
++#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
++#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
++#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
++#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
++#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
++#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
++#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
++
++/* ACE flags */
++#define OBJECT_INHERIT_ACE 0x01
++#define CONTAINER_INHERIT_ACE 0x02
++#define NO_PROPAGATE_INHERIT_ACE 0x04
++#define INHERIT_ONLY_ACE 0x08
++#define INHERITED_ACE 0x10
++#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
++#define FAILED_ACCESS_ACE_FLAG 0x80
++
++/*
++ * Maximum size of a string representation of a SID:
++ *
++ * The fields are unsigned values in decimal. So:
++ *
++ * u8: max 3 bytes in decimal
++ * u32: max 10 bytes in decimal
++ *
++ * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
++ *
++ * For authority field, max is when all 6 values are non-zero and it must be
++ * represented in hex. So "-0x" + 12 hex digits.
++ *
++ * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
++ */
++#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
++#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
++
++#define DOMAIN_USER_RID_LE cpu_to_le32(513)
++
++/*
++ * ACE types - see MS-DTYP 2.4.4.1
++ */
++enum {
++ ACCESS_ALLOWED,
++ ACCESS_DENIED,
++};
++
++/*
++ * Security ID types
++ */
++enum {
++ SIDOWNER = 1,
++ SIDGROUP,
++ SIDCREATOR_OWNER,
++ SIDCREATOR_GROUP,
++ SIDUNIX_USER,
++ SIDUNIX_GROUP,
++ SIDNFS_USER,
++ SIDNFS_GROUP,
++ SIDNFS_MODE,
++};
++
++struct smb_ntsd {
++ __le16 revision; /* revision level */
++ __le16 type;
++ __le32 osidoffset;
++ __le32 gsidoffset;
++ __le32 sacloffset;
++ __le32 dacloffset;
++} __attribute__((packed));
++
++struct smb_sid {
++ __u8 revision; /* revision level */
++ __u8 num_subauth;
++ __u8 authority[NUM_AUTHS];
++ __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
++} __attribute__((packed));
++
++/* size of a struct smb_sid, sans sub_auth array */
++#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
++
++struct smb_acl {
++ __le16 revision; /* revision level */
++ __le16 size;
++ __le32 num_aces;
++} __attribute__((packed));
++
++struct smb_ace {
++ __u8 type; /* see above and MS-DTYP 2.4.4.1 */
++ __u8 flags;
++ __le16 size;
++ __le32 access_req;
++ struct smb_sid sid; /* ie UUID of user or group who gets these perms */
++} __attribute__((packed));
++
++#endif /* _COMMON_SMBACL_H */
+--- a/fs/smb/server/smbacl.h
++++ b/fs/smb/server/smbacl.h
+@@ -8,6 +8,7 @@
+ #ifndef _SMBACL_H
+ #define _SMBACL_H
+
++#include "../common/smbacl.h"
+ #include <linux/fs.h>
+ #include <linux/namei.h>
+ #include <linux/posix_acl.h>
+@@ -15,32 +16,6 @@
+
+ #include "mgmt/tree_connect.h"
+
+-#define NUM_AUTHS (6) /* number of authority fields */
+-#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
+-
+-/*
+- * ACE types - see MS-DTYP 2.4.4.1
+- */
+-enum {
+- ACCESS_ALLOWED,
+- ACCESS_DENIED,
+-};
+-
+-/*
+- * Security ID types
+- */
+-enum {
+- SIDOWNER = 1,
+- SIDGROUP,
+- SIDCREATOR_OWNER,
+- SIDCREATOR_GROUP,
+- SIDUNIX_USER,
+- SIDUNIX_GROUP,
+- SIDNFS_USER,
+- SIDNFS_GROUP,
+- SIDNFS_MODE,
+-};
+-
+ /* Revision for ACLs */
+ #define SD_REVISION 1
+
+@@ -62,92 +37,8 @@ enum {
+ #define RM_CONTROL_VALID 0x4000
+ #define SELF_RELATIVE 0x8000
+
+-/* ACE types - see MS-DTYP 2.4.4.1 */
+-#define ACCESS_ALLOWED_ACE_TYPE 0x00
+-#define ACCESS_DENIED_ACE_TYPE 0x01
+-#define SYSTEM_AUDIT_ACE_TYPE 0x02
+-#define SYSTEM_ALARM_ACE_TYPE 0x03
+-#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+-#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
+-#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
+-#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
+-#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
+-#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+-#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
+-#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+-#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
+-#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
+-#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
+-#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+-#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+-#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
+-#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+-#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+-
+-/* ACE flags */
+-#define OBJECT_INHERIT_ACE 0x01
+-#define CONTAINER_INHERIT_ACE 0x02
+-#define NO_PROPAGATE_INHERIT_ACE 0x04
+-#define INHERIT_ONLY_ACE 0x08
+-#define INHERITED_ACE 0x10
+-#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
+-#define FAILED_ACCESS_ACE_FLAG 0x80
+-
+-/*
+- * Maximum size of a string representation of a SID:
+- *
+- * The fields are unsigned values in decimal. So:
+- *
+- * u8: max 3 bytes in decimal
+- * u32: max 10 bytes in decimal
+- *
+- * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
+- *
+- * For authority field, max is when all 6 values are non-zero and it must be
+- * represented in hex. So "-0x" + 12 hex digits.
+- *
+- * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
+- */
+-#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
+-#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
+-
+-#define DOMAIN_USER_RID_LE cpu_to_le32(513)
+-
+ struct ksmbd_conn;
+
+-struct smb_ntsd {
+- __le16 revision; /* revision level */
+- __le16 type;
+- __le32 osidoffset;
+- __le32 gsidoffset;
+- __le32 sacloffset;
+- __le32 dacloffset;
+-} __packed;
+-
+-struct smb_sid {
+- __u8 revision; /* revision level */
+- __u8 num_subauth;
+- __u8 authority[NUM_AUTHS];
+- __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
+-} __packed;
+-
+-/* size of a struct cifs_sid, sans sub_auth array */
+-#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
+-
+-struct smb_acl {
+- __le16 revision; /* revision level */
+- __le16 size;
+- __le32 num_aces;
+-} __packed;
+-
+-struct smb_ace {
+- __u8 type;
+- __u8 flags;
+- __le16 size;
+- __le32 access_req;
+- struct smb_sid sid; /* ie UUID of user or group who gets these perms */
+-} __packed;
+-
+ struct smb_fattr {
+ kuid_t cf_uid;
+ kgid_t cf_gid;
--- /dev/null
+From stable+bounces-243011-greg=kroah.com@vger.kernel.org Mon May 4 14:16:04 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2026 08:15:56 -0400
+Subject: spi: fix resource leaks on device setup failure
+To: stable@vger.kernel.org
+Cc: Johan Hovold <johan@kernel.org>, Saravana Kannan <saravanak@kernel.org>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504121556.2149853-1-sashal@kernel.org>
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit db357034f7e0cf23f233f414a8508312dfe8fbbe ]
+
+Make sure to call controller cleanup() if spi_setup() fails while
+registering a device to avoid leaking any resources allocated by
+setup().
+
+Fixes: c7299fea6769 ("spi: Fix spi device unregister flow")
+Cc: stable@vger.kernel.org # 5.13
+Cc: Saravana Kannan <saravanak@kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20260410154907.129248-2-johan@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi.c | 61 ++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 37 insertions(+), 24 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -42,6 +42,8 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_st
+
+ #include "internals.h"
+
++static int __spi_setup(struct spi_device *spi, bool initial_setup);
++
+ static DEFINE_IDR(spi_master_idr);
+
+ static void spidev_release(struct device *dev)
+@@ -677,7 +679,7 @@ static int __spi_add_device(struct spi_d
+ * normally rely on the device being setup. Devices
+ * using SPI_CS_HIGH can't coexist well otherwise...
+ */
+- status = spi_setup(spi);
++ status = __spi_setup(spi, true);
+ if (status < 0) {
+ dev_err(dev, "can't setup %s, status %d\n",
+ dev_name(&spi->dev), status);
+@@ -3734,27 +3736,7 @@ static int spi_set_cs_timing(struct spi_
+ return status;
+ }
+
+-/**
+- * spi_setup - setup SPI mode and clock rate
+- * @spi: the device whose settings are being modified
+- * Context: can sleep, and no requests are queued to the device
+- *
+- * SPI protocol drivers may need to update the transfer mode if the
+- * device doesn't work with its default. They may likewise need
+- * to update clock rates or word sizes from initial values. This function
+- * changes those settings, and must be called from a context that can sleep.
+- * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+- * effect the next time the device is selected and data is transferred to
+- * or from it. When this function returns, the SPI device is deselected.
+- *
+- * Note that this call will fail if the protocol driver specifies an option
+- * that the underlying controller or its driver does not support. For
+- * example, not all hardware supports wire transfers using nine bit words,
+- * LSB-first wire encoding, or active-high chipselects.
+- *
+- * Return: zero on success, else a negative error code.
+- */
+-int spi_setup(struct spi_device *spi)
++static int __spi_setup(struct spi_device *spi, bool initial_setup)
+ {
+ unsigned bad_bits, ugly_bits;
+ int status = 0;
+@@ -3833,7 +3815,7 @@ int spi_setup(struct spi_device *spi)
+ status = spi_set_cs_timing(spi);
+ if (status) {
+ mutex_unlock(&spi->controller->io_mutex);
+- return status;
++ goto err_cleanup;
+ }
+
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+@@ -3842,7 +3824,7 @@ int spi_setup(struct spi_device *spi)
+ mutex_unlock(&spi->controller->io_mutex);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+- return status;
++ goto err_cleanup;
+ }
+
+ /*
+@@ -3879,6 +3861,37 @@ int spi_setup(struct spi_device *spi)
+ status);
+
+ return status;
++
++err_cleanup:
++ if (initial_setup)
++ spi_cleanup(spi);
++
++ return status;
++}
++
++/**
++ * spi_setup - setup SPI mode and clock rate
++ * @spi: the device whose settings are being modified
++ * Context: can sleep, and no requests are queued to the device
++ *
++ * SPI protocol drivers may need to update the transfer mode if the
++ * device doesn't work with its default. They may likewise need
++ * to update clock rates or word sizes from initial values. This function
++ * changes those settings, and must be called from a context that can sleep.
++ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
++ * effect the next time the device is selected and data is transferred to
++ * or from it. When this function returns, the SPI device is deselected.
++ *
++ * Note that this call will fail if the protocol driver specifies an option
++ * that the underlying controller or its driver does not support. For
++ * example, not all hardware supports wire transfers using nine bit words,
++ * LSB-first wire encoding, or active-high chipselects.
++ *
++ * Return: zero on success, else a negative error code.
++ */
++int spi_setup(struct spi_device *spi)
++{
++ return __spi_setup(spi, false);
+ }
+ EXPORT_SYMBOL_GPL(spi_setup);
+
--- /dev/null
+From stable+bounces-242186-greg=kroah.com@vger.kernel.org Thu Apr 30 19:58:55 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 13:56:55 -0400
+Subject: thermal: core: Fix thermal zone governor cleanup issues
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430175655.1906210-1-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 41ff66baf81c6541f4f985dd7eac4494d03d9440 ]
+
+If thermal_zone_device_register_with_trips() fails after adding
+a thermal governor to the thermal zone being registered, the
+governor is not removed from it as appropriate which may lead to
+a memory leak.
+
+In turn, thermal_zone_device_unregister() calls thermal_set_governor()
+without acquiring the thermal zone lock beforehand which may race with
+a governor update via sysfs and may lead to a use-after-free in that
+case.
+
+Address these issues by adding two thermal_set_governor() calls, one to
+thermal_release() to remove the governor from the given thermal zone,
+and one to the thermal zone registration error path to cover failures
+preceding the thermal zone device registration.
+
+Fixes: e33df1d2f3a0 ("thermal: let governors have private data for each thermal zone")
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Link: https://patch.msgid.link/5092923.31r3eYUQgx@rafael.j.wysocki
+[ kept the `thermal_zone_create_device_groups(tz, mask)` signature when adding the new failure-path cleanup ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/thermal_core.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -804,6 +804,7 @@ static void thermal_release(struct devic
+ sizeof("thermal_zone") - 1)) {
+ tz = to_thermal_zone(dev);
+ thermal_zone_destroy_device_groups(tz);
++ thermal_set_governor(tz, NULL);
+ mutex_destroy(&tz->lock);
+ complete(&tz->removal);
+ } else if (!strncmp(dev_name(dev), "cooling_device",
+@@ -1325,8 +1326,10 @@ thermal_zone_device_register_with_trips(
+ /* sys I/F */
+ /* Add nodes that are always present via .groups */
+ result = thermal_zone_create_device_groups(tz, mask);
+- if (result)
++ if (result) {
++ thermal_set_governor(tz, NULL);
+ goto remove_id;
++ }
+
+ /* A new thermal zone needs to be updated anyway. */
+ atomic_set(&tz->need_update, 1);
+@@ -1478,8 +1481,6 @@ void thermal_zone_device_unregister(stru
+
+ cancel_delayed_work_sync(&tz->poll_queue);
+
+- thermal_set_governor(tz, NULL);
+-
+ thermal_remove_hwmon_sysfs(tz);
+ ida_free(&thermal_tz_ida, tz->id);
+ ida_destroy(&tz->ida);
--- /dev/null
+From stable+bounces-244821-greg=kroah.com@vger.kernel.org Fri May 8 22:04:49 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 16:04:44 -0400
+Subject: udf: fix partition descriptor append bookkeeping
+To: stable@vger.kernel.org
+Cc: Seohyeon Maeng <bioloidgp@gmail.com>, Jan Kara <jack@suse.cz>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260508200444.1891021-1-sashal@kernel.org>
+
+From: Seohyeon Maeng <bioloidgp@gmail.com>
+
+[ Upstream commit 08841b06fa64d8edbd1a21ca6e613420c90cc4b8 ]
+
+Mounting a crafted UDF image with repeated partition descriptors can
+trigger a heap out-of-bounds write in part_descs_loc[].
+
+handle_partition_descriptor() deduplicates entries by partition number,
+but appended slots never record partnum. As a result duplicate
+Partition Descriptors are appended repeatedly and num_part_descs keeps
+growing.
+
+Once the table is full, the growth path still sizes the allocation from
+partnum even though inserts are indexed by num_part_descs. If partnum is
+already aligned to PART_DESC_ALLOC_STEP, ALIGN(partnum, step) can keep
+the old capacity and the next append writes past the end of the table.
+
+Store partnum in the appended slot and size growth from the next append
+count so deduplication and capacity tracking follow the same model.
+
+Fixes: ee4af50ca94f ("udf: Fix mounting of Win7 created UDF filesystems")
+Cc: stable@vger.kernel.org
+Signed-off-by: Seohyeon Maeng <bioloidgp@gmail.com>
+Link: https://patch.msgid.link/20260310081652.21220-1-bioloidgp@gmail.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+[ replaced kzalloc_objs() helper with equivalent kcalloc() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/udf/super.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1656,8 +1656,9 @@ static struct udf_vds_record *handle_par
+ return &(data->part_descs_loc[i].rec);
+ if (data->num_part_descs >= data->size_part_descs) {
+ struct part_desc_seq_scan_data *new_loc;
+- unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
++ unsigned int new_size;
+
++ new_size = data->num_part_descs + PART_DESC_ALLOC_STEP;
+ new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
+ if (!new_loc)
+ return ERR_PTR(-ENOMEM);
+@@ -1667,6 +1668,7 @@ static struct udf_vds_record *handle_par
+ data->part_descs_loc = new_loc;
+ data->size_part_descs = new_size;
+ }
++ data->part_descs_loc[data->num_part_descs].partnum = partnum;
+ return &(data->part_descs_loc[data->num_part_descs++].rec);
+ }
+
--- /dev/null
+From stable+bounces-242174-greg=kroah.com@vger.kernel.org Thu Apr 30 19:23:30 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 13:23:22 -0400
+Subject: wifi: mt76: connac: introduce helper for mt7925 chipset
+To: stable@vger.kernel.org
+Cc: Deren Wu <deren.wu@mediatek.com>, Lorenzo Bianconi <lorenzo@kernel.org>, Felix Fietkau <nbd@nbd.name>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430172324.1875442-1-sashal@kernel.org>
+
+From: Deren Wu <deren.wu@mediatek.com>
+
+[ Upstream commit 525209262f9c2999f6f5fa0c40b4519cd6acfa2e ]
+
+Introduce is_mt7925() helper for new chipset. mt7925 runs the same
+firmware download and mmio map flow as mt7921.
+
+This is a preliminary patch to support mt7925 driver.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Deren Wu <deren.wu@mediatek.com>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Stable-dep-of: 56154fef47d1 ("wifi: mt76: mt792x: fix mt7925u USB WFSYS reset handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt76_connac.h | 6 ++++++
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 4 ++--
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 3 ++-
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h | 2 +-
+ 4 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+@@ -172,6 +172,11 @@ struct mt76_connac_tx_free {
+
+ extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
+
++static inline bool is_mt7925(struct mt76_dev *dev)
++{
++ return mt76_chip(dev) == 0x7925;
++}
++
+ static inline bool is_mt7922(struct mt76_dev *dev)
+ {
+ return mt76_chip(dev) == 0x7922;
+@@ -245,6 +250,7 @@ static inline bool is_mt76_fw_txp(struct
+ switch (mt76_chip(dev)) {
+ case 0x7961:
+ case 0x7922:
++ case 0x7925:
+ case 0x7663:
+ case 0x7622:
+ return false;
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -170,7 +170,7 @@ void mt76_connac_write_hw_txp(struct mt7
+
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
+
+- if (is_mt7663(dev) || is_mt7921(dev))
++ if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_AMSDU_LAST |
+@@ -214,7 +214,7 @@ mt76_connac_txp_skb_unmap_hw(struct mt76
+ u32 last_mask;
+ int i;
+
+- if (is_mt7663(dev) || is_mt7921(dev))
++ if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_MSDU_LAST;
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -66,6 +66,7 @@ int mt76_connac_mcu_init_download(struct
+
+ if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
+ (is_mt7921(dev) && addr == 0x900000) ||
++ (is_mt7925(dev) && addr == 0x900000) ||
+ (is_mt7996(dev) && addr == 0x900000))
+ cmd = MCU_CMD(PATCH_START_REQ);
+ else
+@@ -3080,7 +3081,7 @@ static u32 mt76_connac2_get_data_mode(st
+ {
+ u32 mode = DL_MODE_NEED_RSP;
+
+- if (!is_mt7921(dev) || info == PATCH_SEC_NOT_SUPPORT)
++ if ((!is_mt7921(dev) && !is_mt7925(dev)) || info == PATCH_SEC_NOT_SUPPORT)
+ return mode;
+
+ switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -1739,7 +1739,7 @@ mt76_connac_mcu_gen_dl_mode(struct mt76_
+
+ ret |= feature_set & FW_FEATURE_SET_ENCRYPT ?
+ DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0;
+- if (is_mt7921(dev))
++ if (is_mt7921(dev) || is_mt7925(dev))
+ ret |= feature_set & FW_FEATURE_ENCRY_MODE ?
+ DL_CONFIG_ENCRY_MODE_SEL : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
--- /dev/null
+From stable+bounces-242175-greg=kroah.com@vger.kernel.org Thu Apr 30 19:25:07 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 13:23:23 -0400
+Subject: wifi: mt76: mt792x: describe USB WFSYS reset with a descriptor
+To: stable@vger.kernel.org
+Cc: Sean Wang <sean.wang@mediatek.com>, Felix Fietkau <nbd@nbd.name>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430172324.1875442-2-sashal@kernel.org>
+
+From: Sean Wang <sean.wang@mediatek.com>
+
+[ Upstream commit e6f48512c1ceebcd1ce6bb83df3b3d56a261507d ]
+
+Prepare mt792xu_wfsys_reset() for chips that share the same USB WFSYS
+reset flow but use different register definitions.
+
+This is a pure refactor of the current mt7921u path and keeps the reset
+sequence unchanged.
+
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+Link: https://patch.msgid.link/20260311002825.15502-1-sean.wang@kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Stable-dep-of: 56154fef47d1 ("wifi: mt76: mt792x: fix mt7925u USB WFSYS reset handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 40 +++++++++++++++++++-----
+ 1 file changed, 32 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+@@ -208,6 +208,24 @@ static void mt792xu_epctl_rst_opt(struct
+ mt792xu_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
+ }
+
++struct mt792xu_wfsys_desc {
++ u32 rst_reg;
++ u32 done_reg;
++ u32 done_mask;
++ u32 done_val;
++ u32 delay_ms;
++ bool need_status_sel;
++};
++
++static const struct mt792xu_wfsys_desc mt7921_wfsys_desc = {
++ .rst_reg = MT_CBTOP_RGU_WF_SUBSYS_RST,
++ .done_reg = MT_UDMA_CONN_INFRA_STATUS,
++ .done_mask = MT_UDMA_CONN_WFSYS_INIT_DONE,
++ .done_val = MT_UDMA_CONN_WFSYS_INIT_DONE,
++ .delay_ms = 0,
++ .need_status_sel = true,
++};
++
+ int mt792xu_dma_init(struct mt792x_dev *dev, bool resume)
+ {
+ int err;
+@@ -238,25 +256,31 @@ EXPORT_SYMBOL_GPL(mt792xu_dma_init);
+
+ int mt792xu_wfsys_reset(struct mt792x_dev *dev)
+ {
++ const struct mt792xu_wfsys_desc *desc = &mt7921_wfsys_desc;
+ u32 val;
+ int i;
+
+ mt792xu_epctl_rst_opt(dev, false);
+
+- val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
++ val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg);
+ val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+- mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
++ mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val);
+
+- usleep_range(10, 20);
++ if (desc->delay_ms)
++ msleep(desc->delay_ms);
++ else
++ usleep_range(10, 20);
+
+- val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
++ val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg);
+ val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+- mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
++ mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val);
++
++ if (desc->need_status_sel)
++ mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+
+- mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+ for (i = 0; i < MT792x_WFSYS_INIT_RETRY_COUNT; i++) {
+- val = mt792xu_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS);
+- if (val & MT_UDMA_CONN_WFSYS_INIT_DONE)
++ val = mt792xu_uhw_rr(&dev->mt76, desc->done_reg);
++ if ((val & desc->done_mask) == desc->done_val)
+ break;
+
+ msleep(100);
--- /dev/null
+From stable+bounces-242176-greg=kroah.com@vger.kernel.org Thu Apr 30 19:25:11 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 13:23:24 -0400
+Subject: wifi: mt76: mt792x: fix mt7925u USB WFSYS reset handling
+To: stable@vger.kernel.org
+Cc: Sean Wang <sean.wang@mediatek.com>, Felix Fietkau <nbd@nbd.name>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430172324.1875442-3-sashal@kernel.org>
+
+From: Sean Wang <sean.wang@mediatek.com>
+
+[ Upstream commit 56154fef47d104effa9f29ed3db4f805cbc0d640 ]
+
+mt7925u uses different reset/status registers from mt7921u. Reusing the
+mt7921u register set causes the WFSYS reset to fail.
+
+Add a chip-specific descriptor in mt792xu_wfsys_reset() to select the
+correct registers and fix mt7925u failing to initialize after a warm
+reboot.
+
+Fixes: d28e1a48952e ("wifi: mt76: mt792x: introduce mt792x-usb module")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+Link: https://patch.msgid.link/20260311002825.15502-2-sean.wang@kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt792x_regs.h | 4 ++++
+ drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 13 ++++++++++++-
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
+@@ -385,6 +385,10 @@
+ #define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600)
+ #define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0)
+
++#define MT7925_CBTOP_RGU_WF_SUBSYS_RST 0x70028600
++#define MT7925_WFSYS_INIT_DONE_ADDR 0x184c1604
++#define MT7925_WFSYS_INIT_DONE 0x00001d1e
++
+ #define MT_HW_BOUND 0x70010020
+ #define MT_HW_CHIPID 0x70010200
+ #define MT_HW_REV 0x70010204
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+@@ -226,6 +226,15 @@ static const struct mt792xu_wfsys_desc m
+ .need_status_sel = true,
+ };
+
++static const struct mt792xu_wfsys_desc mt7925_wfsys_desc = {
++ .rst_reg = MT7925_CBTOP_RGU_WF_SUBSYS_RST,
++ .done_reg = MT7925_WFSYS_INIT_DONE_ADDR,
++ .done_mask = U32_MAX,
++ .done_val = MT7925_WFSYS_INIT_DONE,
++ .delay_ms = 20,
++ .need_status_sel = false,
++};
++
+ int mt792xu_dma_init(struct mt792x_dev *dev, bool resume)
+ {
+ int err;
+@@ -256,7 +265,9 @@ EXPORT_SYMBOL_GPL(mt792xu_dma_init);
+
+ int mt792xu_wfsys_reset(struct mt792x_dev *dev)
+ {
+- const struct mt792xu_wfsys_desc *desc = &mt7921_wfsys_desc;
++ const struct mt792xu_wfsys_desc *desc = is_mt7925(&dev->mt76) ?
++ &mt7925_wfsys_desc :
++ &mt7921_wfsys_desc;
+ u32 val;
+ int i;
+
--- /dev/null
+From stable+bounces-242168-greg=kroah.com@vger.kernel.org Thu Apr 30 18:58:39 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2026 12:54:06 -0400
+Subject: wifi: mwifiex: fix use-after-free in mwifiex_adapter_cleanup()
+To: stable@vger.kernel.org
+Cc: Daniel Hodges <git@danielhodges.dev>, Johannes Berg <johannes.berg@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260430165406.1860168-1-sashal@kernel.org>
+
+From: Daniel Hodges <git@danielhodges.dev>
+
+[ Upstream commit ae5e95d4157481693be2317e3ffcd84e36010cbb ]
+
+The mwifiex_adapter_cleanup() function uses timer_delete()
+(non-synchronous) for the wakeup_timer before the adapter structure is
+freed. This is incorrect because timer_delete() does not wait for any
+running timer callback to complete.
+
+If the wakeup_timer callback (wakeup_timer_fn) is executing when
+mwifiex_adapter_cleanup() is called, the callback will continue to
+access adapter fields (adapter->hw_status, adapter->if_ops.card_reset,
+etc.) which may be freed by mwifiex_free_adapter() called later in the
+mwifiex_remove_card() path.
+
+Use timer_delete_sync() instead to ensure any running timer callback has
+completed before returning.
+
+Fixes: 4636187da60b ("mwifiex: add wakeup timer based recovery mechanism")
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Hodges <git@danielhodges.dev>
+Link: https://patch.msgid.link/20260206194401.2346-1-git@danielhodges.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ changed `timer_delete_sync()` to `del_timer_sync()` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/marvell/mwifiex/init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/init.c
++++ b/drivers/net/wireless/marvell/mwifiex/init.c
+@@ -386,7 +386,7 @@ static void mwifiex_invalidate_lists(str
+ static void
+ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
+ {
+- del_timer(&adapter->wakeup_timer);
++ del_timer_sync(&adapter->wakeup_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
+ mwifiex_cancel_all_pending_cmd(adapter);
+ wake_up_interruptible(&adapter->cmd_wait_q.wait);
--- /dev/null
+From stable+bounces-244855-greg=kroah.com@vger.kernel.org Sat May 9 02:35:37 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 20:35:20 -0400
+Subject: wifi: rtl8xxxu: fix potential use of uninitialized value
+To: stable@vger.kernel.org
+Cc: Yi Cong <yicong@kylinos.cn>, Ping-Ke Shih <pkshih@realtek.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509003520.2360221-1-sashal@kernel.org>
+
+From: Yi Cong <yicong@kylinos.cn>
+
+[ Upstream commit f8a2fc809bfeb49130709b31a4d357a049f28547 ]
+
+The local variables 'mcs' and 'nss' in rtl8xxxu_update_ra_report() are
+passed to rtl8xxxu_desc_to_mcsrate() as output parameters. If the helper
+function encounters an unhandled rate index, it may return without setting
+these values, leading to the use of uninitialized stack data.
+
+Remove the helper rtl8xxxu_desc_to_mcsrate() and inline the logic into
+rtl8xxxu_update_ra_report(). This fixes the use of uninitialized 'mcs'
+and 'nss' variables for legacy rates.
+
+The new implementation explicitly handles:
+- Legacy rates: Set bitrate only.
+- HT rates (MCS0-15): Set MCS flags, index, and NSS (1 or 2) directly.
+- Invalid rates: Return early.
+
+Fixes: 7de16123d9e2 ("wifi: rtl8xxxu: Introduce rtl8xxxu_update_ra_report")
+Cc: stable@vger.kernel.org
+Suggested-by: Ping-Ke Shih <pkshih@realtek.com>
+Signed-off-by: Yi Cong <yicong@kylinos.cn>
+Link: https://lore.kernel.org/all/96e31963da0c42dcb52ce44f818963d7@realtek.com/
+Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
+Link: https://patch.msgid.link/20260306071627.56501-1-cong.yi@linux.dev
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 28 +++++-------------
+ 1 file changed, 8 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -4809,20 +4809,6 @@ static const struct ieee80211_rate rtl8x
+ {.bitrate = 540, .hw_value = 0x0b,},
+ };
+
+-static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+-{
+- if (rate <= DESC_RATE_54M)
+- return;
+-
+- if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
+- if (rate < DESC_RATE_MCS8)
+- *nss = 1;
+- else
+- *nss = 2;
+- *mcs = rate - DESC_RATE_MCS0;
+- }
+-}
+-
+ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
+ {
+ struct ieee80211_hw *hw = priv->hw;
+@@ -4927,23 +4913,25 @@ static void rtl8xxxu_set_aifs(struct rtl
+ void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
+ u8 rate, u8 sgi, u8 bw)
+ {
+- u8 mcs, nss;
+-
+ rarpt->txrate.flags = 0;
+
+ if (rate <= DESC_RATE_54M) {
+ rarpt->txrate.legacy = rtl8xxxu_legacy_ratetable[rate].bitrate;
+- } else {
+- rtl8xxxu_desc_to_mcsrate(rate, &mcs, &nss);
++ } else if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
+ rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS;
++ if (rate < DESC_RATE_MCS8)
++ rarpt->txrate.nss = 1;
++ else
++ rarpt->txrate.nss = 2;
+
+- rarpt->txrate.mcs = mcs;
+- rarpt->txrate.nss = nss;
++ rarpt->txrate.mcs = rate - DESC_RATE_MCS0;
+
+ if (sgi)
+ rarpt->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ rarpt->txrate.bw = bw;
++ } else {
++ return;
+ }
+
+ rarpt->bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
--- /dev/null
+From stable+bounces-244758-greg=kroah.com@vger.kernel.org Fri May 8 15:28:51 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 09:22:52 -0400
+Subject: xfs: fix a resource leak in xfs_alloc_buftarg()
+To: stable@vger.kernel.org
+Cc: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>, "Darrick J. Wong" <djwong@kernel.org>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260508132252.1479242-1-sashal@kernel.org>
+
+From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+
+[ Upstream commit 29a7b2614357393b176ef06ba5bc3ff5afc8df69 ]
+
+In the error path, call fs_put_dax() to drop the DAX
+device reference.
+
+Fixes: 6f643c57d57c ("xfs: implement ->notify_failure() for XFS")
+Cc: stable@vger.kernel.org
+Signed-off-by: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+[ kept `kmem_free(btp)` and `return NULL` instead of `kfree(btp)`/`ERR_PTR(error)` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_buf.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -2045,6 +2045,7 @@ error_pcpu:
+ error_lru:
+ list_lru_destroy(&btp->bt_lru);
+ error_free:
++ fs_put_dax(btp->bt_daxdev, mp);
+ kmem_free(btp);
+ return NULL;
+ }