--- /dev/null
+From d63c219b7ff39f897da10c160a2edef76320f16c Mon Sep 17 00:00:00 2001
+From: Tommaso Soncin <soncintommaso@gmail.com>
+Date: Wed, 29 Apr 2026 18:08:57 +0200
+Subject: ASoC: amd: yc: Add HP OMEN Gaming Laptop 16-ap0xxx product line in quirk table
+
+From: Tommaso Soncin <soncintommaso@gmail.com>
+
+commit d63c219b7ff39f897da10c160a2edef76320f16c upstream.
+
+Add a DMI quirk for the HP OMEN Gaming Laptop 16-ap0xxx line fixing the
+issue where the internal microphone was not detected.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tommaso Soncin <soncintommaso@gmail.com>
+Link: https://patch.msgid.link/20260429160858.538986-1-soncintommaso@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/amd/yc/acp6x-mach.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -55,6 +55,13 @@ static const struct dmi_system_id yc_acp
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN Gaming Laptop 16-ap0xxx"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5525"),
+ }
+@@ -648,6 +655,13 @@ static const struct dmi_system_id yc_acp
+ }
+ },
+ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8E35"),
++ }
++ },
++ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MECHREVO"),
--- /dev/null
+From 804dce6c73fdfa44184ee4e8b09abad7f5da408f Mon Sep 17 00:00:00 2001
+From: Joseph Salisbury <joseph.salisbury@oracle.com>
+Date: Mon, 16 Mar 2026 14:05:45 -0400
+Subject: ASoC: fsl_easrc: fix comment typo
+
+From: Joseph Salisbury <joseph.salisbury@oracle.com>
+
+commit 804dce6c73fdfa44184ee4e8b09abad7f5da408f upstream.
+
+The file contains a spelling error in a source comment (funciton).
+
+Typos in comments reduce readability and make text searches less reliable
+for developers and maintainers.
+
+Replace 'funciton' with 'function' in the affected comment. This is a
+comment-only cleanup and does not change behavior.
+
+Fixes: 955ac624058f ("ASoC: fsl_easrc: Add EASRC ASoC CPU DAI drivers")
+Cc: stable@vger.kernel.org
+Signed-off-by: Joseph Salisbury <joseph.salisbury@oracle.com>
+Link: https://patch.msgid.link/20260316180545.144032-1-joseph.salisbury@oracle.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/fsl/fsl_easrc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/fsl/fsl_easrc.c
++++ b/sound/soc/fsl/fsl_easrc.c
+@@ -1286,7 +1286,7 @@ static int fsl_easrc_request_context(int
+ /*
+ * Release the context
+ *
+- * This funciton is mainly doing the revert thing in request context
++ * This function is mainly doing the revert thing in request context
+ */
+ static void fsl_easrc_release_context(struct fsl_asrc_pair *ctx)
+ {
--- /dev/null
+From 13d30682e8dee191ac04e93642f0372a723e8b0c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?C=C3=A1ssio=20Gabriel?= <cassiogabrielcontato@gmail.com>
+Date: Mon, 27 Apr 2026 23:38:41 -0300
+Subject: ASoC: Intel: bytcr_wm5102: Fix MCLK leak on platform_clock_control error
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Cássio Gabriel <cassiogabrielcontato@gmail.com>
+
+commit 13d30682e8dee191ac04e93642f0372a723e8b0c upstream.
+
+If byt_wm5102_prepare_and_enable_pll1() fails in the
+SND_SOC_DAPM_EVENT_ON() path, platform_clock_control() returns after
+clk_prepare_enable(priv->mclk) without disabling the clock again.
+
+This leaks an MCLK enable reference on failed power-up attempts. Add the
+missing clk_disable_unprepare() on the error path, matching the unwind
+used by the other Intel platform_clock_control() implementations.
+
+Fixes: 9a87fc1e0619 ("ASoC: Intel: bytcr_wm5102: Add machine driver for BYT/WM5102")
+Cc: stable@vger.kernel.org
+Signed-off-by: Cássio Gabriel <cassiogabrielcontato@gmail.com>
+Reviewed-by: Cezary Rojewski <cezary.rojewski@intel.com>
+Reviewed-by: Hans de Goede <johannes.goede@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260427-bytcr-wm5102-mclk-leak-v1-1-02b96d08e99c@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/intel/boards/bytcr_wm5102.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/soc/intel/boards/bytcr_wm5102.c
++++ b/sound/soc/intel/boards/bytcr_wm5102.c
+@@ -171,6 +171,7 @@ static int platform_clock_control(struct
+ ret = byt_wm5102_prepare_and_enable_pll1(codec_dai, 48000);
+ if (ret) {
+ dev_err(card->dev, "Error setting codec sysclk: %d\n", ret);
++ clk_disable_unprepare(priv->mclk);
+ return ret;
+ }
+ } else {
--- /dev/null
+From cab45ab95ce7600fc0ff84585c77fd45b7b0d67c Mon Sep 17 00:00:00 2001
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Date: Thu, 2 Apr 2026 08:11:10 +0000
+Subject: ASoC: qcom: q6apm-dai: reset queue ptr on trigger stop
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+commit cab45ab95ce7600fc0ff84585c77fd45b7b0d67c upstream.
+
+Reset queue pointer on SNDRV_PCM_TRIGGER_STOP event to be inline
+with resetting appl_ptr. Without this we will end up with a queue_ptr
+out of sync and driver could try to send data that is not ready yet.
+
+Fix this by resetting the queue_ptr.
+
+Fixes: 3d4a4411aa8bb ("ASoC: q6apm-dai: schedule all available frames to avoid dsp under-runs")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260402081118.348071-6-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/qdsp6/q6apm-dai.c | 1 +
+ sound/soc/qcom/qdsp6/q6apm.c | 2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
+@@ -322,6 +322,7 @@ static int q6apm_dai_trigger(struct snd_
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* TODO support be handled via SoftPause Module */
+ prtd->state = Q6APM_STREAM_STOPPED;
++ prtd->queue_ptr = 0;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+--- a/sound/soc/qcom/qdsp6/q6apm.c
++++ b/sound/soc/qcom/qdsp6/q6apm.c
+@@ -225,6 +225,8 @@ int q6apm_map_memory_regions(struct q6ap
+
+ mutex_lock(&graph->lock);
+
++ data->dsp_buf = 0;
++
+ if (data->buf) {
+ mutex_unlock(&graph->lock);
+ return 0;
--- /dev/null
+From 69acc488aaf39d0ddf6c3cf0e47c1873d39919a2 Mon Sep 17 00:00:00 2001
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Date: Thu, 2 Apr 2026 08:11:09 +0000
+Subject: ASoC: qcom: q6apm-lpass-dai: Fix multiple graph opens
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+commit 69acc488aaf39d0ddf6c3cf0e47c1873d39919a2 upstream.
+
+As prepare can be called multiple times, this can result in multiple
+graph opens for playback path.
+
+This will result in a memory leaks, fix this by adding a check before
+opening.
+
+Fixes: be1fae62cf25 ("ASoC: q6apm-lpass-dai: close graph on prepare errors")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260402081118.348071-5-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/qdsp6/q6apm-lpass-dais.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -181,7 +181,7 @@ static int q6apm_lpass_dai_prepare(struc
+ * It is recommend to load DSP with source graph first and then sink
+ * graph, so sequence for playback and capture will be different
+ */
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && dai_data->graph[dai->id] == NULL) {
+ graph = q6apm_graph_open(dai->dev, NULL, dai->dev, graph_id);
+ if (IS_ERR(graph)) {
+ dev_err(dai->dev, "Failed to open graph (%d)\n", graph_id);
--- /dev/null
+From 4a0e1bcc98f7281d1605768bd2fe71eacc34f9b7 Mon Sep 17 00:00:00 2001
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Date: Thu, 2 Apr 2026 08:11:07 +0000
+Subject: ASoC: qcom: q6apm: remove child devices when apm is removed
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+commit 4a0e1bcc98f7281d1605768bd2fe71eacc34f9b7 upstream.
+
+looks like the q6apm driver does not remove the child drivers q6apm-dai and
+q6apm-bedais when this driver is removed.
+
+Fix this by depopulating them in remove callback.
+
+With this change when the dsp is shutdown all the devices associated with
+q6apm will now be removed.
+
+Fixes: 5477518b8a0e ("ASoC: qdsp6: audioreach: add q6apm support")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260402081118.348071-3-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/qdsp6/q6apm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/soc/qcom/qdsp6/q6apm.c
++++ b/sound/soc/qcom/qdsp6/q6apm.c
+@@ -781,6 +781,7 @@ static int apm_probe(gpr_device_t *gdev)
+
+ static void apm_remove(gpr_device_t *gdev)
+ {
++ of_platform_depopulate(&gdev->dev);
+ snd_soc_unregister_component(&gdev->dev);
+ }
+
--- /dev/null
+From 3f487be81292702a59ea9dbc4088b3360a50e837 Mon Sep 17 00:00:00 2001
+From: Guangshuo Li <lgs201920130244@gmail.com>
+Date: Wed, 1 Apr 2026 18:56:19 +0800
+Subject: btrfs: fix double free in create_space_info() error path
+
+From: Guangshuo Li <lgs201920130244@gmail.com>
+
+commit 3f487be81292702a59ea9dbc4088b3360a50e837 upstream.
+
+When kobject_init_and_add() fails, the call chain is:
+
+create_space_info()
+-> btrfs_sysfs_add_space_info_type()
+-> kobject_init_and_add()
+-> failure
+-> kobject_put(&space_info->kobj)
+-> space_info_release()
+-> kfree(space_info)
+
+Then control returns to create_space_info():
+
+btrfs_sysfs_add_space_info_type() returns error
+-> goto out_free
+-> kfree(space_info)
+
+This causes a double free.
+
+Keep the direct kfree(space_info) for the earlier failure path, but
+after btrfs_sysfs_add_space_info_type() has called kobject_put(), let
+the kobject release callback handle the cleanup.
+
+Fixes: a11224a016d6d ("btrfs: fix memory leaks in create_space_info() error paths")
+CC: stable@vger.kernel.org # 6.19+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Guangshuo Li <lgs201920130244@gmail.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/space-info.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -296,7 +296,7 @@ static int create_space_info(struct btrf
+
+ ret = btrfs_sysfs_add_space_info_type(info, space_info);
+ if (ret)
+- goto out_free;
++ return ret;
+
+ list_add(&space_info->list, &info->space_info);
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
--- /dev/null
+From b7cce3e2cca9cd78418f3c3784474b778e7996fe Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 16 Mar 2026 15:04:15 +0100
+Subject: dm: don't report warning when doing deferred remove
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit b7cce3e2cca9cd78418f3c3784474b778e7996fe upstream.
+
+If dm_hash_remove_all was called from dm_deferred_remove, it would write
+a warning "remove_all left %d open device(s)" if there are some other
+devices active.
+
+The warning is bogus, so let's disable it in this case.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reported-by: Zdenek Kabelac <zkabelac@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 2c140a246dc0 ("dm: allow remove to be deferred")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-ioctl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -384,7 +384,7 @@ retry:
+
+ up_write(&_hash_lock);
+
+- if (dev_skipped)
++ if (dev_skipped && !only_deferred)
+ DMWARN("remove_all left %d open device(s)", dev_skipped);
+ }
+
--- /dev/null
+From 2fa49cc884f6496a915c35621ba4da35649bf159 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 9 Apr 2026 17:49:58 +0200
+Subject: dm: fix a buffer overflow in ioctl processing
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 2fa49cc884f6496a915c35621ba4da35649bf159 upstream.
+
+Tony Asleson (using Claude) found a buffer overflow in dm-ioctl in the
+function retrieve_status:
+
+1. The code in retrieve_status checks that the output string fits into
+ the output buffer and writes the output string there
+2. Then, the code aligns the "outptr" variable to the next 8-byte
+ boundary:
+ outptr = align_ptr(outptr);
+3. The alignment doesn't check overflow, so outptr could point past the
+ buffer end
+4. The "for" loop is iterated again, it executes:
+ remaining = len - (outptr - outbuf);
+5. If "outptr" points past "outbuf + len", the arithmetics wraps around
+ and the variable "remaining" contains unusually high number
+6. With "remaining" being high, the code writes more data past the end of
+ the buffer
+
+Luckily, this bug has no security implications because:
+1. Only root can issue device mapper ioctls
+2. The commonly used libraries that communicate with device mapper
+ (libdevmapper and devicemapper-rs) use buffer size that is aligned to
+ 8 bytes - thus, "outptr = align_ptr(outptr)" can't overshoot the input
+ buffer and the bug can't happen accidentally
+
+Reported-by: Tony Asleson <tasleson@redhat.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Bryn M. Reeves <bmr@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-ioctl.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1341,6 +1341,10 @@ static void retrieve_status(struct dm_ta
+ used = param->data_start + (outptr - outbuf);
+
+ outptr = align_ptr(outptr);
++ if (!outptr || outptr > outbuf + len) {
++ param->flags |= DM_BUFFER_FULL_FLAG;
++ break;
++ }
+ spec->next = outptr - outbuf;
+ }
+
--- /dev/null
+From 09a65adc7d8bbfce06392cb6d375468e2728ead5 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 20 Apr 2026 19:56:44 +0200
+Subject: dm-thin: fix metadata refcount underflow
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 09a65adc7d8bbfce06392cb6d375468e2728ead5 upstream.
+
+There's a bug in dm-thin in the function rebalance_children. If the
+internal btree node has one entry, the code tries to copy all btree
+entries from the node's child to the node itself and then decrement the
+child's reference count.
+
+If the child node is shared (it has reference count > 1), we won't free
+it, so there would be two pointers to each of the grandchildren nodes.
+But the reference counts of the grandchildren is not increased, thus the
+reference count doesn't match the number of pointers that point to the
+grandchildren. This results in "device mapper: space map common: unable
+to decrement block" errors.
+
+Fix this bug by incrementing reference counts on the grandchildren if the
+btree node is shared.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Fixes: 3241b1d3e0aa ("dm: add persistent data library")
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/persistent-data/dm-btree-remove.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -490,12 +490,20 @@ static int rebalance_children(struct sha
+
+ if (le32_to_cpu(n->header.nr_entries) == 1) {
+ struct dm_block *child;
++ int is_shared;
+ dm_block_t b = value64(n, 0);
+
++ r = dm_tm_block_is_shared(info->tm, b, &is_shared);
++ if (r)
++ return r;
++
+ r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
+ if (r)
+ return r;
+
++ if (is_shared)
++ inc_children(info->tm, dm_block_data(child), vt);
++
+ memcpy(n, dm_block_data(child),
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+
--- /dev/null
+From 2b14e0bb63cc671120e7791658f5c494fc66d072 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Thu, 5 Feb 2026 20:59:20 -0800
+Subject: dm-verity-fec: correctly reject too-small FEC devices
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 2b14e0bb63cc671120e7791658f5c494fc66d072 upstream.
+
+Fix verity_fec_ctr() to reject too-small FEC devices by correctly
+computing the number of parity blocks as 'f->rounds * f->roots'.
+Previously it incorrectly used 'div64_u64(f->rounds * f->roots,
+v->fec->roots << SECTOR_SHIFT)' which is a much smaller value.
+
+Note that the units of 'rounds' are blocks, not bytes. This matches the
+units of the value returned by dm_bufio_get_device_size(), which are
+also blocks. A later commit will give 'rounds' a clearer name.
+
+Fixes: a739ff3f543a ("dm verity: add support for forward error correction")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-verity-fec.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -668,7 +668,7 @@ int verity_fec_ctr(struct dm_verity *v)
+ {
+ struct dm_verity_fec *f = v->fec;
+ struct dm_target *ti = v->ti;
+- u64 hash_blocks, fec_blocks;
++ u64 hash_blocks;
+ int ret;
+
+ if (!verity_fec_is_enabled(v)) {
+@@ -749,8 +749,7 @@ int verity_fec_ctr(struct dm_verity *v)
+
+ dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT));
+
+- fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT);
+- if (dm_bufio_get_device_size(f->bufio) < fec_blocks) {
++ if (dm_bufio_get_device_size(f->bufio) < f->rounds * f->roots) {
+ ti->error = "FEC device is too small";
+ return -E2BIG;
+ }
--- /dev/null
+From 4355142245f7e55336dcc005ec03592df4d546f8 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Thu, 5 Feb 2026 20:59:21 -0800
+Subject: dm-verity-fec: correctly reject too-small hash devices
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 4355142245f7e55336dcc005ec03592df4d546f8 upstream.
+
+Fix verity_fec_ctr() to reject too-small hash devices by correctly
+taking hash_start into account.
+
+Note that this is necessary because dm-verity doesn't call
+dm_bufio_set_sector_offset() on the hash device's bufio client
+(v->bufio). Thus, dm_bufio_get_device_size(v->bufio) returns a size
+relative to 0 rather than hash_start. An alternative fix would be to
+call dm_bufio_set_sector_offset() on v->bufio, but then all the code
+that reads from the hash device would have to be adjusted accordingly.
+
+Fixes: a739ff3f543a ("dm verity: add support for forward error correction")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-verity-fec.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -731,7 +731,8 @@ int verity_fec_ctr(struct dm_verity *v)
+ * it to be large enough.
+ */
+ f->hash_blocks = f->blocks - v->data_blocks;
+- if (dm_bufio_get_device_size(v->bufio) < f->hash_blocks) {
++ if (dm_bufio_get_device_size(v->bufio) <
++ v->hash_start + f->hash_blocks) {
+ ti->error = "Hash device is too small for "
+ DM_VERITY_OPT_FEC_BLOCKS;
+ return -E2BIG;
--- /dev/null
+From 07004a8c4b572171934390148ee48c4175c77eed Mon Sep 17 00:00:00 2001
+From: David Carlier <devnexen@gmail.com>
+Date: Sat, 18 Apr 2026 20:17:37 +0100
+Subject: eventfs: Hold eventfs_mutex and SRCU when remount walks events
+
+From: David Carlier <devnexen@gmail.com>
+
+commit 07004a8c4b572171934390148ee48c4175c77eed upstream.
+
+Commit 340f0c7067a9 ("eventfs: Update all the eventfs_inodes from the
+events descriptor") had eventfs_set_attrs() recurse through ei->children
+on remount. The walk only holds the rcu_read_lock() taken by
+tracefs_apply_options() over tracefs_inodes, which is wrong:
+
+ - list_for_each_entry over ei->children races with the list_del_rcu()
+ in eventfs_remove_rec() -- LIST_POISON1 deref, same shape as
+ d2603279c7d6.
+ - eventfs_inodes are freed via call_srcu(&eventfs_srcu, ...).
+ rcu_read_lock() does not extend an SRCU grace period, so ti->private
+ can be reclaimed under the walk.
+ - The writes to ei->attr race with eventfs_set_attr(), which holds
+ eventfs_mutex.
+
+Reproducer:
+
+ while :; do mount -o remount,uid=$((RANDOM%1000)) /sys/kernel/tracing; done &
+ while :; do
+ echo "p:kp submit_bio" > /sys/kernel/tracing/kprobe_events
+ echo > /sys/kernel/tracing/kprobe_events
+ done
+
+Wrap the events portion of tracefs_apply_options() in
+eventfs_remount_lock()/_unlock() that take eventfs_mutex and
+srcu_read_lock(&eventfs_srcu). eventfs_set_attrs() doesn't sleep so the
+nested rcu_read_lock() is fine; lockdep_assert_held() pins the contract.
+
+Comment in tracefs_drop_inode() said "RCU cycle" -- it is SRCU.
+
+Fixes: 340f0c7067a9 ("eventfs: Update all the eventfs_inodes from the events descriptor")
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260418191737.10289-1-devnexen@gmail.com
+Signed-off-by: David Carlier <devnexen@gmail.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/tracefs/event_inode.c | 14 ++++++++++++++
+ fs/tracefs/inode.c | 5 ++++-
+ fs/tracefs/internal.h | 3 +++
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -250,6 +250,8 @@ static void eventfs_set_attrs(struct eve
+ {
+ struct eventfs_inode *ei_child;
+
++ lockdep_assert_held(&eventfs_mutex);
++
+ /* Update events/<system>/<event> */
+ if (WARN_ON_ONCE(level > 3))
+ return;
+@@ -912,3 +914,15 @@ void eventfs_remove_events_dir(struct ev
+ d_invalidate(dentry);
+ dput(dentry);
+ }
++
++int eventfs_remount_lock(void)
++{
++ mutex_lock(&eventfs_mutex);
++ return srcu_read_lock(&eventfs_srcu);
++}
++
++void eventfs_remount_unlock(int srcu_idx)
++{
++ srcu_read_unlock(&eventfs_srcu, srcu_idx);
++ mutex_unlock(&eventfs_mutex);
++}
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -336,6 +336,7 @@ static int tracefs_apply_options(struct
+ struct inode *inode = d_inode(sb->s_root);
+ struct tracefs_inode *ti;
+ bool update_uid, update_gid;
++ int srcu_idx;
+ umode_t tmp_mode;
+
+ /*
+@@ -360,6 +361,7 @@ static int tracefs_apply_options(struct
+ update_uid = fsi->opts & BIT(Opt_uid);
+ update_gid = fsi->opts & BIT(Opt_gid);
+
++ srcu_idx = eventfs_remount_lock();
+ rcu_read_lock();
+ list_for_each_entry_rcu(ti, &tracefs_inodes, list) {
+ if (update_uid) {
+@@ -381,6 +383,7 @@ static int tracefs_apply_options(struct
+ eventfs_remount(ti, update_uid, update_gid);
+ }
+ rcu_read_unlock();
++ eventfs_remount_unlock(srcu_idx);
+ }
+
+ return 0;
+@@ -426,7 +429,7 @@ static int tracefs_drop_inode(struct ino
+ * This inode is being freed and cannot be used for
+ * eventfs. Clear the flag so that it doesn't call into
+ * eventfs during the remount flag updates. The eventfs_inode
+- * gets freed after an RCU cycle, so the content will still
++ * gets freed after an SRCU cycle, so the content will still
+ * be safe if the iteration is going on now.
+ */
+ ti->flags &= ~TRACEFS_EVENT_INODE;
+--- a/fs/tracefs/internal.h
++++ b/fs/tracefs/internal.h
+@@ -76,4 +76,7 @@ struct inode *tracefs_get_inode(struct s
+ void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid);
+ void eventfs_d_release(struct dentry *dentry);
+
++int eventfs_remount_lock(void);
++void eventfs_remount_unlock(int srcu_idx);
++
+ #endif /* _TRACEFS_INTERNAL_H */
--- /dev/null
+From 6fabce53f6b9c2419012a9103e1a46d40888cefa Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicolinc@nvidia.com>
+Date: Tue, 17 Mar 2026 00:59:16 -0700
+Subject: iommu/arm-smmu-v3: Add a missing dma_wmb() for hitless STE update
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+commit 6fabce53f6b9c2419012a9103e1a46d40888cefa upstream.
+
+When writing a new (previously invalid) valid IOPTE to a page table, then
+installing the page table into an STE hitlesslessly (e.g. in S2TTB field),
+there is a window before an STE invalidation, where the page-table may be
+accessed by SMMU but the new IOPTE is still siting in the CPU cache.
+
+This could occur when we allocate an iommu_domain and immediately install
+it hitlessly, while there would be no dma_wmb() for the page table memory
+prior to the earliest point of HW reading the STE.
+
+Fix it by adding a dma_wmb() prior to updating the STE.
+
+Fixes: 56e1a4cc2588 ("iommu/arm-smmu-v3: Add unit tests for arm_smmu_write_entry")
+Cc: stable@vger.kernel.org
+Reported-by: Will Deacon <will@kernel.org>
+Closes: https://lore.kernel.org/linux-iommu/aXdlnLLFUBwjT0V5@willie-the-truck/
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -1161,6 +1161,13 @@ void arm_smmu_write_entry(struct arm_smm
+ __le64 unused_update[NUM_ENTRY_QWORDS];
+ u8 used_qword_diff;
+
++ /*
++ * Many of the entry structures have pointers to other structures that
++ * need to have their updates be visible before any writes of the entry
++ * happen.
++ */
++ dma_wmb();
++
+ used_qword_diff =
+ arm_smmu_entry_qword_diff(writer, entry, target, unused_update);
+ if (hweight8(used_qword_diff) == 1) {
--- /dev/null
+From 24376458138387fb251e782e624c7776e9826796 Mon Sep 17 00:00:00 2001
+From: Michael Bommarito <michael.bommarito@gmail.com>
+Date: Sun, 19 Apr 2026 17:21:55 -0400
+Subject: isofs: validate block number from NFS file handle in isofs_export_iget
+
+From: Michael Bommarito <michael.bommarito@gmail.com>
+
+commit 24376458138387fb251e782e624c7776e9826796 upstream.
+
+isofs_fh_to_dentry() and isofs_fh_to_parent() pass an attacker-
+controlled block number (ifid->block or ifid->parent_block) from
+the NFS file handle to isofs_export_iget(), which only rejects
+block == 0 before calling isofs_iget() and ultimately sb_bread().
+A crafted file handle with fh_len sufficient to pass the check
+added by commit 0405d4b63d08 ("isofs: Prevent the use of too small
+fid") can still drive the server to read any in-range block on the
+backing device as if it were an iso_directory_record. That earlier
+fix was assigned CVE-2025-37780.
+
+sb_bread() on an out-of-range block returns NULL cleanly via the
+EIO path, so there is no memory-safety violation. For in-range
+reads of adjacent-partition data on the same block device, the
+unrelated bytes end up in iso_inode_info fields that reach the NFS
+client as dentry metadata. The deployment surface (isofs exported
+over NFS from loop-mounted images) is narrow and requires an
+authenticated NFS peer, but the malformed-file-handle class is
+reportable as hardening next to the existing CVE-2025-37780 fix.
+
+Reject block >= ISOFS_SB(sb)->s_nzones in isofs_export_iget() so
+the check covers both isofs_fh_to_dentry() and isofs_fh_to_parent()
+call sites with a single line.
+
+Fixes: 0405d4b63d08 ("isofs: Prevent the use of too small fid")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-7
+Signed-off-by: Michael Bommarito <michael.bommarito@gmail.com>
+Link: https://patch.msgid.link/20260419212155.2169382-3-michael.bommarito@gmail.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/isofs/export.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -24,7 +24,7 @@ isofs_export_iget(struct super_block *sb
+ {
+ struct inode *inode;
+
+- if (block == 0)
++ if (block == 0 || block >= ISOFS_SB(sb)->s_nzones)
+ return ERR_PTR(-ESTALE);
+ inode = isofs_iget(sb, block, offset);
+ if (IS_ERR(inode))
--- /dev/null
+From a36d990f591320e9dd379ab30063ebfe91d47e1f Mon Sep 17 00:00:00 2001
+From: Michael Bommarito <michael.bommarito@gmail.com>
+Date: Sun, 19 Apr 2026 17:21:54 -0400
+Subject: isofs: validate Rock Ridge CE continuation extent against volume size
+
+From: Michael Bommarito <michael.bommarito@gmail.com>
+
+commit a36d990f591320e9dd379ab30063ebfe91d47e1f upstream.
+
+rock_continue() reads rs->cont_extent verbatim from the Rock Ridge CE
+record and passes it to sb_bread() without checking that the block
+number is within the mounted ISO 9660 volume. commit e595447e177b
+("[PATCH] rock.c: handle corrupted directories") added cont_offset
+and cont_size rejection for the CE continuation but did not validate
+the extent block number itself. commit f54e18f1b831 ("isofs: Fix
+infinite looping over CE entries") later capped the CE chain length
+at RR_MAX_CE_ENTRIES = 32 but again left the block number unchecked.
+
+With a crafted ISO mounted via udisks2 (desktop optical auto-mount)
+or via CAP_SYS_ADMIN mount, rs->cont_extent can therefore point at
+an out-of-range block or at blocks belonging to an adjacent
+filesystem on the same block device. sb_bread() on an out-of-range
+block returns NULL cleanly via the block layer EIO path, so there
+is no memory-safety violation. For in-range reads of adjacent-
+filesystem data, the CE buffer is parsed as Rock Ridge records and
+only the text of SL sub-records reaches userspace through
+readlink(), which makes the info-leak channel narrow and difficult
+to exploit; still, rejecting the malformed CE outright matches the
+rejection shape already present in the same function for
+cont_offset and cont_size.
+
+Add an ISOFS_SB(sb)->s_nzones bounds check to rock_continue() next
+to the existing offset/size rejection, printing the same
+corrupted-directory-entry notice.
+
+Fixes: f54e18f1b831 ("isofs: Fix infinite looping over CE entries")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-7
+Signed-off-by: Michael Bommarito <michael.bommarito@gmail.com>
+Link: https://patch.msgid.link/20260419212155.2169382-2-michael.bommarito@gmail.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/isofs/rock.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -101,6 +101,15 @@ static int rock_continue(struct rock_sta
+ goto out;
+ }
+
++ if ((unsigned)rs->cont_extent >= ISOFS_SB(rs->inode->i_sb)->s_nzones) {
++ printk(KERN_NOTICE "rock: corrupted directory entry. "
++ "extent=%u out of volume (nzones=%lu)\n",
++ (unsigned)rs->cont_extent,
++ ISOFS_SB(rs->inode->i_sb)->s_nzones);
++ ret = -EIO;
++ goto out;
++ }
++
+ if (rs->cont_extent) {
+ struct buffer_head *bh;
+
--- /dev/null
+From 464af6fc2b1dcc74005b7f58ee3812b17777efee Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 27 Apr 2026 14:25:40 +0200
+Subject: KVM: x86: check for nEPT/nNPT in slow flush hypercalls
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 464af6fc2b1dcc74005b7f58ee3812b17777efee upstream.
+
+Checking is_guest_mode(vcpu) is incorrect, because translate_nested_gpa()
+is only valid if an L2 guest is running *with nested EPT/NPT enabled*.
+Instead use the same condition as translate_nested_gpa() itself.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Fixes: aee738236dca ("KVM: x86: Prepare kvm_hv_flush_tlb() to handle L2's GPAs", 2022-11-18)
+Link: https://patch.msgid.link/20260503200905.106077-1-pbonzini@redhat.com/
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/hyperv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -2038,7 +2038,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_v
+ * flush). Translate the address here so the memory can be uniformly
+ * read with kvm_read_guest().
+ */
+- if (!hc->fast && is_guest_mode(vcpu)) {
++ if (!hc->fast && mmu_is_nested(vcpu)) {
+ hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
+ if (unlikely(hc->ingpa == INVALID_GPA))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
--- /dev/null
+From 8c2f1288250a90a4b5cabed5d888d7e3aeed4035 Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Sun, 12 Apr 2026 16:19:47 +0200
+Subject: lib/crypto: mpi: Fix integer underflow in mpi_read_raw_from_sgl()
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 8c2f1288250a90a4b5cabed5d888d7e3aeed4035 upstream.
+
+Yiming reports an integer underflow in mpi_read_raw_from_sgl() when
+subtracting "lzeros" from the unsigned "nbytes".
+
+For this to happen, the scatterlist "sgl" needs to occupy more bytes
+than the "nbytes" parameter and the first "nbytes + 1" bytes of the
+scatterlist must be zero. Under these conditions, the while loop
+iterating over the scatterlist will count more zeroes than "nbytes",
+subtract the number of zeroes from "nbytes" and cause the underflow.
+
+When commit 2d4d1eea540b ("lib/mpi: Add mpi sgl helpers") originally
+introduced the bug, it couldn't be triggered because all callers of
+mpi_read_raw_from_sgl() passed a scatterlist whose length was equal to
+"nbytes".
+
+However since commit 63ba4d67594a ("KEYS: asymmetric: Use new crypto
+interface without scatterlists"), the underflow can now actually be
+triggered. When invoking a KEYCTL_PKEY_ENCRYPT system call with a
+larger "out_len" than "in_len" and filling the "in" buffer with zeroes,
+crypto_akcipher_sync_prep() will create an all-zero scatterlist used for
+both the "src" and "dst" member of struct akcipher_request and thereby
+fulfil the conditions to trigger the bug:
+
+ sys_keyctl()
+ keyctl_pkey_e_d_s()
+ asymmetric_key_eds_op()
+ software_key_eds_op()
+ crypto_akcipher_sync_encrypt()
+ crypto_akcipher_sync_prep()
+ crypto_akcipher_encrypt()
+ rsa_enc()
+ mpi_read_raw_from_sgl()
+
+To the user this will be visible as a DoS as the kernel spins forever,
+causing soft lockup splats as a side effect.
+
+Fix it.
+
+Reported-by: Yiming Qian <yimingqian591@gmail.com> # off-list
+Fixes: 2d4d1eea540b ("lib/mpi: Add mpi sgl helpers")
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org # v4.4+
+Reviewed-by: Ignat Korchagin <ignat@linux.win>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Link: https://lore.kernel.org/r/59eca92ff4f87e2081777f1423a0efaaadcfdb39.1776003111.git.lukas@wunner.de
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/crypto/mpi/mpicoder.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/crypto/mpi/mpicoder.c
++++ b/lib/crypto/mpi/mpicoder.c
+@@ -346,7 +346,7 @@ MPI mpi_read_raw_from_sgl(struct scatter
+ lzeros = 0;
+ len = 0;
+ while (nbytes > 0) {
+- while (len && !*buff) {
++ while (len && !*buff && lzeros < nbytes) {
+ lzeros++;
+ len--;
+ buff++;
--- /dev/null
+From 07b7d66e65d9cfe6b9c2c34aa22cfcaac37a5c45 Mon Sep 17 00:00:00 2001
+From: "Christian A. Ehrhardt" <lk@c--e.de>
+Date: Thu, 26 Mar 2026 22:49:01 +0100
+Subject: lib/scatterlist: fix length calculations in extract_kvec_to_sg
+
+From: Christian A. Ehrhardt <lk@c--e.de>
+
+commit 07b7d66e65d9cfe6b9c2c34aa22cfcaac37a5c45 upstream.
+
+Patch series "Fix bugs in extract_iter_to_sg()", v3.
+
+Fix bugs in the kvec and user variants of extract_iter_to_sg. This series
+is growing due to useful remarks made by sashiko.dev.
+
+The main bugs are:
+- The length for an sglist entry when extracting from
+ a kvec can exceed the number of bytes in the page. This
+ is obviously not intended.
+- When extracting a user buffer the sglist is temporarily
+ used as a scratch buffer for extracted page pointers.
+ If the sglist already contains some elements this scratch
+ buffer could overlap with existing entries in the sglist.
+
+The series adds test cases to the kunit_iov_iter test that demonstrate all
+of these bugs. Additionally, there is a memory leak fix for the test
+itself.
+
+The bugs were originally introduced into kernel v6.3 where the function
+lived in fs/netfs/iterator.c. It was later moved to lib/scatterlist.c in
+v6.5. Thus the actual fix is only marked for backports to v6.5+.
+
+
+This patch (of 5):
+
+When extracting from a kvec to a scatterlist, do not cross page
+boundaries. The required length was already calculated but not used as
+intended.
+
+Adjust the copied length if the loop runs out of sglist entries without
+extracting everything.
+
+While there, return immediately from extract_iter_to_sg if there are no
+sglist entries at all.
+
+A subsequent commit will add kunit test cases that demonstrate that the
+patch is necessary.
+
+Link: https://lkml.kernel.org/r/20260326214905.818170-1-lk@c--e.de
+Link: https://lkml.kernel.org/r/20260326214905.818170-2-lk@c--e.de
+Fixes: 018584697533 ("netfs: Add a function to extract an iterator into a scatterlist")
+Signed-off-by: Christian A. Ehrhardt <lk@c--e.de>
+Cc: David Gow <davidgow@google.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Kees Cook <kees@kernel.org>
+Cc: Petr Mladek <pmladek@suse.com>
+Cc: <stable@vger.kernel.org> [v6.5+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/scatterlist.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -1242,7 +1242,7 @@ static ssize_t extract_kvec_to_sg(struct
+ else
+ page = virt_to_page((void *)kaddr);
+
+- sg_set_page(sg, page, len, off);
++ sg_set_page(sg, page, seg, off);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+@@ -1251,6 +1251,7 @@ static ssize_t extract_kvec_to_sg(struct
+ kaddr += PAGE_SIZE;
+ off = 0;
+ } while (len > 0 && sg_max > 0);
++ ret -= len;
+
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+@@ -1404,7 +1405,7 @@ ssize_t extract_iter_to_sg(struct iov_it
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+ {
+- if (maxsize == 0)
++ if (maxsize == 0 || sg_max == 0)
+ return 0;
+
+ switch (iov_iter_type(iter)) {
--- /dev/null
+From 118cf3f55975352ac357fb194405031458186819 Mon Sep 17 00:00:00 2001
+From: "Christian A. Ehrhardt" <lk@c--e.de>
+Date: Thu, 26 Mar 2026 22:49:02 +0100
+Subject: lib/scatterlist: fix temp buffer in extract_user_to_sg()
+
+From: Christian A. Ehrhardt <lk@c--e.de>
+
+commit 118cf3f55975352ac357fb194405031458186819 upstream.
+
+Instead of allocating a temporary buffer for extracted user pages
+extract_user_to_sg() uses the end of the to be filled scatterlist as a
+temporary buffer.
+
+Fix the calculation of the start address if the scatterlist already
+contains elements. The unused space starts at sgtable->sgl +
+sgtable->nents not directly at sgtable->nents and the temporary buffer is
+placed at the end of this unused space.
+
+A subsequent commit will add kunit test cases that demonstrate that the
+patch is necessary.
+
+Pointed out by sashiko.dev on a previous iteration of this series.
+
+Link: https://lkml.kernel.org/r/20260326214905.818170-3-lk@c--e.de
+Fixes: 018584697533 ("netfs: Add a function to extract an iterator into a scatterlist")
+Signed-off-by: Christian A. Ehrhardt <lk@c--e.de>
+Cc: David Howells <dhowells@redhat.com>
+Cc: David Gow <davidgow@google.com>
+Cc: Kees Cook <kees@kernel.org>
+Cc: Petr Mladek <pmladek@suse.com>
+Cc: <stable@vger.kernel.org> [v6.5+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/scatterlist.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -1118,8 +1118,7 @@ static ssize_t extract_user_to_sg(struct
+ size_t len, off;
+
+ /* We decant the page list into the tail of the scatterlist */
+- pages = (void *)sgtable->sgl +
+- array_size(sg_max, sizeof(struct scatterlist));
++ pages = (void *)sg + array_size(sg_max, sizeof(struct scatterlist));
+ pages -= sg_max;
+
+ do {
--- /dev/null
+From 1c439de70b1c3eb3c6bffa8245c16b9fc318f114 Mon Sep 17 00:00:00 2001
+From: Raphael Zimmer <raphael.zimmer@tu-ilmenau.de>
+Date: Tue, 21 Apr 2026 10:27:01 +0200
+Subject: libceph: Fix slab-out-of-bounds access in auth message processing
+
+From: Raphael Zimmer <raphael.zimmer@tu-ilmenau.de>
+
+commit 1c439de70b1c3eb3c6bffa8245c16b9fc318f114 upstream.
+
+If a (potentially corrupted) message of type CEPH_MSG_AUTH_REPLY
+contains a positive value in its result field, it is treated as an
+error code by ceph_handle_auth_reply() and returned to
+handle_auth_reply(). Thereafter, an attempt is made to send the
+preallocated message of type CEPH_MSG_AUTH, where the returned value is
+interpreted as the size of the front segment to send. If the result
+value in the message is greater than the size of the memory buffer
+allocated for the front segment, an out-of-bounds access occurs, and
+the content of the memory region beyond this buffer is sent out.
+
+This patch fixes the issue by treating only negative values in the
+result field as errors. Positive values are therefore treated as success
+in the same way as a zero value. Additionally, a BUG_ON is added to
+__send_prepared_auth_request() comparing the len parameter to
+front_alloc_len to prevent sending the message if it exceeds the bounds
+of the allocation and to make it easier to catch any logic flaws leading
+to this.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Raphael Zimmer <raphael.zimmer@tu-ilmenau.de>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/auth.c | 2 +-
+ net/ceph/mon_client.c | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ceph/auth.c
++++ b/net/ceph/auth.c
+@@ -257,7 +257,7 @@ int ceph_handle_auth_reply(struct ceph_a
+ ac->negotiating = false;
+ }
+
+- if (result) {
++ if (result < 0) {
+ pr_err("auth protocol '%s' mauth authentication failed: %d\n",
+ ceph_auth_proto_name(ac->protocol), result);
+ ret = result;
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -174,6 +174,8 @@ int ceph_monmap_contains(struct ceph_mon
+ */
+ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
+ {
++ BUG_ON(len > monc->m_auth->front_alloc_len);
++
+ monc->pending_auth = 1;
+ monc->m_auth->front.iov_len = len;
+ monc->m_auth->hdr.front_len = cpu_to_le32(len);
--- /dev/null
+From 9aa6d860b0930e2f72795665c42c44252a558a0c Mon Sep 17 00:00:00 2001
+From: Junrui Luo <moonafterrain@outlook.com>
+Date: Thu, 16 Apr 2026 11:39:56 +0800
+Subject: md/raid10: fix divide-by-zero in setup_geo() with zero far_copies
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+commit 9aa6d860b0930e2f72795665c42c44252a558a0c upstream.
+
+setup_geo() extracts near_copies (nc) and far_copies (fc) from the
+user-provided layout parameter without checking for zero. When fc=0
+with the "improved" far set layout selected, 'geo->far_set_size =
+disks / fc' triggers a divide-by-zero.
+
+Validate nc and fc immediately after extraction, returning -1 if
+either is zero.
+
+Fixes: 475901aff158 ("MD RAID10: Improve redundancy for 'far' and 'offset' algorithms (part 1)")
+Cc: stable@vger.kernel.org
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Link: https://lore.kernel.org/linux-raid/SYBPR01MB7881A5E2556806CC1D318582AF232@SYBPR01MB7881.ausprd01.prod.outlook.com
+Signed-off-by: Yu Kuai <yukuai@fnnas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid10.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3872,6 +3872,8 @@ static int setup_geo(struct geom *geo, s
+ nc = layout & 255;
+ fc = (layout >> 8) & 255;
+ fo = layout & (1<<16);
++ if (!nc || !fc)
++ return -1;
+ geo->raid_disks = disks;
+ geo->near_copies = nc;
+ geo->far_copies = fc;
--- /dev/null
+From 1e68eb96e8beb1abefd12dd22c5637795d8a877e Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Thu, 23 Apr 2026 08:02:51 -0700
+Subject: mm/damon/sysfs-schemes: protect memcg_path kfree() with damon_sysfs_lock
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 1e68eb96e8beb1abefd12dd22c5637795d8a877e upstream.
+
+Patch series "mm/damon/sysfs-schemes: fix use-after-free for [memcg_]path".
+
+Reads of 'memcg_path' and 'path' files in DAMON sysfs interface could race
+with their writes, results in use-after-free. Fix those.
+
+
+This patch (of 2):
+
+damon_sysfs_scheme_filter->memcg_path can be read and written by users,
+via DAMON sysfs memcg_path file. It can also be indirectly read, for the
+parameters {on,off}line committing to DAMON. The reads for parameters
+committing are protected by damon_sysfs_lock to avoid the sysfs files
+being destroyed while any of the parameters are being read. But the
+user-driven direct reads and writes are not protected by any lock, while
+the write is deallocating the memcg_path-pointing buffer. As a result,
+the readers could read the already freed buffer (use-after-free). Note
+that the user-reads don't race when the same open file is used by the
+writer, due to kernfs's open file locking. Nonetheless, doing the reads
+and writes with separate open files would be common. Fix it by protecting
+both the user-direct reads and writes with damon_sysfs_lock.
+
+Link: https://lore.kernel.org/20260423150253.111520-1-sj@kernel.org
+Link: https://lore.kernel.org/20260423150253.111520-2-sj@kernel.org
+Fixes: 4f489fe6afb3 ("mm/damon/sysfs-schemes: free old damon_sysfs_scheme_filter->memcg_path on write")
+Co-developed-by: Junxi Qian <qjx1298677004@gmail.com>
+Signed-off-by: Junxi Qian <qjx1298677004@gmail.com>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> # 6.16.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs-schemes.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/mm/damon/sysfs-schemes.c
++++ b/mm/damon/sysfs-schemes.c
+@@ -407,9 +407,14 @@ static ssize_t memcg_path_show(struct ko
+ {
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
++ int len;
+
+- return sysfs_emit(buf, "%s\n",
++ if (!mutex_trylock(&damon_sysfs_lock))
++ return -EBUSY;
++ len = sysfs_emit(buf, "%s\n",
+ filter->memcg_path ? filter->memcg_path : "");
++ mutex_unlock(&damon_sysfs_lock);
++ return len;
+ }
+
+ static ssize_t memcg_path_store(struct kobject *kobj,
+@@ -423,8 +428,13 @@ static ssize_t memcg_path_store(struct k
+ return -ENOMEM;
+
+ strscpy(path, buf, count + 1);
++ if (!mutex_trylock(&damon_sysfs_lock)) {
++ kfree(path);
++ return -EBUSY;
++ }
+ kfree(filter->memcg_path);
+ filter->memcg_path = path;
++ mutex_unlock(&damon_sysfs_lock);
+ return count;
+ }
+
--- /dev/null
+From ba9d308ccd6732dd97ed8080d834a4a89e758e14 Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Wed, 8 Apr 2026 17:18:14 +0300
+Subject: nvme-apple: drop invalid put of admin queue reference count
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit ba9d308ccd6732dd97ed8080d834a4a89e758e14 upstream.
+
+Commit 03b3bcd319b3 ("nvme: fix admin request_queue lifetime") moved the
+admin queue reference ->put call into nvme_free_ctrl() - a controller
+device release callback performed for every nvme driver doing
+nvme_init_ctrl().
+
+nvme-apple sets refcount of the admin queue to 1 at allocation during the
+probe function and then puts it twice now:
+
+nvme_free_ctrl()
+ blk_put_queue(ctrl->admin_q) // #1
+ ->free_ctrl()
+ apple_nvme_free_ctrl()
+ blk_put_queue(anv->ctrl.admin_q) // #2
+
+Note that there is a commit 941f7298c70c ("nvme-apple: remove an extra
+queue reference") which intended to drop taking an extra admin queue
+reference. Looks like at that moment it accidentally fixed a refcount
+leak, which existed since the driver's introduction. There were two ->get
+calls at driver's probe function and a single ->put inside
+apple_nvme_free_ctrl().
+
+However now after commit 03b3bcd319b3 ("nvme: fix admin request_queue
+lifetime") the refcount is imbalanced again. Fix it by removing extra
+->put call from apple_nvme_free_ctrl(). anv->dev and ctrl->dev point to
+the same device, so use ctrl->dev directly for simplification. Compile
+tested only.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 03b3bcd319b3 ("nvme: fix admin request_queue lifetime")
+Cc: stable@vger.kernel.org
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/apple.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/nvme/host/apple.c
++++ b/drivers/nvme/host/apple.c
+@@ -1210,11 +1210,7 @@ static int apple_nvme_get_address(struct
+
+ static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
+ {
+- struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);
+-
+- if (anv->ctrl.admin_q)
+- blk_put_queue(anv->ctrl.admin_q);
+- put_device(anv->dev);
++ put_device(ctrl->dev);
+ }
+
+ static const struct nvme_ctrl_ops nvme_ctrl_ops = {
--- /dev/null
+From aade8abd8b868b6ffa9697aadaea28ec7f65bee6 Mon Sep 17 00:00:00 2001
+From: Chaitanya Kulkarni <kch@nvidia.com>
+Date: Wed, 8 Apr 2026 17:56:47 -0700
+Subject: nvmet: avoid recursive nvmet-wq flush in nvmet_ctrl_free
+
+From: Chaitanya Kulkarni <kch@nvidia.com>
+
+commit aade8abd8b868b6ffa9697aadaea28ec7f65bee6 upstream.
+
+nvmet_tcp_release_queue_work() runs on nvmet-wq and can drop the
+final controller reference through nvmet_cq_put(). If that triggers
+nvmet_ctrl_free(), the teardown path flushes ctrl->async_event_work on
+the same nvmet-wq.
+
+Call chain:
+
+ nvmet_tcp_schedule_release_queue()
+ kref_put(&queue->kref, nvmet_tcp_release_queue)
+ nvmet_tcp_release_queue()
+ queue_work(nvmet_wq, &queue->release_work) <--- nvmet_wq
+ process_one_work()
+ nvmet_tcp_release_queue_work()
+ nvmet_cq_put(&queue->nvme_cq)
+ nvmet_cq_destroy()
+ nvmet_ctrl_put(cq->ctrl)
+ nvmet_ctrl_free()
+ flush_work(&ctrl->async_event_work) <--- nvmet_wq
+
+ Previously Scheduled by :-
+ nvmet_add_async_event
+ queue_work(nvmet_wq, &ctrl->async_event_work);
+
+This trips lockdep with a possible recursive locking warning.
+
+[ 5223.015876] run blktests nvme/003 at 2026-04-07 20:53:55
+[ 5223.061801] loop0: detected capacity change from 0 to 2097152
+[ 5223.072206] nvmet: adding nsid 1 to subsystem blktests-subsystem-1
+[ 5223.088368] nvmet_tcp: enabling port 0 (127.0.0.1:4420)
+[ 5223.126086] nvmet: Created discovery controller 1 for subsystem nqn.2014-08.org.nvmexpress.discovery for NQN nqn.2014-08.org.nvmexpress:uuid:0f01fb42-9f7f-4856-b0b3-51e60b8de349.
+[ 5223.128453] nvme nvme1: new ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery", addr 127.0.0.1:4420, hostnqn: nqn.2014-08.org.nvmexpress:uuid:0f01fb42-9f7f-4856-b0b3-51e60b8de349
+[ 5233.199447] nvme nvme1: Removing ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery"
+
+[ 5233.227718] ============================================
+[ 5233.231283] WARNING: possible recursive locking detected
+[ 5233.234696] 7.0.0-rc3nvme+ #20 Tainted: G O N
+[ 5233.238434] --------------------------------------------
+[ 5233.241852] kworker/u192:6/2413 is trying to acquire lock:
+[ 5233.245429] ffff888111632548 ((wq_completion)nvmet-wq){+.+.}-{0:0}, at: touch_wq_lockdep_map+0x26/0x90
+[ 5233.251438]
+ but task is already holding lock:
+[ 5233.255254] ffff888111632548 ((wq_completion)nvmet-wq){+.+.}-{0:0}, at: process_one_work+0x5cc/0x6e0
+[ 5233.261125]
+ other info that might help us debug this:
+[ 5233.265333] Possible unsafe locking scenario:
+
+[ 5233.269217] CPU0
+[ 5233.270795] ----
+[ 5233.272436] lock((wq_completion)nvmet-wq);
+[ 5233.275241] lock((wq_completion)nvmet-wq);
+[ 5233.278020]
+ *** DEADLOCK ***
+
+[ 5233.281793] May be due to missing lock nesting notation
+
+[ 5233.286195] 3 locks held by kworker/u192:6/2413:
+[ 5233.289192] #0: ffff888111632548 ((wq_completion)nvmet-wq){+.+.}-{0:0}, at: process_one_work+0x5cc/0x6e0
+[ 5233.294569] #1: ffffc9000e2a7e40 ((work_completion)(&queue->release_work)){+.+.}-{0:0}, at: process_one_work+0x1c5/0x6e0
+[ 5233.300128] #2: ffffffff82d7dc40 (rcu_read_lock){....}-{1:3}, at: __flush_work+0x62/0x530
+[ 5233.304290]
+ stack backtrace:
+[ 5233.306520] CPU: 4 UID: 0 PID: 2413 Comm: kworker/u192:6 Tainted: G O N 7.0.0-rc3nvme+ #20 PREEMPT(full)
+[ 5233.306524] Tainted: [O]=OOT_MODULE, [N]=TEST
+[ 5233.306525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
+[ 5233.306527] Workqueue: nvmet-wq nvmet_tcp_release_queue_work [nvmet_tcp]
+[ 5233.306532] Call Trace:
+[ 5233.306534] <TASK>
+[ 5233.306536] dump_stack_lvl+0x73/0xb0
+[ 5233.306552] print_deadlock_bug+0x225/0x2f0
+[ 5233.306556] __lock_acquire+0x13f0/0x2290
+[ 5233.306563] lock_acquire+0xd0/0x300
+[ 5233.306565] ? touch_wq_lockdep_map+0x26/0x90
+[ 5233.306571] ? __flush_work+0x20b/0x530
+[ 5233.306573] ? touch_wq_lockdep_map+0x26/0x90
+[ 5233.306577] touch_wq_lockdep_map+0x3b/0x90
+[ 5233.306580] ? touch_wq_lockdep_map+0x26/0x90
+[ 5233.306583] ? __flush_work+0x20b/0x530
+[ 5233.306585] __flush_work+0x268/0x530
+[ 5233.306588] ? __pfx_wq_barrier_func+0x10/0x10
+[ 5233.306594] ? xen_error_entry+0x30/0x60
+[ 5233.306600] nvmet_ctrl_free+0x140/0x310 [nvmet]
+[ 5233.306617] nvmet_cq_put+0x74/0x90 [nvmet]
+[ 5233.306629] nvmet_tcp_release_queue_work+0x19f/0x360 [nvmet_tcp]
+[ 5233.306634] process_one_work+0x206/0x6e0
+[ 5233.306640] worker_thread+0x184/0x320
+[ 5233.306643] ? __pfx_worker_thread+0x10/0x10
+[ 5233.306646] kthread+0xf1/0x130
+[ 5233.306648] ? __pfx_kthread+0x10/0x10
+[ 5233.306651] ret_from_fork+0x355/0x450
+[ 5233.306653] ? __pfx_kthread+0x10/0x10
+[ 5233.306656] ret_from_fork_asm+0x1a/0x30
+[ 5233.306664] </TASK>
+
+There is also no need to flush async_event_work from controller
+teardown. The admin queue teardown already fails outstanding AER
+requests before the final controller put :-
+
+ nvmet_sq_destroy(admin sq)
+ nvmet_async_events_failall(ctrl)
+
+The controller has already been removed from the subsystem list before
+nvmet_ctrl_free() quiesces outstanding work.
+
+Replace flush_work() with cancel_work_sync() so a pending
+async_event_work item is canceled and a running instance is waited on
+without recursing into the same workqueue.
+
+Fixes: 06406d81a2d7 ("nvmet: cancel fatal error and flush async work before free controller")
+Cc: stable@vger.kernel.org
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/target/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1505,7 +1505,7 @@ static void nvmet_ctrl_free(struct kref
+
+ nvmet_stop_keep_alive_timer(ctrl);
+
+- flush_work(&ctrl->async_event_work);
++ cancel_work_sync(&ctrl->async_event_work);
+ cancel_work_sync(&ctrl->fatal_err_work);
+
+ nvmet_destroy_auth(ctrl);
--- /dev/null
+From 5293a8882c549fab4a878bc76b0b6c951f980a61 Mon Sep 17 00:00:00 2001
+From: Chaitanya Kulkarni <kch@nvidia.com>
+Date: Wed, 8 Apr 2026 00:51:31 -0700
+Subject: nvmet-tcp: fix race between ICReq handling and queue teardown
+
+From: Chaitanya Kulkarni <kch@nvidia.com>
+
+commit 5293a8882c549fab4a878bc76b0b6c951f980a61 upstream.
+
+nvmet_tcp_handle_icreq() updates queue->state after sending an
+Initialization Connection Response (ICResp), but it does so without
+serializing against target-side queue teardown.
+
+If an NVMe/TCP host sends an Initialization Connection Request
+(ICReq) and immediately closes the connection, target-side teardown
+may start in softirq context before io_work drains the already
+buffered ICReq. In that case, nvmet_tcp_schedule_release_queue()
+sets queue->state to NVMET_TCP_Q_DISCONNECTING and drops the queue
+reference under state_lock.
+
+If io_work later processes that ICReq, nvmet_tcp_handle_icreq() can
+still overwrite the state back to NVMET_TCP_Q_LIVE. That defeats the
+DISCONNECTING-state guard in nvmet_tcp_schedule_release_queue() and
+allows a later socket state change to re-enter teardown and issue a
+second kref_put() on an already released queue.
+
+The ICResp send failure path has the same problem. If teardown has
+already moved the queue to DISCONNECTING, a send error can still
+overwrite the state with NVMET_TCP_Q_FAILED, again reopening the
+window for a second teardown path to drop the queue reference.
+
+Fix this by serializing both post-send state transitions with
+state_lock and bailing out if teardown has already started.
+
+Use -ESHUTDOWN as an internal sentinel for that bail-out path rather
+than propagating it as a transport error like -ECONNRESET. Keep
+nvmet_tcp_socket_error() setting rcv_state to NVMET_TCP_RECV_ERR before
+honoring that sentinel so receive-side parsing stays quiesced until the
+existing release path completes.
+
+Fixes: c46a6465bac2 ("nvmet-tcp: add NVMe over TCP target driver")
+Cc: stable@vger.kernel.org
+Reported-by: Shivam Kumar <skumar47@syr.edu>
+Tested-by: Shivam Kumar <kumar.shivam43666@gmail.com>
+Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/target/tcp.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -406,6 +406,19 @@ static void nvmet_tcp_build_pdu_iovec(st
+
+ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
+ {
++ /*
++ * Keep rcv_state at RECV_ERR even for the internal -ESHUTDOWN path.
++ * nvmet_tcp_handle_icreq() can return -ESHUTDOWN after the ICReq has
++ * already been consumed and queue teardown has started.
++ *
++ * If nvmet_tcp_data_ready() or nvmet_tcp_write_space() queues
++ * nvmet_tcp_io_work() again before nvmet_tcp_release_queue_work()
++ * cancels it, the queue must not keep that old receive state.
++ * Otherwise the next nvmet_tcp_io_work() run can reach
++ * nvmet_tcp_done_recv_pdu() and try to handle the same ICReq again.
++ *
++ * That is why queue->rcv_state needs to be updated before we return.
++ */
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
+ if (queue->nvme_sq.ctrl)
+ nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
+@@ -962,11 +975,24 @@ static int nvmet_tcp_handle_icreq(struct
+ iov.iov_len = sizeof(*icresp);
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (ret < 0) {
++ spin_lock_bh(&queue->state_lock);
++ if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
++ spin_unlock_bh(&queue->state_lock);
++ return -ESHUTDOWN;
++ }
+ queue->state = NVMET_TCP_Q_FAILED;
++ spin_unlock_bh(&queue->state_lock);
+ return ret; /* queue removal will cleanup */
+ }
+
++ spin_lock_bh(&queue->state_lock);
++ if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
++ spin_unlock_bh(&queue->state_lock);
++ /* Tell nvmet_tcp_socket_error() teardown is in progress. */
++ return -ESHUTDOWN;
++ }
+ queue->state = NVMET_TCP_Q_LIVE;
++ spin_unlock_bh(&queue->state_lock);
+ nvmet_prepare_receive_pdu(queue);
+ return 0;
+ }
--- /dev/null
+From aa69918bd418e700309fdd08509dba324fb24296 Mon Sep 17 00:00:00 2001
+From: Ilya Maximets <i.maximets@ovn.org>
+Date: Fri, 1 May 2026 01:38:37 +0200
+Subject: openvswitch: vport: fix self-deadlock on release of tunnel ports
+
+From: Ilya Maximets <i.maximets@ovn.org>
+
+commit aa69918bd418e700309fdd08509dba324fb24296 upstream.
+
+vports are used concurrently and protected by RCU, so netdev_put()
+must happen after the RCU grace period. So, either in an RCU call or
+after the synchronize_net(). The rtnl_delete_link() must happen under
+RTNL and so can't be executed in RCU context. Calling synchronize_net()
+while holding RTNL is not a good idea for performance and system
+stability under load in general, so calling netdev_put() in RCU call
+is the right solution here.
+
+However,
+when the device is deleted, rtnl_unlock() will call netdev_run_todo()
+and block until all the references are gone. In the current code this
+means that we never reach the call_rcu() and the vport is never freed
+and the reference is never released, causing a self-deadlock on device
+removal.
+
+Fix that by moving the call_rcu() before the rtnl_unlock(), so the
+scheduled RCU callback will be executed when synchronize_net() is
+called from the rtnl_unlock()->netdev_run_todo() while the RTNL itself
+is already released.
+
+Fixes: 6931d21f87bc ("openvswitch: defer tunnel netdev_put to RCU release")
+Cc: stable@vger.kernel.org
+Acked-by: Eelco Chaudron <echaudro@redhat.com>
+Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+Acked-by: Aaron Conole <aconole@redhat.com>
+Link: https://patch.msgid.link/20260430233848.440994-2-i.maximets@ovn.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/vport-netdev.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/openvswitch/vport-netdev.c
++++ b/net/openvswitch/vport-netdev.c
+@@ -196,9 +196,13 @@ void ovs_netdev_tunnel_destroy(struct vp
+ */
+ if (vport->dev->reg_state == NETREG_REGISTERED)
+ rtnl_delete_link(vport->dev, 0, NULL);
+- rtnl_unlock();
+
++ /* We can't put the device reference yet, since it can still be in
++ * use, but rtnl_unlock()->netdev_run_todo() will block until all
++ * the references are released, so the RCU call must be before it.
++ */
+ call_rcu(&vport->rcu, vport_netdev_free);
++ rtnl_unlock();
+ }
+ EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
+
--- /dev/null
+From 909f7bf9b080c10df3c3b38533906dbf09ff1d8b Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Wed, 15 Apr 2026 17:56:06 +0200
+Subject: PCI: Update saved_config_space upon resource assignment
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 909f7bf9b080c10df3c3b38533906dbf09ff1d8b upstream.
+
+Bernd reports passthrough failure of a Digital Devices Cine S2 V6 DVB
+adapter plugged into an ASRock X570S PG Riptide board with BIOS version
+P5.41 (09/07/2023):
+
+ ddbridge 0000:05:00.0: detected Digital Devices Cine S2 V6 DVB adapter
+ ddbridge 0000:05:00.0: cannot read registers
+ ddbridge 0000:05:00.0: fail
+
+BIOS assigns an incorrect BAR to the DVB adapter which doesn't fit into the
+upstream bridge window. The kernel corrects the BAR assignment:
+
+ pci 0000:07:00.0: BAR 0 [mem 0xfffffffffc500000-0xfffffffffc50ffff 64bit]: can't claim; no compatible bridge window
+ pci 0000:07:00.0: BAR 0 [mem 0xfc500000-0xfc50ffff 64bit]: assigned
+
+Correction of the BAR assignment happens in an x86-specific fs_initcall,
+pcibios_assign_resources(), after device enumeration in a subsys_initcall.
+This order was introduced at the behest of Linus in 2004:
+
+ https://git.kernel.org/tglx/history/c/a06a30144bbc
+
+No other architecture performs such a late BAR correction.
+
+Bernd bisected the issue to commit a2f1e22390ac ("PCI/ERR: Ensure error
+recoverability at all times"), but it only occurs in the absence of commit
+4d4c10f763d7 ("PCI: Explicitly put devices into D0 when initializing").
+This combination exists in stable kernel v6.12.70, but not in mainline,
+hence Bernd cannot reproduce the issue with mainline.
+
+Since a2f1e22390ac, config space is saved on enumeration, prior to BAR
+correction. Upon passthrough, the corrected BAR is overwritten with the
+incorrect saved value by:
+
+ vfio_pci_core_register_device()
+ vfio_pci_set_power_state()
+ pci_restore_state()
+
+But only if the device's current_state is PCI_UNKNOWN, as it was prior to
+commit 4d4c10f763d7. Since the commit, it is PCI_D0, which changes the
+behavior of vfio_pci_set_power_state() to no longer restore the state
+without saving it first.
+
+Alexandre is reporting the same issue as Bernd, but in his case, mainline
+is affected as well. The difference is that on Alexandre's system, the
+host kernel binds a driver to the device which is unbound prior to
+passthrough, whereas on Bernd's system no driver gets bound by the host
+kernel.
+
+Unbinding sets current_state to PCI_UNKNOWN in pci_device_remove(), so when
+vfio-pci is subsequently bound to the device, pci_restore_state() is once
+again called without invoking pci_save_state() first.
+
+To robustly fix the issue, always update saved_config_space upon resource
+assignment.
+
+Reported-by: Bernd Schumacher <bernd@bschu.de>
+Closes: https://lore.kernel.org/r/acfZrlP0Ua_5D3U4@eldamar.lan/
+Reported-by: Alexandre N. <an.tech@mailo.com>
+Closes: https://lore.kernel.org/r/dd3c3358-de0f-4a56-9c81-04aceaab4058@mailo.com/
+Fixes: a2f1e22390ac ("PCI/ERR: Ensure error recoverability at all times")
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Bernd Schumacher <bernd@bschu.de>
+Tested-by: Alexandre N. <an.tech@mailo.com>
+Cc: stable@vger.kernel.org # v6.12+
+Link: https://patch.msgid.link/febc3f354e0c1f5a9f5b3ee9ffddaa44caccf651.1776268054.git.lukas@wunner.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/setup-res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -102,6 +102,7 @@ static void pci_std_update_resource(stru
+ }
+
+ pci_write_config_dword(dev, reg, new);
++ dev->saved_config_space[reg / 4] = new;
+ pci_read_config_dword(dev, reg, &check);
+
+ if ((new ^ check) & mask) {
+@@ -112,6 +113,7 @@ static void pci_std_update_resource(stru
+ if (res->flags & IORESOURCE_MEM_64) {
+ new = region.start >> 16 >> 16;
+ pci_write_config_dword(dev, reg + 4, new);
++ dev->saved_config_space[(reg + 4) / 4] = new;
+ pci_read_config_dword(dev, reg + 4, &check);
+ if (check != new) {
+ pci_err(dev, "%s: error updating (high %#010x != %#010x)\n",
--- /dev/null
+From 26735dfdd8930d9ef1fa92e590a9bf77726efdf6 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@linaro.org>
+Date: Fri, 17 Apr 2026 13:13:31 +0200
+Subject: pmdomain: core: Fix detach procedure for virtual devices in genpd
+
+From: Ulf Hansson <ulf.hansson@linaro.org>
+
+commit 26735dfdd8930d9ef1fa92e590a9bf77726efdf6 upstream.
+
+If a device is attached to a PM domain through genpd_dev_pm_attach_by_id(),
+genpd calls pm_runtime_enable() for the corresponding virtual device that
+it registers. While this avoids boilerplate code in drivers, there is no
+corresponding call to pm_runtime_disable() in genpd_dev_pm_detach().
+
+This means these virtual devices are typically detached from their genpd,
+while runtime PM remains enabled for them, which is not how things are
+designed to work.
+NULL pointer dereference bug in genpd_runtime_suspend(), which was recently
+reported. For another case, we may end up keeping an unnecessary vote for a
+performance state for the device.
+
+To fix these problems, let's add this missing call to pm_runtime_disable()
+in genpd_dev_pm_detach().
+
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Closes: https://lore.kernel.org/all/CAMuHMdWapT40hV3c+CSBqFOW05aWcV1a6v_NiJYgoYi0i9_PDQ@mail.gmail.com/
+Fixes: 3c095f32a92b ("PM / Domains: Add support for multi PM domains per device to genpd")
+Cc: stable@vger.kernel.org
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pmdomain/core.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -2900,6 +2900,7 @@ static const struct bus_type genpd_bus_t
+ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
+ {
+ struct generic_pm_domain *pd;
++ bool is_virt_dev;
+ unsigned int i;
+ int ret = 0;
+
+@@ -2909,6 +2910,13 @@ static void genpd_dev_pm_detach(struct d
+
+ dev_dbg(dev, "removing from PM domain %s\n", pd->name);
+
++ /* Check if the device was created by genpd at attach. */
++ is_virt_dev = dev->bus == &genpd_bus_type;
++
++ /* Disable runtime PM if we enabled it at attach. */
++ if (is_virt_dev)
++ pm_runtime_disable(dev);
++
+ /* Drop the default performance state */
+ if (dev_gpd_data(dev)->default_pstate) {
+ dev_pm_genpd_set_performance_state(dev, 0);
+@@ -2934,7 +2942,7 @@ static void genpd_dev_pm_detach(struct d
+ genpd_queue_power_off_work(pd);
+
+ /* Unregister the device if it was created by genpd. */
+- if (dev->bus == &genpd_bus_type)
++ if (is_virt_dev)
+ device_unregister(dev);
+ }
+
--- /dev/null
+From 0c99acbc8b6c6dd526ae475a48ee1897b61072fb Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@nvidia.com>
+Date: Tue, 28 Apr 2026 13:17:48 -0300
+Subject: RDMA/hns: Fix unlocked call to hns_roce_qp_remove()
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+commit 0c99acbc8b6c6dd526ae475a48ee1897b61072fb upstream.
+
+Sashiko points out that hns_roce_qp_remove() requires the caller to hold
+locks. The error flow in hns_roce_create_qp_common() doesn't hold those
+locks for the error unwind so it risks corrupting memory.
+
+Grab the same locks the other two callers use.
+
+Cc: stable@vger.kernel.org
+Fixes: e088a685eae9 ("RDMA/hns: Support rq record doorbell for the user space")
+Link: https://sashiko.dev/#/patchset/0-v2-1c49eeb88c48%2B91-rdma_udata_rep_jgg%40nvidia.com?part=9
+Link: https://patch.msgid.link/r/15-v1-41f3135e5565+9d2-rdma_ai_fixes1_jgg@nvidia.com
+Reviewed-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1150,6 +1150,7 @@ static int hns_roce_create_qp_common(str
+ struct hns_roce_ib_create_qp_resp resp = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_qp ucmd = {};
++ unsigned long flags;
+ int ret;
+
+ mutex_init(&hr_qp->mutex);
+@@ -1236,7 +1237,13 @@ static int hns_roce_create_qp_common(str
+ return 0;
+
+ err_flow_ctrl:
++ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
++ hns_roce_lock_cqs(init_attr->send_cq ? to_hr_cq(init_attr->send_cq) : NULL,
++ init_attr->recv_cq ? to_hr_cq(init_attr->recv_cq) : NULL);
+ hns_roce_qp_remove(hr_dev, hr_qp);
++ hns_roce_unlock_cqs(init_attr->send_cq ? to_hr_cq(init_attr->send_cq) : NULL,
++ init_attr->recv_cq ? to_hr_cq(init_attr->recv_cq) : NULL);
++ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+ err_store:
+ free_qpc(hr_dev, hr_qp);
+ err_qpc:
--- /dev/null
+From b7c958d7c1eb1cb9b2be7b5ee4129fcd66cec978 Mon Sep 17 00:00:00 2001
+From: Osama Abdelkader <osama.abdelkader@gmail.com>
+Date: Mon, 16 Mar 2026 16:16:11 +0100
+Subject: riscv: kvm: fix vector context allocation leak
+
+From: Osama Abdelkader <osama.abdelkader@gmail.com>
+
+commit b7c958d7c1eb1cb9b2be7b5ee4129fcd66cec978 upstream.
+
+When the second kzalloc (host_context.vector.datap) fails in
+kvm_riscv_vcpu_alloc_vector_context, the first allocation
+(guest_context.vector.datap) is leaked. Free it before returning.
+
+Fixes: 0f4b82579716 ("riscv: KVM: Add vector lazy save/restore support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Osama Abdelkader <osama.abdelkader@gmail.com>
+Reviewed-by: Andy Chiu <andybnac@gmail.com>
+Link: https://lore.kernel.org/r/20260316151612.13305-1-osama.abdelkader@gmail.com
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kvm/vcpu_vector.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/riscv/kvm/vcpu_vector.c
++++ b/arch/riscv/kvm/vcpu_vector.c
+@@ -79,8 +79,11 @@ int kvm_riscv_vcpu_alloc_vector_context(
+ cntx->vector.vlenb = riscv_v_vsize / 32;
+
+ vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+- if (!vcpu->arch.host_context.vector.datap)
++ if (!vcpu->arch.host_context.vector.datap) {
++ kfree(vcpu->arch.guest_context.vector.datap);
++ vcpu->arch.guest_context.vector.datap = NULL;
+ return -ENOMEM;
++ }
+
+ return 0;
+ }
--- /dev/null
+From e14622a7584f9608927c59a7d6ae4a0999dc545e Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Fri, 17 Apr 2026 14:33:43 +0200
+Subject: s390/debug: Reject zero-length input in debug_input_flush_fn()
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit e14622a7584f9608927c59a7d6ae4a0999dc545e upstream.
+
+debug_input_flush_fn() always copies one byte from the userspace buffer
+with copy_from_user() regardless of the supplied write length. A
+zero-length write therefore reads one byte beyond the caller's buffer.
+If the stale byte happens to be '-' or a digit, the debug log is
+silently flushed. With an unmapped buffer the call returns -EFAULT.
+
+Reject zero-length writes before copying from userspace.
+
+Cc: stable@vger.kernel.org # v5.10+
+Acked-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/debug.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/s390/kernel/debug.c
++++ b/arch/s390/kernel/debug.c
+@@ -1432,6 +1432,11 @@ static int debug_input_flush_fn(debug_in
+ char input_buf[1];
+ int rc = user_len;
+
++ if (!user_len) {
++ rc = -EINVAL;
++ goto out;
++ }
++
+ if (user_len > 0x10000)
+ user_len = 0x10000;
+ if (*offset != 0) {
clk-imx-imx8-acm-fix-flags-for-acm-clocks.patch
clk-microchip-mpfs-ccc-fix-out-of-bounds-access-during-output-registration.patch
cpuidle-powerpc-avoid-double-clear-when-breaking-snooze.patch
+asoc-amd-yc-add-hp-omen-gaming-laptop-16-ap0xxx-product-line-in-quirk-table.patch
+asoc-fsl_easrc-fix-comment-typo.patch
+asoc-intel-bytcr_wm5102-fix-mclk-leak-on-platform_clock_control-error.patch
+asoc-qcom-q6apm-dai-reset-queue-ptr-on-trigger-stop.patch
+asoc-qcom-q6apm-lpass-dai-fix-multiple-graph-opens.patch
+asoc-qcom-q6apm-remove-child-devices-when-apm-is-removed.patch
+btrfs-fix-double-free-in-create_space_info-error-path.patch
+dm-thin-fix-metadata-refcount-underflow.patch
+dm-don-t-report-warning-when-doing-deferred-remove.patch
+dm-fix-a-buffer-overflow-in-ioctl-processing.patch
+eventfs-hold-eventfs_mutex-and-srcu-when-remount-walks-events.patch
+dm-verity-fec-correctly-reject-too-small-fec-devices.patch
+dm-verity-fec-correctly-reject-too-small-hash-devices.patch
+isofs-validate-rock-ridge-ce-continuation-extent-against-volume-size.patch
+isofs-validate-block-number-from-nfs-file-handle-in-isofs_export_iget.patch
+iommu-arm-smmu-v3-add-a-missing-dma_wmb-for-hitless-ste-update.patch
+lib-crypto-mpi-fix-integer-underflow-in-mpi_read_raw_from_sgl.patch
+lib-scatterlist-fix-length-calculations-in-extract_kvec_to_sg.patch
+lib-scatterlist-fix-temp-buffer-in-extract_user_to_sg.patch
+libceph-fix-slab-out-of-bounds-access-in-auth-message-processing.patch
+md-raid10-fix-divide-by-zero-in-setup_geo-with-zero-far_copies.patch
+nvme-apple-drop-invalid-put-of-admin-queue-reference-count.patch
+nvmet-tcp-fix-race-between-icreq-handling-and-queue-teardown.patch
+nvmet-avoid-recursive-nvmet-wq-flush-in-nvmet_ctrl_free.patch
+openvswitch-vport-fix-self-deadlock-on-release-of-tunnel-ports.patch
+pmdomain-core-fix-detach-procedure-for-virtual-devices-in-genpd.patch
+rdma-hns-fix-unlocked-call-to-hns_roce_qp_remove.patch
+riscv-kvm-fix-vector-context-allocation-leak.patch
+s390-debug-reject-zero-length-input-in-debug_input_flush_fn.patch
+smb-client-fix-out-of-bounds-read-in-smb2_compound_op.patch
+smb-client-fix-out-of-bounds-read-in-symlink_data.patch
+smb-client-use-kzalloc-to-zero-initialize-security-descriptor-buffer.patch
+smb-client-validate-dacloffset-before-building-dacl-pointers.patch
+kvm-x86-check-for-nept-nnpt-in-slow-flush-hypercalls.patch
+mm-damon-sysfs-schemes-protect-memcg_path-kfree-with-damon_sysfs_lock.patch
+pci-update-saved_config_space-upon-resource-assignment.patch
--- /dev/null
+From 8d09328dfda089675e4c049f3f256064a1d1996b Mon Sep 17 00:00:00 2001
+From: Zisen Ye <zisenye@stu.xidian.edu.cn>
+Date: Wed, 6 May 2026 11:49:08 +0800
+Subject: smb/client: fix out-of-bounds read in smb2_compound_op()
+
+From: Zisen Ye <zisenye@stu.xidian.edu.cn>
+
+commit 8d09328dfda089675e4c049f3f256064a1d1996b upstream.
+
+If a server sends a truncated response but a large OutputBufferLength, and
+terminates the EA list early, check_wsl_eas() returns success without
+validating that the entire OutputBufferLength fits within iov_len.
+
+Then smb2_compound_op() does:
+ memcpy(idata->wsl.eas, data[0], size[0]);
+
+Where size[0] is OutputBufferLength. If iov_len is smaller than size[0],
+memcpy can read beyond the end of the rsp_iov allocation and leak adjacent
+kernel heap memory.
+
+Link: https://lore.kernel.org/linux-cifs/d998240c-aca9-420d-9dbd-f5ba24af19e0@chenxiaosong.com/
+Fixes: ea41367b2a60 ("smb: client: introduce SMB2_OP_QUERY_WSL_EA")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zisen Ye <zisenye@stu.xidian.edu.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2inode.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -108,7 +108,7 @@ static int check_wsl_eas(struct kvec *rs
+ u32 outlen, next;
+ u16 vlen;
+ u8 nlen;
+- u8 *end;
++ u8 *ea_end, *iov_end;
+
+ outlen = le32_to_cpu(rsp->OutputBufferLength);
+ if (outlen < SMB2_WSL_MIN_QUERY_EA_RESP_SIZE ||
+@@ -117,15 +117,19 @@ static int check_wsl_eas(struct kvec *rs
+
+ ea = (void *)((u8 *)rsp_iov->iov_base +
+ le16_to_cpu(rsp->OutputBufferOffset));
+- end = (u8 *)rsp_iov->iov_base + rsp_iov->iov_len;
++ ea_end = (u8 *)ea + outlen;
++ iov_end = (u8 *)rsp_iov->iov_base + rsp_iov->iov_len;
++ if (ea_end > iov_end)
++ return -EINVAL;
++
+ for (;;) {
+- if ((u8 *)ea > end - sizeof(*ea))
++ if ((u8 *)ea > ea_end - sizeof(*ea))
+ return -EINVAL;
+
+ nlen = ea->ea_name_length;
+ vlen = le16_to_cpu(ea->ea_value_length);
+ if (nlen != SMB2_WSL_XATTR_NAME_LEN ||
+- (u8 *)ea->ea_data + nlen + 1 + vlen > end)
++ (u8 *)ea->ea_data + nlen + 1 + vlen > ea_end)
+ return -EINVAL;
+
+ switch (vlen) {
--- /dev/null
+From d62b8d236fab503c6fec1d3e9a38bea71feaca20 Mon Sep 17 00:00:00 2001
+From: Zisen Ye <zisenye@stu.xidian.edu.cn>
+Date: Sat, 2 May 2026 18:48:36 +0800
+Subject: smb/client: fix out-of-bounds read in symlink_data()
+
+From: Zisen Ye <zisenye@stu.xidian.edu.cn>
+
+commit d62b8d236fab503c6fec1d3e9a38bea71feaca20 upstream.
+
+Since smb2_check_message() returns success without length validation for
+the symlink error response, in symlink_data() it is possible for
+iov->iov_len to be smaller than sizeof(struct smb2_err_rsp). If the buffer
+only contains the base SMB2 header (64 bytes), accessing
+err->ErrorContextCount (at offset 66) or err->ByteCount later in
+symlink_data() will cause an out-of-bounds read.
+
+Link: https://lore.kernel.org/linux-cifs/297d8d9b-adf7-42fd-a1c2-5b1f230032bc@chenxiaosong.com/
+Fixes: 76894f3e2f71 ("cifs: improve symlink handling for smb2+")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Zisen Ye <zisenye@stu.xidian.edu.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2misc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -239,7 +239,8 @@ smb2_check_message(char *buf, unsigned i
+ if (len != calc_len) {
+ /* create failed on symlink */
+ if (command == SMB2_CREATE_HE &&
+- shdr->Status == STATUS_STOPPED_ON_SYMLINK)
++ shdr->Status == STATUS_STOPPED_ON_SYMLINK &&
++ len > calc_len)
+ return 0;
+ /* Windows 7 server returns 24 bytes more */
+ if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
--- /dev/null
+From 5e489c6c47a2ac15edbaca153b9348e42c1eacab Mon Sep 17 00:00:00 2001
+From: Bjoern Doebel <doebel@amazon.de>
+Date: Thu, 30 Apr 2026 08:57:17 +0000
+Subject: smb: client: use kzalloc to zero-initialize security descriptor buffer
+
+From: Bjoern Doebel <doebel@amazon.de>
+
+commit 5e489c6c47a2ac15edbaca153b9348e42c1eacab upstream.
+
+Commit 62e7dd0a39c2d ("smb: common: change the data type of num_aces
+to le16") split struct smb_acl's __le32 num_aces field into __le16
+num_aces and __le16 reserved. The reserved field corresponds to Sbz2
+in the MS-DTYP ACL wire format, which must be zero [1].
+
+When building an ACL descriptor in build_sec_desc(), we are using a
+kmalloc()'ed descriptor buffer and writing the fields explicitly using
+le16() writes now. This never writes to the 2 byte reserved field,
+leaving it as uninitialized heap data.
+
+When the reserved field happens to contain non-zero slab garbage,
+Samba rejects the security descriptor with "ndr_pull_security_descriptor
+failed: Range Error", causing chmod to fail with EINVAL.
+
+Change kmalloc() to kzalloc() to ensure the entire buffer is
+zero-initialized.
+
+Fixes: 62e7dd0a39c2d ("smb: common: change the data type of num_aces to le16")
+Cc: stable@vger.kernel.org
+
+Signed-off-by: Bjoern Doebel <doebel@amazon.de>
+Assisted-by: Kiro:claude-opus-4.6
+[1] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/20233ed8-a6c6-4097-aafa-dd545ed24428
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsacl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -1738,7 +1738,7 @@ id_mode_to_cifs_acl(struct inode *inode,
+ * descriptor parameters, and security descriptor itself
+ */
+ nsecdesclen = max_t(u32, nsecdesclen, DEFAULT_SEC_DESC_LEN);
+- pnntsd = kmalloc(nsecdesclen, GFP_KERNEL);
++ pnntsd = kzalloc(nsecdesclen, GFP_KERNEL);
+ if (!pnntsd) {
+ kfree(pntsd);
+ cifs_put_tlink(tlink);
--- /dev/null
+From f98b48151cc502ada59d9778f0112d21f2586ca3 Mon Sep 17 00:00:00 2001
+From: Michael Bommarito <michael.bommarito@gmail.com>
+Date: Mon, 20 Apr 2026 10:47:47 -0400
+Subject: smb: client: validate dacloffset before building DACL pointers
+
+From: Michael Bommarito <michael.bommarito@gmail.com>
+
+commit f98b48151cc502ada59d9778f0112d21f2586ca3 upstream.
+
+parse_sec_desc(), build_sec_desc(), and the chown path in
+id_mode_to_cifs_acl() all add the server-supplied dacloffset to pntsd
+before proving a DACL header fits inside the returned security
+descriptor.
+
+On 32-bit builds a malicious server can return dacloffset near
+U32_MAX, wrap the derived DACL pointer below end_of_acl, and then slip
+past the later pointer-based bounds checks. build_sec_desc() and
+id_mode_to_cifs_acl() can then dereference DACL fields from the wrapped
+pointer in the chmod/chown rewrite paths.
+
+Validate dacloffset numerically before building any DACL pointer and
+reuse the same helper at the three DACL entry points.
+
+Fixes: bc3e9dd9d104 ("cifs: Change SIDs in ACEs while transferring file ownership.")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-6
+Signed-off-by: Michael Bommarito <michael.bommarito@gmail.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsacl.c | 35 ++++++++++++++++++++++++++++++++---
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -1265,6 +1265,17 @@ static int parse_sid(struct smb_sid *psi
+ return 0;
+ }
+
++static bool dacl_offset_valid(unsigned int acl_len, __u32 dacloffset)
++{
++ if (acl_len < sizeof(struct smb_acl))
++ return false;
++
++ if (dacloffset < sizeof(struct smb_ntsd))
++ return false;
++
++ return dacloffset <= acl_len - sizeof(struct smb_acl);
++}
++
+
+ /* Convert CIFS ACL to POSIX form */
+ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
+@@ -1285,7 +1296,6 @@ static int parse_sec_desc(struct cifs_sb
+ group_sid_ptr = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->gsidoffset));
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+- dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
+ pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
+ le32_to_cpu(pntsd->gsidoffset),
+@@ -1316,11 +1326,18 @@ static int parse_sec_desc(struct cifs_sb
+ return rc;
+ }
+
+- if (dacloffset)
++ if (dacloffset) {
++ if (!dacl_offset_valid(acl_len, dacloffset)) {
++ cifs_dbg(VFS, "Server returned illegal DACL offset\n");
++ return -EINVAL;
++ }
++
++ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
+ group_sid_ptr, fattr, get_mode_from_special_sid);
+- else
++ } else {
+ cifs_dbg(FYI, "no ACL\n"); /* BB grant all or default perms? */
++ }
+
+ return rc;
+ }
+@@ -1343,6 +1360,11 @@ static int build_sec_desc(struct smb_nts
+
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
++ if (!dacl_offset_valid(secdesclen, dacloffset)) {
++ cifs_dbg(VFS, "Server returned illegal DACL offset\n");
++ return -EINVAL;
++ }
++
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ rc = validate_dacl(dacl_ptr, end_of_acl);
+ if (rc)
+@@ -1716,6 +1738,12 @@ id_mode_to_cifs_acl(struct inode *inode,
+ nsecdesclen = sizeof(struct smb_ntsd) + (sizeof(struct smb_sid) * 2);
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
++ if (!dacl_offset_valid(secdesclen, dacloffset)) {
++ cifs_dbg(VFS, "Server returned illegal DACL offset\n");
++ rc = -EINVAL;
++ goto id_mode_to_cifs_acl_exit;
++ }
++
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ rc = validate_dacl(dacl_ptr, (char *)pntsd + secdesclen);
+ if (rc) {
+@@ -1758,6 +1786,7 @@ id_mode_to_cifs_acl(struct inode *inode,
+ rc = ops->set_acl(pnntsd, nsecdesclen, inode, path, aclflag);
+ cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
+ }
++id_mode_to_cifs_acl_exit:
+ cifs_put_tlink(tlink);
+
+ kfree(pnntsd);