--- /dev/null
+From d33c3471047fc54966621d19329e6a23ebc8ec50 Mon Sep 17 00:00:00 2001
+From: Praful Adiga <praful.adiga@gmail.com>
+Date: Thu, 18 Sep 2025 12:40:18 -0400
+Subject: ALSA: hda/realtek: Fix mute led for HP Laptop 15-dw4xx
+
+From: Praful Adiga <praful.adiga@gmail.com>
+
+commit d33c3471047fc54966621d19329e6a23ebc8ec50 upstream.
+
+This laptop uses the ALC236 codec with COEF 0x7 and idx 1 to
+control the mute LED. Enable the existing quirk for this device.
+
+Signed-off-by: Praful Adiga <praful.adiga@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10666,6 +10666,7 @@ static const struct hda_quirk alc269_fix
+ SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
++ SND_PCI_QUIRK(0x103c, 0x89a0, "HP Laptop 15-dw4xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
--- /dev/null
+From 5f1af203ef964e7f7bf9d32716dfa5f332cc6f09 Mon Sep 17 00:00:00 2001
+From: Mohammad Rafi Shaik <mohammad.rafi.shaik@oss.qualcomm.com>
+Date: Mon, 8 Sep 2025 11:06:29 +0530
+Subject: ASoC: qcom: audioreach: Fix lpaif_type configuration for the I2S interface
+
+From: Mohammad Rafi Shaik <mohammad.rafi.shaik@oss.qualcomm.com>
+
+commit 5f1af203ef964e7f7bf9d32716dfa5f332cc6f09 upstream.
+
+Fix missing lpaif_type configuration for the I2S interface.
+The proper lpaif interface type is required to allow the DSP to vote
+for the appropriate clock setting for the I2S interface.
+
+Fixes: 25ab80db6b133 ("ASoC: qdsp6: audioreach: add module configuration command helpers")
+Cc: stable@vger.kernel.org
+Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Signed-off-by: Mohammad Rafi Shaik <mohammad.rafi.shaik@oss.qualcomm.com>
+Message-ID: <20250908053631.70978-2-mohammad.rafi.shaik@oss.qualcomm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/qdsp6/audioreach.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/soc/qcom/qdsp6/audioreach.c
++++ b/sound/soc/qcom/qdsp6/audioreach.c
+@@ -971,6 +971,7 @@ static int audioreach_i2s_set_media_form
+ param_data->param_id = PARAM_ID_I2S_INTF_CFG;
+ param_data->param_size = ic_sz - APM_MODULE_PARAM_DATA_SIZE;
+
++ intf_cfg->cfg.lpaif_type = module->hw_interface_type;
+ intf_cfg->cfg.intf_idx = module->hw_interface_idx;
+ intf_cfg->cfg.sd_line_idx = module->sd_line_idx;
+
--- /dev/null
+From 33b55b94bca904ca25a9585e3cd43d15f0467969 Mon Sep 17 00:00:00 2001
+From: Mohammad Rafi Shaik <mohammad.rafi.shaik@oss.qualcomm.com>
+Date: Mon, 8 Sep 2025 11:06:30 +0530
+Subject: ASoC: qcom: q6apm-lpass-dais: Fix missing set_fmt DAI op for I2S
+
+From: Mohammad Rafi Shaik <mohammad.rafi.shaik@oss.qualcomm.com>
+
+commit 33b55b94bca904ca25a9585e3cd43d15f0467969 upstream.
+
+The q6i2s_set_fmt() function was defined but never linked into the
+I2S DAI operations, resulting in DAI format settings being ignored
+during stream setup. This change fixes the issue by properly linking
+the .set_fmt handler within the DAI ops.
+
+Fixes: 30ad723b93ade ("ASoC: qdsp6: audioreach: add q6apm lpass dai support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Signed-off-by: Mohammad Rafi Shaik <mohammad.rafi.shaik@oss.qualcomm.com>
+Message-ID: <20250908053631.70978-3-mohammad.rafi.shaik@oss.qualcomm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/qdsp6/q6apm-lpass-dais.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -262,6 +262,7 @@ static const struct snd_soc_dai_ops q6i2
+ .shutdown = q6apm_lpass_dai_shutdown,
+ .set_channel_map = q6dma_set_channel_map,
+ .hw_params = q6dma_hw_params,
++ .set_fmt = q6i2s_set_fmt,
+ };
+
+ static const struct snd_soc_dai_ops q6hdmi_ops = {
--- /dev/null
+From 68f27f7c7708183e7873c585ded2f1b057ac5b97 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Date: Thu, 4 Sep 2025 12:18:50 +0200
+Subject: ASoC: qcom: q6apm-lpass-dais: Fix NULL pointer dereference if source graph failed
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+commit 68f27f7c7708183e7873c585ded2f1b057ac5b97 upstream.
+
+If the earlier opening of the source graph fails (e.g. the ADSP rejects it
+due to an incorrect audioreach topology), the graph is closed and
+"dai_data->graph[dai->id]" is assigned NULL. Preparing the DAI for the sink
+graph continues anyway, and the next call to q6apm_lpass_dai_prepare()
+receives dai_data->graph[dai->id] == NULL, leading to a NULL pointer
+exception:
+
+ qcom-apm gprsvc:service:2:1: Error (1) Processing 0x01001002 cmd
+ qcom-apm gprsvc:service:2:1: DSP returned error[1001002] 1
+ q6apm-lpass-dais 30000000.remoteproc:glink-edge:gpr:service@1:bedais: fail to start APM port 78
+ q6apm-lpass-dais 30000000.remoteproc:glink-edge:gpr:service@1:bedais: ASoC: error at snd_soc_pcm_dai_prepare on TX_CODEC_DMA_TX_3: -22
+ Unable to handle kernel NULL pointer dereference at virtual address 00000000000000a8
+ ...
+ Call trace:
+ q6apm_graph_media_format_pcm+0x48/0x120 (P)
+ q6apm_lpass_dai_prepare+0x110/0x1b4
+ snd_soc_pcm_dai_prepare+0x74/0x108
+ __soc_pcm_prepare+0x44/0x160
+ dpcm_be_dai_prepare+0x124/0x1c0
+
+Fixes: 30ad723b93ad ("ASoC: qdsp6: audioreach: add q6apm lpass dai support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Message-ID: <20250904101849.121503-2-krzysztof.kozlowski@linaro.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/qdsp6/q6apm-lpass-dais.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -213,8 +213,10 @@ static int q6apm_lpass_dai_prepare(struc
+
+ return 0;
+ err:
+- q6apm_graph_close(dai_data->graph[dai->id]);
+- dai_data->graph[dai->id] = NULL;
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ q6apm_graph_close(dai_data->graph[dai->id]);
++ dai_data->graph[dai->id] = NULL;
++ }
+ return rc;
+ }
+
--- /dev/null
+From 96fa515e70f3e4b98685ef8cac9d737fc62f10e1 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Tue, 16 Sep 2025 07:54:06 +0930
+Subject: btrfs: tree-checker: fix the incorrect inode ref size check
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit 96fa515e70f3e4b98685ef8cac9d737fc62f10e1 upstream.
+
+[BUG]
+Inside check_inode_ref(), we need to make sure every structure,
+including the btrfs_inode_extref header, is covered by the item. But
+our code is incorrectly using "sizeof(iref)", where @iref is just a
+pointer.
+
+This means "sizeof(iref)" will always be "sizeof(void *)", which is much
+smaller than "sizeof(struct btrfs_inode_extref)".
+
+This will allow some bad inode extrefs to sneak in, defeating tree-checker.
+
+[FIX]
+Fix the typo by calling "sizeof(*iref)", which is the same as
+"sizeof(struct btrfs_inode_extref)", and will be the correct behavior we
+want.
+
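+The pitfall is plain C, independent of btrfs. A minimal standalone sketch
+(hypothetical struct layout, not the real btrfs definition) of why sizeof()
+on a pointer is the wrong bound:
+
+  #include <stdio.h>
+
+  struct inode_extref {
+          unsigned long parent_objectid;
+          unsigned long index;
+          unsigned short name_len;
+          char name[];
+  };
+
+  int main(void)
+  {
+          struct inode_extref *iref = NULL;
+
+          /* size of the pointer itself, typically 8 on 64-bit */
+          printf("sizeof(iref)  = %zu\n", sizeof(iref));
+          /* size of the structure, which is the bound the check needs */
+          printf("sizeof(*iref) = %zu\n", sizeof(*iref));
+          return 0;
+  }
+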
+Fixes: 71bf92a9b877 ("btrfs: tree-checker: Add check for INODE_REF")
+CC: stable@vger.kernel.org # 6.1+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/tree-checker.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1744,10 +1744,10 @@ static int check_inode_ref(struct extent
+ while (ptr < end) {
+ u16 namelen;
+
+- if (unlikely(ptr + sizeof(iref) > end)) {
++ if (unlikely(ptr + sizeof(*iref) > end)) {
+ inode_ref_err(leaf, slot,
+ "inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
+- ptr, end, sizeof(iref));
++ ptr, end, sizeof(*iref));
+ return -EUCLEAN;
+ }
+
--- /dev/null
+From 1b34cbbf4f011a121ef7b2d7d6e6920a036d5285 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 16 Sep 2025 17:20:59 +0800
+Subject: crypto: af_alg - Disallow concurrent writes in af_alg_sendmsg
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 1b34cbbf4f011a121ef7b2d7d6e6920a036d5285 upstream.
+
+Issuing two writes to the same af_alg socket is bogus as the
+data will be interleaved in an unpredictable fashion. Furthermore,
+concurrent writes may create inconsistencies in the internal
+socket state.
+
+Disallow this by adding a new ctx->write field that indicates
+exclusive ownership for writing.
+
+Fixes: 8ff590903d5 ("crypto: algif_skcipher - User-space interface for skcipher operations")
+Reported-by: Muhammad Alifa Ramdhan <ramdhan@starlabs.sg>
+Reported-by: Bing-Jhong Billy Jheng <billy@starlabs.sg>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/af_alg.c | 7 +++++++
+ include/crypto/if_alg.h | 10 ++++++----
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -970,6 +970,12 @@ int af_alg_sendmsg(struct socket *sock,
+ }
+
+ lock_sock(sk);
++ if (ctx->write) {
++ release_sock(sk);
++ return -EBUSY;
++ }
++ ctx->write = true;
++
+ if (ctx->init && !ctx->more) {
+ if (ctx->used) {
+ err = -EINVAL;
+@@ -1104,6 +1110,7 @@ int af_alg_sendmsg(struct socket *sock,
+
+ unlock:
+ af_alg_data_wakeup(sk);
++ ctx->write = false;
+ release_sock(sk);
+
+ return copied ?: err;
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -135,6 +135,7 @@ struct af_alg_async_req {
+ * SG?
+ * @enc: Cryptographic operation to be performed when
+ * recvmsg is invoked.
++ * @write: True if we are in the middle of a write.
+ * @init: True if metadata has been sent.
+ * @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
+@@ -151,10 +152,11 @@ struct af_alg_ctx {
+ size_t used;
+ atomic_t rcvused;
+
+- bool more;
+- bool merge;
+- bool enc;
+- bool init;
++ u32 more:1,
++ merge:1,
++ enc:1,
++ write:1,
++ init:1;
+
+ unsigned int len;
+
--- /dev/null
+From a86556264696b797d94238d99d8284d0d34ed960 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 15 Sep 2025 16:12:40 +0200
+Subject: dm-raid: don't set io_min and io_opt for raid1
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit a86556264696b797d94238d99d8284d0d34ed960 upstream.
+
+These commands
+ modprobe brd rd_size=1048576
+ vgcreate vg /dev/ram*
+ lvcreate -m4 -L10 -n lv vg
+trigger the following warnings:
+device-mapper: table: 252:10: adding target device (start sect 0 len 24576) caused an alignment inconsistency
+device-mapper: table: 252:10: adding target device (start sect 0 len 24576) caused an alignment inconsistency
+
+The warnings are caused by the fact that io_min is 512 and physical block
+size is 4096.
+
+If there's a chunk-less raid, such as raid1, io_min shouldn't be set to zero
+because the zero value would be raised to 512 and trigger the warning.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-raid.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3811,8 +3811,10 @@ static void raid_io_hints(struct dm_targ
+ struct raid_set *rs = ti->private;
+ unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
+
+- limits->io_min = chunk_size_bytes;
+- limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
++ if (chunk_size_bytes) {
++ limits->io_min = chunk_size_bytes;
++ limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
++ }
+ }
+
+ static void raid_presuspend(struct dm_target *ti)
--- /dev/null
+From 1071d560afb4c245c2076494226df47db5a35708 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 11 Aug 2025 13:17:32 +0200
+Subject: dm-stripe: fix a possible integer overflow
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 1071d560afb4c245c2076494226df47db5a35708 upstream.
+
+There's a possible integer overflow in stripe_io_hints if the chunk size
+is too large. Test whether the overflow happened, and if it did, don't set
+limits->io_min and limits->io_opt.
+
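+For illustration, a minimal userspace sketch of the wrap-around (plain C,
+with the GCC/Clang __builtin_mul_overflow() builtin standing in for the
+kernel's check_shl_overflow()/check_mul_overflow() helpers):
+
+  #include <stdio.h>
+
+  #define SECTOR_SHIFT 9
+
+  int main(void)
+  {
+          unsigned int chunk_sectors = 1U << 23;  /* 8M sectors == 4 GiB */
+          unsigned int stripes = 4;
+          unsigned int io_min, io_opt;
+
+          /* 2^23 sectors << 9 == 2^32: does not fit in 32 bits */
+          if (__builtin_mul_overflow(chunk_sectors, 1U << SECTOR_SHIFT, &io_min) ||
+              __builtin_mul_overflow(io_min, stripes, &io_opt))
+                  printf("overflow detected, leave io_min/io_opt untouched\n");
+          else
+                  printf("io_min=%u io_opt=%u\n", io_min, io_opt);
+          return 0;
+  }
+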
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Suggested-by: Dongsheng Yang <dongsheng.yang@linux.dev>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-stripe.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -457,11 +457,15 @@ static void stripe_io_hints(struct dm_ta
+ struct queue_limits *limits)
+ {
+ struct stripe_c *sc = ti->private;
+- unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
++ unsigned int io_min, io_opt;
+
+ limits->chunk_sectors = sc->chunk_size;
+- limits->io_min = chunk_size;
+- limits->io_opt = chunk_size * sc->stripes;
++
++ if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
++ !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
++ limits->io_min = io_min;
++ limits->io_opt = io_opt;
++ }
+ }
+
+ static struct target_type stripe_target = {
--- /dev/null
+From 29a2f430475357f760679b249f33e7282688e292 Mon Sep 17 00:00:00 2001
+From: Ivan Lipski <ivan.lipski@amd.com>
+Date: Tue, 2 Sep 2025 16:20:09 -0400
+Subject: drm/amd/display: Allow RX6xxx & RX7700 to invoke amdgpu_irq_get/put
+
+From: Ivan Lipski <ivan.lipski@amd.com>
+
+commit 29a2f430475357f760679b249f33e7282688e292 upstream.
+
+[Why&How]
+As reported on https://gitlab.freedesktop.org/drm/amd/-/issues/3936,
+an SMU hang can occur if the interrupts are not enabled appropriately,
+causing a vblank timeout.
+
+This patch reverts commit 5009628d8509 ("drm/amd/display: Remove unnecessary
+amdgpu_irq_get/put"), but only for RX6xxx & RX7700 GPUs, on which the
+issue was observed.
+
+This will re-enable the interrupts regardless of whether user space needs
+them or not.
+
+Fixes: 5009628d8509 ("drm/amd/display: Remove unnecessary amdgpu_irq_get/put")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3936
+Suggested-by: Sun peng Li <sunpeng.li@amd.com>
+Reviewed-by: Sun peng Li <sunpeng.li@amd.com>
+Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
+Signed-off-by: Ray Wu <ray.wu@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 95d168b367aa28a59f94fc690ff76ebf69312c6d)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 39 +++++++++++++++++++++-
+ 1 file changed, 38 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8462,7 +8462,16 @@ static int amdgpu_dm_encoder_init(struct
+ static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dm_crtc_state *acrtc_state)
+-{
++{ /*
++ * We cannot be sure that the frontend index maps to the same
++ * backend index - some even map to more than one.
++ * So we have to go through the CRTC to find the right IRQ.
++ */
++ int irq_type = amdgpu_display_crtc_idx_to_irq_type(
++ adev,
++ acrtc->crtc_id);
++ struct drm_device *dev = adev_to_drm(adev);
++
+ struct drm_vblank_crtc_config config = {0};
+ struct dc_crtc_timing *timing;
+ int offdelay;
+@@ -8515,7 +8524,35 @@ static void manage_dm_interrupts(struct
+
+ drm_crtc_vblank_on_config(&acrtc->base,
+ &config);
++ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
++ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
++ case IP_VERSION(3, 0, 0):
++ case IP_VERSION(3, 0, 2):
++ case IP_VERSION(3, 0, 3):
++ case IP_VERSION(3, 2, 0):
++ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
++ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
++#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
++ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
++ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
++#endif
++ }
++
+ } else {
++ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
++ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
++ case IP_VERSION(3, 0, 0):
++ case IP_VERSION(3, 0, 2):
++ case IP_VERSION(3, 0, 3):
++ case IP_VERSION(3, 2, 0):
++#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
++ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
++ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
++#endif
++ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
++ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
++ }
++
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+ }
--- /dev/null
+From a03db236aebfaeadf79396dbd570896b870bda01 Mon Sep 17 00:00:00 2001
+From: Li Zhe <lizhe.67@bytedance.com>
+Date: Fri, 6 Jun 2025 10:37:42 +0800
+Subject: gup: optimize longterm pin_user_pages() for large folio
+
+From: Li Zhe <lizhe.67@bytedance.com>
+
+commit a03db236aebfaeadf79396dbd570896b870bda01 upstream.
+
+In the current implementation of longterm pin_user_pages(), we invoke
+collect_longterm_unpinnable_folios(). This function iterates through the
+list to check whether each folio belongs to the "longterm unpinnable"
+category. The folios in this list essentially correspond to a contiguous
+region of userspace addresses, with each folio representing a physical
+address in increments of PAGESIZE.
+
+If this userspace address range is mapped with a large folio, we can
+optimize the performance of collect_longterm_unpinnable_folios()
+by reducing the use of READ_ONCE() invoked in
+pofs_get_folio()->page_folio()->_compound_head().
+
+Also, we can simplify the logic of collect_longterm_unpinnable_folios().
+Instead of comparing with prev_folio after calling pofs_get_folio(), we
+can check whether the next page is within the same folio.
+
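+The membership test itself is simple pfn arithmetic; a minimal sketch of the
+idea (standalone helper with a hypothetical name, not the mm/gup.c code):
+
+  #include <stdbool.h>
+
+  /* Is @pfn still inside the folio that starts at @start_pfn? */
+  static bool pfn_in_folio(unsigned long pfn, unsigned long start_pfn,
+                           unsigned long nr_pages)
+  {
+          return pfn >= start_pfn && pfn < start_pfn + nr_pages;
+  }
+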
+The performance test results, based on v6.15, obtained through the
+gup_test tool from the kernel source tree are as follows. We achieve an
+improvement of over 66% for large folios with pagesize=2M. For small
+folios, we have only observed a very slight degradation in performance.
+
+Without this patch:
+
+ [root@localhost ~] ./gup_test -HL -m 8192 -n 512
+ TAP version 13
+ 1..1
+ # PIN_LONGTERM_BENCHMARK: Time: get:14391 put:10858 us#
+ ok 1 ioctl status 0
+ # Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
+ [root@localhost ~]# ./gup_test -LT -m 8192 -n 512
+ TAP version 13
+ 1..1
+ # PIN_LONGTERM_BENCHMARK: Time: get:130538 put:31676 us#
+ ok 1 ioctl status 0
+ # Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+With this patch:
+
+ [root@localhost ~] ./gup_test -HL -m 8192 -n 512
+ TAP version 13
+ 1..1
+ # PIN_LONGTERM_BENCHMARK: Time: get:4867 put:10516 us#
+ ok 1 ioctl status 0
+ # Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
+ [root@localhost ~]# ./gup_test -LT -m 8192 -n 512
+ TAP version 13
+ 1..1
+ # PIN_LONGTERM_BENCHMARK: Time: get:131798 put:31328 us#
+ ok 1 ioctl status 0
+ # Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+[lizhe.67@bytedance.com: whitespace fix, per David]
+ Link: https://lkml.kernel.org/r/20250606091917.91384-1-lizhe.67@bytedance.com
+Link: https://lkml.kernel.org/r/20250606023742.58344-1-lizhe.67@bytedance.com
+Signed-off-by: Li Zhe <lizhe.67@bytedance.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Dev Jain <dev.jain@arm.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Peter Xu <peterx@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/gup.c | 38 ++++++++++++++++++++++++++++++--------
+ 1 file changed, 30 insertions(+), 8 deletions(-)
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2323,6 +2323,31 @@ static void pofs_unpin(struct pages_or_f
+ unpin_user_pages(pofs->pages, pofs->nr_entries);
+ }
+
++static struct folio *pofs_next_folio(struct folio *folio,
++ struct pages_or_folios *pofs, long *index_ptr)
++{
++ long i = *index_ptr + 1;
++
++ if (!pofs->has_folios && folio_test_large(folio)) {
++ const unsigned long start_pfn = folio_pfn(folio);
++ const unsigned long end_pfn = start_pfn + folio_nr_pages(folio);
++
++ for (; i < pofs->nr_entries; i++) {
++ unsigned long pfn = page_to_pfn(pofs->pages[i]);
++
++ /* Is this page part of this folio? */
++ if (pfn < start_pfn || pfn >= end_pfn)
++ break;
++ }
++ }
++
++ if (unlikely(i == pofs->nr_entries))
++ return NULL;
++ *index_ptr = i;
++
++ return pofs_get_folio(pofs, i);
++}
++
+ /*
+ * Returns the number of collected folios. Return value is always >= 0.
+ */
+@@ -2330,16 +2355,13 @@ static unsigned long collect_longterm_un
+ struct list_head *movable_folio_list,
+ struct pages_or_folios *pofs)
+ {
+- unsigned long i, collected = 0;
+- struct folio *prev_folio = NULL;
++ unsigned long collected = 0;
+ bool drain_allow = true;
++ struct folio *folio;
++ long i = 0;
+
+- for (i = 0; i < pofs->nr_entries; i++) {
+- struct folio *folio = pofs_get_folio(pofs, i);
+-
+- if (folio == prev_folio)
+- continue;
+- prev_folio = folio;
++ for (folio = pofs_get_folio(pofs, i); folio;
++ folio = pofs_next_folio(folio, pofs, &i)) {
+
+ if (folio_is_longterm_pinnable(folio))
+ continue;
--- /dev/null
+From 32d72fba3a7dbe4c819a792c6277371902160eec Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 18 Sep 2025 11:27:06 -0600
+Subject: io_uring: backport io_should_terminate_tw()
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Parts of commit b6f58a3f4aa8dba424356c7a69388a81f4459300 upstream.
+
+Backport io_should_terminate_tw() helper to judge whether task_work
+should be run or terminated.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 3 +--
+ io_uring/io_uring.h | 13 +++++++++++++
+ io_uring/poll.c | 3 +--
+ io_uring/timeout.c | 2 +-
+ io_uring/uring_cmd.c | 2 +-
+ 5 files changed, 17 insertions(+), 6 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1359,8 +1359,7 @@ static void io_req_task_cancel(struct io
+ void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
+ {
+ io_tw_lock(req->ctx, ts);
+- /* req->task == current here, checking PF_EXITING is safe */
+- if (unlikely(req->task->flags & PF_EXITING))
++ if (unlikely(io_should_terminate_tw()))
+ io_req_defer_failed(req, -EFAULT);
+ else if (req->flags & REQ_F_FORCE_ASYNC)
+ io_queue_iowq(req);
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -421,6 +421,19 @@ static inline bool io_allowed_run_tw(str
+ ctx->submitter_task == current);
+ }
+
++/*
++ * Terminate the request if either of these conditions are true:
++ *
++ * 1) It's being executed by the original task, but that task is marked
++ * with PF_EXITING as it's exiting.
++ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
++ * our fallback task_work.
++ */
++static inline bool io_should_terminate_tw(void)
++{
++ return current->flags & (PF_KTHREAD | PF_EXITING);
++}
++
+ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+ {
+ io_req_set_res(req, res, 0);
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -265,8 +265,7 @@ static int io_poll_check_events(struct i
+ {
+ int v;
+
+- /* req->task == current here, checking PF_EXITING is safe */
+- if (unlikely(req->task->flags & PF_EXITING))
++ if (unlikely(io_should_terminate_tw()))
+ return -ECANCELED;
+
+ do {
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -303,7 +303,7 @@ static void io_req_task_link_timeout(str
+ int ret = -ENOENT;
+
+ if (prev) {
+- if (!(req->task->flags & PF_EXITING)) {
++ if (!io_should_terminate_tw()) {
+ struct io_cancel_data cd = {
+ .ctx = req->ctx,
+ .data = prev->cqe.user_data,
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -118,7 +118,7 @@ static void io_uring_cmd_work(struct io_
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ unsigned int flags = IO_URING_F_COMPLETE_DEFER;
+
+- if (current->flags & (PF_EXITING | PF_KTHREAD))
++ if (io_should_terminate_tw())
+ flags |= IO_URING_F_TASK_DEAD;
+
+ /* task_work executor checks the deffered list completion */
--- /dev/null
+From d61426de1cd739117889e8f876d1f8de548c6c92 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Mon, 4 Nov 2024 16:12:04 +0000
+Subject: io_uring/cmd: let cmds to know about dying task
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+Commit df3b8ca604f224eb4cd51669416ad4d607682273 upstream.
+
+When the task that submitted a request is dying, a task work for that
+request might get run by a kernel thread or even worse by a half
+dismantled task. We can't just cancel the task work without running the
+callback as the cmd might need to do some clean up, so pass a flag
+instead. If set, it's not safe to access any task resources and the
+callback is expected to cancel the cmd ASAP.
+
+Reviewed-by: Jens Axboe <axboe@kernel.dk>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/io_uring_types.h | 1 +
+ io_uring/uring_cmd.c | 6 +++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -37,6 +37,7 @@ enum io_uring_cmd_flags {
+ /* set when uring wants to cancel a previously issued command */
+ IO_URING_F_CANCEL = (1 << 11),
+ IO_URING_F_COMPAT = (1 << 12),
++ IO_URING_F_TASK_DEAD = (1 << 13),
+ };
+
+ struct io_wq_work_node {
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -116,9 +116,13 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_mark_canc
+ static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
+ {
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
++ unsigned int flags = IO_URING_F_COMPLETE_DEFER;
++
++ if (current->flags & (PF_EXITING | PF_KTHREAD))
++ flags |= IO_URING_F_TASK_DEAD;
+
+ /* task_work executor checks the deffered list completion */
+- ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
++ ioucmd->task_work_cb(ioucmd, flags);
+ }
+
+ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
--- /dev/null
+From 29c99f5dee72c8a4477f38c7804e70fe3db0ab34 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 18 Sep 2025 10:21:14 -0600
+Subject: io_uring: include dying ring in task_work "should cancel" state
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 3539b1467e94336d5854ebf976d9627bfb65d6c3 upstream.
+
+When running task_work for an exiting task, rather than perform the
+issue retry attempt, the task_work is canceled. However, this isn't
+done for a ring that has been closed. This can lead to requests being
+successfully completed after the ring has been closed, which is somewhat
+confusing and surprising to an application.
+
+Rather than just check the task exit state, also include the ring
+ref state in deciding whether or not to terminate a given request when
+run from task_work.
+
+Cc: stable@vger.kernel.org # 6.1+
+Link: https://github.com/axboe/liburing/discussions/1459
+Reported-by: Benedek Thaler <thaler@thaler.hu>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 6 ++++--
+ io_uring/io_uring.h | 4 ++--
+ io_uring/poll.c | 2 +-
+ io_uring/timeout.c | 2 +-
+ io_uring/uring_cmd.c | 2 +-
+ 5 files changed, 9 insertions(+), 7 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1358,8 +1358,10 @@ static void io_req_task_cancel(struct io
+
+ void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
+ {
+- io_tw_lock(req->ctx, ts);
+- if (unlikely(io_should_terminate_tw()))
++ struct io_ring_ctx *ctx = req->ctx;
++
++ io_tw_lock(ctx, ts);
++ if (unlikely(io_should_terminate_tw(ctx)))
+ io_req_defer_failed(req, -EFAULT);
+ else if (req->flags & REQ_F_FORCE_ASYNC)
+ io_queue_iowq(req);
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -429,9 +429,9 @@ static inline bool io_allowed_run_tw(str
+ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
+ * our fallback task_work.
+ */
+-static inline bool io_should_terminate_tw(void)
++static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
+ {
+- return current->flags & (PF_KTHREAD | PF_EXITING);
++ return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
+ }
+
+ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -265,7 +265,7 @@ static int io_poll_check_events(struct i
+ {
+ int v;
+
+- if (unlikely(io_should_terminate_tw()))
++ if (unlikely(io_should_terminate_tw(req->ctx)))
+ return -ECANCELED;
+
+ do {
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -303,7 +303,7 @@ static void io_req_task_link_timeout(str
+ int ret = -ENOENT;
+
+ if (prev) {
+- if (!io_should_terminate_tw()) {
++ if (!io_should_terminate_tw(req->ctx)) {
+ struct io_cancel_data cd = {
+ .ctx = req->ctx,
+ .data = prev->cqe.user_data,
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -118,7 +118,7 @@ static void io_uring_cmd_work(struct io_
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ unsigned int flags = IO_URING_F_COMPLETE_DEFER;
+
+- if (io_should_terminate_tw())
++ if (io_should_terminate_tw(req->ctx))
+ flags |= IO_URING_F_TASK_DEAD;
+
+ /* task_work executor checks the deffered list completion */
--- /dev/null
+From 17e7c0b7ee81de9706aa45d8cee1b1c2427eaf6b Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 18 Sep 2025 15:45:41 -0600
+Subject: io_uring/kbuf: drop WARN_ON_ONCE() from incremental length check
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Partially based on commit 98b6fa62c84f2e129161e976a5b9b3cb4ccd117b upstream.
+
+This can be triggered by userspace, so just drop it. The condition
+is appropriately handled.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/kbuf.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -143,7 +143,7 @@ static inline bool io_kbuf_commit(struct
+ struct io_uring_buf *buf;
+
+ buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
+- if (WARN_ON_ONCE(len > buf->len))
++ if (len > buf->len)
+ len = buf->len;
+ buf->len -= len;
+ if (buf->len) {
--- /dev/null
+From 8a41ad655edfb6e052070133a43300fcaa9bfaab Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 18 Sep 2025 14:16:53 -0600
+Subject: io_uring/msg_ring: kill alloc_cache for io_kiocb allocations
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit df8922afc37aa2111ca79a216653a629146763ad upstream.
+
+A recent commit:
+
+fc582cd26e88 ("io_uring/msg_ring: ensure io_kiocb freeing is deferred for RCU")
+
+fixed an issue with not deferring freeing of io_kiocb structs that
+msg_ring allocates to after the current RCU grace period. But this only
+covers requests that don't end up in the allocation cache. If a request
+goes into the alloc cache, it can get reused before it is sane to do so.
+A recent syzbot report would seem to indicate that there's something
+there, however it may very well just be because of the KASAN poisoning
+that the alloc_cache handles manually.
+
+Rather than attempt to make the alloc_cache sane for that use case, just
+drop the usage of the alloc_cache for msg_ring request payload data.
+
+Fixes: 50cf5f3842af ("io_uring/msg_ring: add an alloc cache for io_kiocb entries")
+Link: https://lore.kernel.org/io-uring/68cc2687.050a0220.139b6.0005.GAE@google.com/
+Reported-by: syzbot+baa2e0f4e02df602583e@syzkaller.appspotmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/io_uring_types.h | 3 ---
+ io_uring/io_uring.c | 5 -----
+ io_uring/msg_ring.c | 24 ++----------------------
+ 3 files changed, 2 insertions(+), 30 deletions(-)
+
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -400,9 +400,6 @@ struct io_ring_ctx {
+ struct callback_head poll_wq_task_work;
+ struct list_head defer_list;
+
+- struct io_alloc_cache msg_cache;
+- spinlock_t msg_lock;
+-
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ struct list_head napi_list; /* track busy poll napi_id */
+ spinlock_t napi_lock; /* napi_list lock */
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -316,9 +316,6 @@ static __cold struct io_ring_ctx *io_rin
+ sizeof(struct io_async_rw));
+ ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
+ sizeof(struct uring_cache));
+- spin_lock_init(&ctx->msg_lock);
+- ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
+- sizeof(struct io_kiocb));
+ ret |= io_futex_cache_init(ctx);
+ if (ret)
+ goto free_ref;
+@@ -358,7 +355,6 @@ err:
+ io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+ io_alloc_cache_free(&ctx->uring_cache, kfree);
+- io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
+ io_futex_cache_free(ctx);
+ kfree(ctx->cancel_table.hbs);
+ kfree(ctx->cancel_table_locked.hbs);
+@@ -2743,7 +2739,6 @@ static __cold void io_ring_ctx_free(stru
+ io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+ io_alloc_cache_free(&ctx->uring_cache, kfree);
+- io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
+ io_futex_cache_free(ctx);
+ io_destroy_buffers(ctx);
+ mutex_unlock(&ctx->uring_lock);
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -11,7 +11,6 @@
+ #include "io_uring.h"
+ #include "rsrc.h"
+ #include "filetable.h"
+-#include "alloc_cache.h"
+ #include "msg_ring.h"
+
+ /* All valid masks for MSG_RING */
+@@ -76,13 +75,7 @@ static void io_msg_tw_complete(struct io
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
+- if (spin_trylock(&ctx->msg_lock)) {
+- if (io_alloc_cache_put(&ctx->msg_cache, req))
+- req = NULL;
+- spin_unlock(&ctx->msg_lock);
+- }
+- if (req)
+- kfree_rcu(req, rcu_head);
++ kfree_rcu(req, rcu_head);
+ percpu_ref_put(&ctx->refs);
+ }
+
+@@ -104,19 +97,6 @@ static int io_msg_remote_post(struct io_
+ return 0;
+ }
+
+-static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
+-{
+- struct io_kiocb *req = NULL;
+-
+- if (spin_trylock(&ctx->msg_lock)) {
+- req = io_alloc_cache_get(&ctx->msg_cache);
+- spin_unlock(&ctx->msg_lock);
+- if (req)
+- return req;
+- }
+- return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+-}
+-
+ static int io_msg_data_remote(struct io_kiocb *req)
+ {
+ struct io_ring_ctx *target_ctx = req->file->private_data;
+@@ -124,7 +104,7 @@ static int io_msg_data_remote(struct io_
+ struct io_kiocb *target;
+ u32 flags = 0;
+
+- target = io_msg_get_kiocb(req->ctx);
++ target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ if (unlikely(!target))
+ return -ENOMEM;
+
--- /dev/null
+From 1e56310b40fd2e7e0b9493da9ff488af145bdd0c Mon Sep 17 00:00:00 2001
+From: Vasant Hegde <vasant.hegde@amd.com>
+Date: Sat, 13 Sep 2025 06:26:57 +0000
+Subject: iommu/amd/pgtbl: Fix possible race while increase page table level
+
+From: Vasant Hegde <vasant.hegde@amd.com>
+
+commit 1e56310b40fd2e7e0b9493da9ff488af145bdd0c upstream.
+
+The AMD IOMMU host page table implementation supports dynamic page table levels
+(up to 6 levels), starting with a 3-level configuration that expands based on
+IOVA address. The kernel maintains a root pointer and current page table level
+to enable proper page table walks in alloc_pte()/fetch_pte() operations.
+
+The IOMMU IOVA allocator initially starts with a 32-bit address space and,
+once it is exhausted, switches to 64-bit addresses (the maximum address is
+determined by the IOMMU and device DMA capability). To support larger IOVAs,
+the AMD IOMMU driver increases the page table level.
+
+But in the unmap path (iommu_v1_unmap_pages()), fetch_pte() reads
+pgtable->[root/mode] without a lock. So it's possible that, in an extreme
+corner case, when increase_address_space() is updating pgtable->[root/mode],
+fetch_pte() reads the wrong page table level (pgtable->mode). It then
+compares that value with the level encoded in the page table and returns
+NULL. This causes the iommu_unmap ops to fail, and the upper layer may
+retry/log a WARN_ON.
+
+CPU 0 CPU 1
+------ ------
+map pages unmap pages
+alloc_pte() -> increase_address_space() iommu_v1_unmap_pages() -> fetch_pte()
+ pgtable->root = pte (new root value)
+ READ pgtable->[mode/root]
+ Reads new root, old mode
+ Updates mode (pgtable->mode += 1)
+
+Since page table level updates are infrequent and already synchronized with a
+spinlock, implement a seqcount to enable lock-free reads on the read path.
+
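+For reference, this is the generic seqcount pattern (a simplified sketch
+against the <linux/seqlock.h> API, with made-up struct and function names,
+not the actual amd_io_pgtable code): the writer, already serialized by the
+domain spinlock, bumps the sequence around the two-field update; the reader
+retries if the sequence changed under it, so it always sees a consistent
+root/mode pair.
+
+  #include <linux/seqlock.h>
+  #include <linux/types.h>
+
+  struct pgtable_sketch {
+          seqcount_t seqcount;    /* protects root and mode */
+          u64 *root;
+          int mode;
+  };
+
+  /* writer side: caller already holds the domain spinlock */
+  static void grow_table(struct pgtable_sketch *p, u64 *new_root)
+  {
+          write_seqcount_begin(&p->seqcount);
+          p->root = new_root;
+          p->mode += 1;
+          write_seqcount_end(&p->seqcount);
+  }
+
+  /* reader side: lock-free, retries if a writer raced with us */
+  static void read_root_and_mode(struct pgtable_sketch *p, u64 **root, int *mode)
+  {
+          unsigned int seq;
+
+          do {
+                  seq = read_seqcount_begin(&p->seqcount);
+                  *root = p->root;
+                  *mode = p->mode;
+          } while (read_seqcount_retry(&p->seqcount, seq));
+  }
+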
+Fixes: 754265bcab7 ("iommu/amd: Fix race in increase_address_space()")
+Reported-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Cc: stable@vger.kernel.org
+Cc: Joao Martins <joao.m.martins@oracle.com>
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/amd/amd_iommu_types.h | 1 +
+ drivers/iommu/amd/io_pgtable.c | 25 +++++++++++++++++++++----
+ 2 files changed, 22 insertions(+), 4 deletions(-)
+
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -545,6 +545,7 @@ struct gcr3_tbl_info {
+ };
+
+ struct amd_io_pgtable {
++ seqcount_t seqcount; /* Protects root/mode update */
+ struct io_pgtable pgtbl;
+ int mode;
+ u64 *root;
+--- a/drivers/iommu/amd/io_pgtable.c
++++ b/drivers/iommu/amd/io_pgtable.c
+@@ -17,6 +17,7 @@
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/dma-mapping.h>
++#include <linux/seqlock.h>
+
+ #include <asm/barrier.h>
+
+@@ -144,8 +145,11 @@ static bool increase_address_space(struc
+
+ *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
+
++ write_seqcount_begin(&pgtable->seqcount);
+ pgtable->root = pte;
+ pgtable->mode += 1;
++ write_seqcount_end(&pgtable->seqcount);
++
+ amd_iommu_update_and_flush_device_table(domain);
+
+ pte = NULL;
+@@ -167,6 +171,7 @@ static u64 *alloc_pte(struct amd_io_pgta
+ {
+ unsigned long last_addr = address + (page_size - 1);
+ struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
++ unsigned int seqcount;
+ int level, end_lvl;
+ u64 *pte, *page;
+
+@@ -184,8 +189,14 @@ static u64 *alloc_pte(struct amd_io_pgta
+ }
+
+
+- level = pgtable->mode - 1;
+- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++ do {
++ seqcount = read_seqcount_begin(&pgtable->seqcount);
++
++ level = pgtable->mode - 1;
++ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
++
++
+ address = PAGE_SIZE_ALIGN(address, page_size);
+ end_lvl = PAGE_SIZE_LEVEL(page_size);
+
+@@ -262,6 +273,7 @@ static u64 *fetch_pte(struct amd_io_pgta
+ unsigned long *page_size)
+ {
+ int level;
++ unsigned int seqcount;
+ u64 *pte;
+
+ *page_size = 0;
+@@ -269,8 +281,12 @@ static u64 *fetch_pte(struct amd_io_pgta
+ if (address > PM_LEVEL_SIZE(pgtable->mode))
+ return NULL;
+
+- level = pgtable->mode - 1;
+- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++ do {
++ seqcount = read_seqcount_begin(&pgtable->seqcount);
++ level = pgtable->mode - 1;
++ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
++
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+
+ while (level > 0) {
+@@ -552,6 +568,7 @@ static struct io_pgtable *v1_alloc_pgtab
+ if (!pgtable->root)
+ return NULL;
+ pgtable->mode = PAGE_MODE_3_LEVEL;
++ seqcount_init(&pgtable->seqcount);
+
+ cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
+ cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
--- /dev/null
+From dce043c07ca1ac19cfbe2844a6dc71e35c322353 Mon Sep 17 00:00:00 2001
+From: Eugene Koira <eugkoira@amazon.com>
+Date: Wed, 3 Sep 2025 13:53:29 +0800
+Subject: iommu/vt-d: Fix __domain_mapping()'s usage of switch_to_super_page()
+
+From: Eugene Koira <eugkoira@amazon.com>
+
+commit dce043c07ca1ac19cfbe2844a6dc71e35c322353 upstream.
+
+switch_to_super_page() assumes the memory range it's working on is aligned
+to the target large page level. Unfortunately, __domain_mapping() doesn't
+take this into account when using it, and will pass unaligned ranges,
+ultimately freeing a PTE range larger than expected.
+
+Take for example a mapping with the following iov_pfn range [0x3fe400,
+0x4c0600), which should be backed by the following mappings:
+
+ iov_pfn [0x3fe400, 0x3fffff] covered by 2MiB pages
+ iov_pfn [0x400000, 0x4bffff] covered by 1GiB pages
+ iov_pfn [0x4c0000, 0x4c05ff] covered by 2MiB pages
+
+Under this circumstance, __domain_mapping() will pass [0x400000, 0x4c05ff]
+to switch_to_super_page() at a 1 GiB granularity, which will in turn
+free PTEs all the way to iov_pfn 0x4fffff.
+
+Mitigate this by rounding down the iov_pfn range passed to
+switch_to_super_page() in __domain_mapping()
+to the target large page level.
+
+Additionally add range alignment checks to switch_to_super_page.
+
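+As a worked example using the ranges above (assuming 4 KiB base pages, so a
+1 GiB level spans lvl_pages = 0x40000 pfns): the 1 GiB-level call starts at
+iov_pfn 0x400000 with nr_pages = 0x4c0600 - 0x400000 = 0xc0600;
+round_down(0xc0600, 0x40000) = 0xc0000, so end_pfn becomes 0x4bffff and the
+unaligned tail [0x4c0000, 0x4c05ff] is left to be handled by a later,
+smaller-level iteration instead of being passed to switch_to_super_page().
+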
+Fixes: 9906b9352a35 ("iommu/vt-d: Avoid duplicate removing in __domain_mapping()")
+Signed-off-by: Eugene Koira <eugkoira@amazon.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nicolas Saenz Julienne <nsaenz@amazon.com>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lore.kernel.org/r/20250826143816.38686-1-eugkoira@amazon.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/intel/iommu.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1768,6 +1768,10 @@ static void switch_to_super_page(struct
+ unsigned long lvl_pages = lvl_to_nr_pages(level);
+ struct dma_pte *pte = NULL;
+
++ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
++ !IS_ALIGNED(end_pfn + 1, lvl_pages)))
++ return;
++
+ while (start_pfn <= end_pfn) {
+ if (!pte)
+ pte = pfn_to_dma_pte(domain, start_pfn, &level,
+@@ -1844,7 +1848,8 @@ __domain_mapping(struct dmar_domain *dom
+ unsigned long pages_to_remove;
+
+ pteval |= DMA_PTE_LARGE_PAGE;
+- pages_to_remove = min_t(unsigned long, nr_pages,
++ pages_to_remove = min_t(unsigned long,
++ round_down(nr_pages, lvl_pages),
+ nr_pte_to_next_page(pte) * lvl_pages);
+ end_pfn = iov_pfn + pages_to_remove - 1;
+ switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
--- /dev/null
+From 5282491fc49d5614ac6ddcd012e5743eecb6a67c Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 10 Sep 2025 11:22:52 +0900
+Subject: ksmbd: smbdirect: validate data_offset and data_length field of smb_direct_data_transfer
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 5282491fc49d5614ac6ddcd012e5743eecb6a67c upstream.
+
+If the data_offset and data_length fields of the smb_direct_data_transfer
+struct are invalid, an out-of-bounds issue could happen.
+This patch validates the data_offset and data_length fields in recv_done().
+
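+Both fields are 32-bit values taken from the wire, so their sum has to be
+computed in 64 bits or it can wrap and defeat the check. A minimal sketch of
+the pattern (generic C with a hypothetical helper name, not the ksmbd code):
+
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  /* true if [offset, offset + length) fits inside a byte_len-sized buffer */
+  static bool range_ok(uint32_t byte_len, uint32_t offset, uint32_t length)
+  {
+          return byte_len >= offset &&
+                 (uint64_t)offset + length <= byte_len;
+  }
+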
+Cc: stable@vger.kernel.org
+Fixes: 2ea086e35c3d ("ksmbd: add buffer validation for smb direct")
+Reviewed-by: Stefan Metzmacher <metze@samba.org>
+Reported-by: Luigino Camastra, Aisle Research <luigino.camastra@aisle.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/transport_rdma.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -553,7 +553,7 @@ static void recv_done(struct ib_cq *cq,
+ case SMB_DIRECT_MSG_DATA_TRANSFER: {
+ struct smb_direct_data_transfer *data_transfer =
+ (struct smb_direct_data_transfer *)recvmsg->packet;
+- unsigned int data_length;
++ unsigned int data_offset, data_length;
+ int avail_recvmsg_count, receive_credits;
+
+ if (wc->byte_len <
+@@ -564,14 +564,15 @@ static void recv_done(struct ib_cq *cq,
+ }
+
+ data_length = le32_to_cpu(data_transfer->data_length);
+- if (data_length) {
+- if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+- (u64)data_length) {
+- put_recvmsg(t, recvmsg);
+- smb_direct_disconnect_rdma_connection(t);
+- return;
+- }
++ data_offset = le32_to_cpu(data_transfer->data_offset);
++ if (wc->byte_len < data_offset ||
++ wc->byte_len < (u64)data_offset + data_length) {
++ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
++ return;
++ }
+
++ if (data_length) {
+ if (t->full_packet_received)
+ recvmsg->first_segment = true;
+
--- /dev/null
+From e1868ba37fd27c6a68e31565402b154beaa65df0 Mon Sep 17 00:00:00 2001
+From: Stefan Metzmacher <metze@samba.org>
+Date: Thu, 11 Sep 2025 10:05:23 +0900
+Subject: ksmbd: smbdirect: verify remaining_data_length respects max_fragmented_recv_size
+
+From: Stefan Metzmacher <metze@samba.org>
+
+commit e1868ba37fd27c6a68e31565402b154beaa65df0 upstream.
+
+This is inspired by the check for data_offset + data_length.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Cc: stable@vger.kernel.org
+Fixes: 2ea086e35c3d ("ksmbd: add buffer validation for smb direct")
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/transport_rdma.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -553,7 +553,7 @@ static void recv_done(struct ib_cq *cq,
+ case SMB_DIRECT_MSG_DATA_TRANSFER: {
+ struct smb_direct_data_transfer *data_transfer =
+ (struct smb_direct_data_transfer *)recvmsg->packet;
+- unsigned int data_offset, data_length;
++ u32 remaining_data_length, data_offset, data_length;
+ int avail_recvmsg_count, receive_credits;
+
+ if (wc->byte_len <
+@@ -563,6 +563,7 @@ static void recv_done(struct ib_cq *cq,
+ return;
+ }
+
++ remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
+ data_length = le32_to_cpu(data_transfer->data_length);
+ data_offset = le32_to_cpu(data_transfer->data_offset);
+ if (wc->byte_len < data_offset ||
+@@ -570,6 +571,14 @@ static void recv_done(struct ib_cq *cq,
+ put_recvmsg(t, recvmsg);
+ smb_direct_disconnect_rdma_connection(t);
+ return;
++ }
++ if (remaining_data_length > t->max_fragmented_recv_size ||
++ data_length > t->max_fragmented_recv_size ||
++ (u64)remaining_data_length + (u64)data_length >
++ (u64)t->max_fragmented_recv_size) {
++ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
++ return;
+ }
+
+ if (data_length) {
--- /dev/null
+From d02e48830e3fce9701265f6c5a58d9bdaf906a76 Mon Sep 17 00:00:00 2001
+From: "Maciej S. Szmigiero" <maciej.szmigiero@oracle.com>
+Date: Mon, 25 Aug 2025 18:44:28 +0200
+Subject: KVM: SVM: Sync TPR from LAPIC into VMCB::V_TPR even if AVIC is active
+
+From: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
+
+commit d02e48830e3fce9701265f6c5a58d9bdaf906a76 upstream.
+
+Commit 3bbf3565f48c ("svm: Do not intercept CR8 when enable AVIC")
+inhibited pre-VMRUN sync of TPR from LAPIC into VMCB::V_TPR in
+sync_lapic_to_cr8() when AVIC is active.
+
+AVIC does automatically sync between these two fields, however it does
+so only on explicit guest writes to one of these fields, not on a bare
+VMRUN.
+
+This meant that when AVIC is enabled host changes to TPR in the LAPIC
+state might not get automatically copied into the V_TPR field of VMCB.
+
+This is especially true when it is the userspace setting LAPIC state via
+KVM_SET_LAPIC ioctl() since userspace does not have access to the guest
+VMCB.
+
+Practice shows that it is the V_TPR that is actually used by the AVIC to
+decide whether to issue pending interrupts to the CPU (not TPR in TASKPRI),
+so any leftover value in V_TPR will cause serious interrupt delivery issues
+in the guest when AVIC is enabled.
+
+Fix this issue by doing pre-VMRUN TPR sync from LAPIC into VMCB::V_TPR
+even when AVIC is enabled.
+
+Fixes: 3bbf3565f48c ("svm: Do not intercept CR8 when enable AVIC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
+Reviewed-by: Naveen N Rao (AMD) <naveen@kernel.org>
+Link: https://lore.kernel.org/r/c231be64280b1461e854e1ce3595d70cde3a2e9d.1756139678.git.maciej.szmigiero@oracle.com
+[sean: tag for stable@]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4044,8 +4044,7 @@ static inline void sync_lapic_to_cr8(str
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 cr8;
+
+- if (nested_svm_virtualize_tpr(vcpu) ||
+- kvm_vcpu_apicv_active(vcpu))
++ if (nested_svm_virtualize_tpr(vcpu))
+ return;
+
+ cr8 = kvm_get_cr8(vcpu);
--- /dev/null
+From a9d13433fe17be0e867e51e71a1acd2731fbef8d Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Thu, 18 Sep 2025 19:44:01 +0800
+Subject: LoongArch: Align ACPI structures if ARCH_STRICT_ALIGN enabled
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit a9d13433fe17be0e867e51e71a1acd2731fbef8d upstream.
+
+ARCH_STRICT_ALIGN is used for hardware without UAL; for now it only controls
+the -mstrict-align flag. However, ACPI structures are packed by default
+and so will cause unaligned accesses.
+
+To avoid this, define ACPI_MISALIGNMENT_NOT_SUPPORTED in asm/acenv.h to
+align ACPI structures if ARCH_STRICT_ALIGN enabled.
+
+Cc: stable@vger.kernel.org
+Reported-by: Binbin Zhou <zhoubinbin@loongson.cn>
+Suggested-by: Xi Ruoyao <xry111@xry111.site>
+Suggested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/acenv.h | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/loongarch/include/asm/acenv.h
++++ b/arch/loongarch/include/asm/acenv.h
+@@ -10,9 +10,8 @@
+ #ifndef _ASM_LOONGARCH_ACENV_H
+ #define _ASM_LOONGARCH_ACENV_H
+
+-/*
+- * This header is required by ACPI core, but we have nothing to fill in
+- * right now. Will be updated later when needed.
+- */
++#ifdef CONFIG_ARCH_STRICT_ALIGN
++#define ACPI_MISALIGNMENT_NOT_SUPPORTED
++#endif /* CONFIG_ARCH_STRICT_ALIGN */
+
+ #endif /* _ASM_LOONGARCH_ACENV_H */
--- /dev/null
+From 51adb03e6b865c0c6790f29659ff52d56742de2e Mon Sep 17 00:00:00 2001
+From: Tao Cui <cuitao@kylinos.cn>
+Date: Thu, 18 Sep 2025 19:44:04 +0800
+Subject: LoongArch: Check the return value when creating kobj
+
+From: Tao Cui <cuitao@kylinos.cn>
+
+commit 51adb03e6b865c0c6790f29659ff52d56742de2e upstream.
+
+Add a check for the return value of kobject_create_and_add(), to ensure
+that the kobj allocation succeeds for later use.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tao Cui <cuitao@kylinos.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/env.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/loongarch/kernel/env.c
++++ b/arch/loongarch/kernel/env.c
+@@ -109,6 +109,8 @@ static int __init boardinfo_init(void)
+ struct kobject *loongson_kobj;
+
+ loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
++ if (!loongson_kobj)
++ return -ENOMEM;
+
+ return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
+ }
--- /dev/null
+From 677d4a52d4dc4a147d5e84af9ff207832578be70 Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Thu, 18 Sep 2025 19:44:08 +0800
+Subject: LoongArch: Fix unreliable stack for live patching
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit 677d4a52d4dc4a147d5e84af9ff207832578be70 upstream.
+
+When testing the kernel live patching with "modprobe livepatch-sample",
+there is a timeout over 15 seconds from "starting patching transition"
+to "patching complete". The dmesg command shows "unreliable stack" for
+user tasks in debug mode, here is one of the messages:
+
+ livepatch: klp_try_switch_task: bash:1193 has an unreliable stack
+
+The "unreliable stack" is because it can not unwind from do_syscall()
+to its previous frame handle_syscall(). It should use fp to find the
+original stack top due to secondary stack in do_syscall(), but fp is
+not used for some other functions, then fp can not be restored by the
+next frame of do_syscall(), so it is necessary to save fp if task is
+not current, in order to get the stack top of do_syscall().
+
+Here are the call chains:
+
+ klp_enable_patch()
+ klp_try_complete_transition()
+ klp_try_switch_task()
+ klp_check_and_switch_task()
+ klp_check_stack()
+ stack_trace_save_tsk_reliable()
+ arch_stack_walk_reliable()
+
+When executing "rmmod livepatch-sample", there exists a similar issue.
+With this patch, it takes a short time for patching and unpatching.
+
+Before:
+
+ # modprobe livepatch-sample
+ # dmesg -T | tail -3
+ [Sat Sep 6 11:00:20 2025] livepatch: 'livepatch_sample': starting patching transition
+ [Sat Sep 6 11:00:35 2025] livepatch: signaling remaining tasks
+ [Sat Sep 6 11:00:36 2025] livepatch: 'livepatch_sample': patching complete
+
+ # echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
+ # rmmod livepatch_sample
+ rmmod: ERROR: Module livepatch_sample is in use
+ # rmmod livepatch_sample
+ # dmesg -T | tail -3
+ [Sat Sep 6 11:06:05 2025] livepatch: 'livepatch_sample': starting unpatching transition
+ [Sat Sep 6 11:06:20 2025] livepatch: signaling remaining tasks
+ [Sat Sep 6 11:06:21 2025] livepatch: 'livepatch_sample': unpatching complete
+
+After:
+
+ # modprobe livepatch-sample
+ # dmesg -T | tail -2
+ [Tue Sep 16 16:19:30 2025] livepatch: 'livepatch_sample': starting patching transition
+ [Tue Sep 16 16:19:31 2025] livepatch: 'livepatch_sample': patching complete
+
+ # echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
+ # rmmod livepatch_sample
+ # dmesg -T | tail -2
+ [Tue Sep 16 16:19:36 2025] livepatch: 'livepatch_sample': starting unpatching transition
+ [Tue Sep 16 16:19:37 2025] livepatch: 'livepatch_sample': unpatching complete
+
+Cc: stable@vger.kernel.org # v6.9+
+Fixes: 199cc14cb4f1 ("LoongArch: Add kernel livepatching support")
+Reported-by: Xi Zhang <zhangxi@kylinos.cn>
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/stacktrace.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/kernel/stacktrace.c
++++ b/arch/loongarch/kernel/stacktrace.c
+@@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace
+ if (task == current) {
+ regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ regs->csr_era = (unsigned long)__builtin_return_address(0);
++ regs->regs[22] = 0;
+ } else {
+ regs->regs[3] = thread_saved_fp(task);
+ regs->csr_era = thread_saved_ra(task);
++ regs->regs[22] = task->thread.reg22;
+ }
+ regs->regs[1] = 0;
+- regs->regs[22] = 0;
+
+ for (unwind_start(&state, task, regs);
+ !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
--- /dev/null
+From f5003098e2f337d8e8a87dc636250e3fa978d9ad Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Thu, 18 Sep 2025 19:43:42 +0800
+Subject: LoongArch: Update help info of ARCH_STRICT_ALIGN
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit f5003098e2f337d8e8a87dc636250e3fa978d9ad upstream.
+
+Loongson-3A6000 and 3C6000 CPUs also support unaligned memory access, so
+the current description is out of date to some extent.
+
+Actually, all Loongson-3 series processors based on LoongArch support
+unaligned memory access; this hardware capability is indicated by bit 20
+(UAL) of the CPUCFG1 register. Update the help info to reflect reality.
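+
+As an illustration only (not part of this patch), the UAL capability can
+be probed with the cpucfg instruction; the helper below is a hypothetical
+sketch, and the kernel's own cpucfg/cpu-feature helpers should be used in
+real code:
+
+  /* Hypothetical sketch: probe bit 20 (UAL) of CPUCFG1 on LoongArch. */
+  static inline int example_cpu_has_unaligned_access(void)
+  {
+          unsigned int cfg1;
+
+          /* cpucfg rd, rj: read the config word whose index is in rj (1 here) */
+          asm volatile("cpucfg %0, %1" : "=r" (cfg1) : "r" (1));
+
+          return (cfg1 >> 20) & 0x1;      /* 1: unaligned access supported */
+  }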
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/Kconfig | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -540,10 +540,14 @@ config ARCH_STRICT_ALIGN
+ -mstrict-align build parameter to prevent unaligned accesses.
+
+ CPUs with h/w unaligned access support:
+- Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
++ Loongson-2K2000/2K3000 and all of Loongson-3 series processors
++ based on LoongArch.
+
+ CPUs without h/w unaligned access support:
+- Loongson-2K500/2K1000.
++ Loongson-2K0300/2K0500/2K1000.
++
++ If you want to make sure whether to support unaligned memory access
++ on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
+
+ This option is enabled by default to make the kernel be able to run
+ on all LoongArch systems. But you can disable it manually if you want
--- /dev/null
+From ac398f570724c41e5e039d54e4075519f6af7408 Mon Sep 17 00:00:00 2001
+From: Guangshuo Li <202321181@mail.sdu.edu.cn>
+Date: Thu, 18 Sep 2025 19:44:10 +0800
+Subject: LoongArch: vDSO: Check kcalloc() result in init_vdso()
+
+From: Guangshuo Li <202321181@mail.sdu.edu.cn>
+
+commit ac398f570724c41e5e039d54e4075519f6af7408 upstream.
+
+Add a NULL-pointer check after the kcalloc() call in init_vdso(). If
+allocation fails, return -ENOMEM to prevent a possible dereference of
+vdso_info.code_mapping.pages when it is NULL.
+
+Cc: stable@vger.kernel.org
+Fixes: 2ed119aef60d ("LoongArch: Set correct size for vDSO code mapping")
+Signed-off-by: Guangshuo Li <202321181@mail.sdu.edu.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/vdso.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/loongarch/kernel/vdso.c
++++ b/arch/loongarch/kernel/vdso.c
+@@ -108,6 +108,9 @@ static int __init init_vdso(void)
+ vdso_info.code_mapping.pages =
+ kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
+
++ if (!vdso_info.code_mapping.pages)
++ return -ENOMEM;
++
+ pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
+ for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
+ vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
--- /dev/null
+From 8d79ed36bfc83d0583ab72216b7980340478cdfb Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Mon, 8 Sep 2025 15:21:12 -0700
+Subject: mm: revert "mm: vmscan.c: fix OOM on swap stress test"
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 8d79ed36bfc83d0583ab72216b7980340478cdfb upstream.
+
+This reverts commit 0885ef470560: that was a fix to the reverted
+33dfe9204f29b415bbc0abb1a50642d1ba94f5e9.
+
+Link: https://lkml.kernel.org/r/aa0e9d67-fbcd-9d79-88a1-641dfbe1d9d1@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Chris Li <chrisl@kernel.org>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Keir Fraser <keirf@google.com>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Li Zhe <lizhe.67@bytedance.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Shivank Garg <shivankg@amd.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Wei Xu <weixugc@google.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: yangge <yangge1116@126.com>
+Cc: Yuanchu Xie <yuanchu@google.com>
+Cc: Yu Zhao <yuzhao@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/vmscan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4352,7 +4352,7 @@ static bool sort_folio(struct lruvec *lr
+ }
+
+ /* ineligible */
+- if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
++ if (zone > sc->reclaim_idx) {
+ gen = folio_inc_gen(lruvec, folio, false);
+ list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ return true;
--- /dev/null
+From 8ab2f1c35669bff7d7ed1bb16bf5cc989b3e2e17 Mon Sep 17 00:00:00 2001
+From: Thomas Fourier <fourier.thomas@gmail.com>
+Date: Tue, 26 Aug 2025 09:58:08 +0200
+Subject: mmc: mvsdio: Fix dma_unmap_sg() nents value
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+commit 8ab2f1c35669bff7d7ed1bb16bf5cc989b3e2e17 upstream.
+
+dma_unmap_sg() should be called with the same nents as was passed to
+dma_map_sg(), not with the value the map function returned.
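+
+To illustrate the rule, here is a generic sketch (assuming
+<linux/dma-mapping.h>; the function and variable names are made up and
+not taken from this driver):
+
+  static int example_map_do_unmap(struct device *dev,
+                                  struct scatterlist *sgl, int nents)
+  {
+          int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
+
+          if (!mapped)
+                  return -ENOMEM;
+
+          /* ... program the hardware using the "mapped" count ... */
+
+          /* always unmap with the original nents, never with "mapped" */
+          dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
+          return 0;
+  }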
+
+Fixes: 236caa7cc351 ("mmc: SDIO driver for Marvell SoCs")
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/mvsdio.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/mvsdio.c
++++ b/drivers/mmc/host/mvsdio.c
+@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_
+ host->pio_ptr = NULL;
+ host->pio_size = 0;
+ } else {
+- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+
--- /dev/null
+From f755be0b1ff429a2ecf709beeb1bcd7abc111c2b Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri, 12 Sep 2025 14:25:50 +0200
+Subject: mptcp: propagate shutdown to subflows when possible
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit f755be0b1ff429a2ecf709beeb1bcd7abc111c2b upstream.
+
+When the MPTCP DATA FIN has been ACKed, there is no more MPTCP-related
+metadata to exchange, and all subflows can be safely shut down.
+
+Before this patch, the subflows were actually terminated at 'close()'
+time. That's certainly fine most of the time, but not when userspace
+calls 'shutdown()' on a connection without close()ing it. When doing so,
+the subflows were staying in the LAST_ACK state on one side -- and
+consequently in FIN_WAIT2 on the other side -- until the 'close()' of
+the MPTCP socket.
+
+Now, when the DATA FIN has been ACKed, all subflows are shut down. A
+consequence of this is that the TCP 'FIN' flag can be set earlier now,
+but the end result is the same. This affects the packetdrill tests
+looking at the end of the MPTCP connections, but for a good reason.
+
+Note that tcp_shutdown() will check the subflow state, so no need to do
+that again before calling it.
+
+Fixes: 3721b9b64676 ("mptcp: Track received DATA_FIN sequence number and add related helpers")
+Cc: stable@vger.kernel.org
+Fixes: 16a9a9da1723 ("mptcp: Add helper to process acks of DATA_FIN")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-fix-sft-connect-v1-1-d40e77cbbf02@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -413,6 +413,20 @@ static void mptcp_close_wake_up(struct s
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ }
+
++static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
++{
++ struct mptcp_subflow_context *subflow;
++
++ mptcp_for_each_subflow(msk, subflow) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ bool slow;
++
++ slow = lock_sock_fast(ssk);
++ tcp_shutdown(ssk, SEND_SHUTDOWN);
++ unlock_sock_fast(ssk, slow);
++ }
++}
++
+ /* called under the msk socket lock */
+ static bool mptcp_pending_data_fin_ack(struct sock *sk)
+ {
+@@ -437,6 +451,7 @@ static void mptcp_check_data_fin_ack(str
+ break;
+ case TCP_CLOSING:
+ case TCP_LAST_ACK:
++ mptcp_shutdown_subflows(msk);
+ mptcp_set_state(sk, TCP_CLOSE);
+ break;
+ }
+@@ -605,6 +620,7 @@ static bool mptcp_check_data_fin(struct
+ mptcp_set_state(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
++ mptcp_shutdown_subflows(msk);
+ mptcp_set_state(sk, TCP_CLOSE);
+ break;
+ default:
--- /dev/null
+From b6f56a44e4c1014b08859dcf04ed246500e310e5 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hansg@kernel.org>
+Date: Sat, 13 Sep 2025 13:35:15 +0200
+Subject: net: rfkill: gpio: Fix crash due to dereferencering uninitialized pointer
+
+From: Hans de Goede <hansg@kernel.org>
+
+commit b6f56a44e4c1014b08859dcf04ed246500e310e5 upstream.
+
+Since commit 7d5e9737efda ("net: rfkill: gpio: get the name and type from
+device property") rfkill_find_type() gets called with the possibly
+uninitialized "const char *type_name;" local variable.
+
+On x86 systems when rfkill-gpio binds to a "BCM4752" or "LNV4752"
+acpi_device, the rfkill->type is set based on the ACPI acpi_device_id:
+
+ rfkill->type = (unsigned)id->driver_data;
+
+and there is no "type" property so device_property_read_string() will fail
+and leave type_name uninitialized, leading to a potential crash.
+
+rfkill_find_type() does accept a NULL pointer, so fix the potential
+crash by initializing type_name to NULL.
+
+Note that this has likely not been caught so far because:
+
+1. Not many x86 machines actually have a "BCM4752"/"LNV4752" acpi_device
+2. The stack happened to contain NULL where type_name is stored
+
+Fixes: 7d5e9737efda ("net: rfkill: gpio: get the name and type from device property")
+Cc: stable@vger.kernel.org
+Cc: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://patch.msgid.link/20250913113515.21698-1-hansg@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rfkill/rfkill-gpio.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -94,10 +94,10 @@ static const struct dmi_system_id rfkill
+ static int rfkill_gpio_probe(struct platform_device *pdev)
+ {
+ struct rfkill_gpio_data *rfkill;
+- struct gpio_desc *gpio;
++ const char *type_name = NULL;
+ const char *name_property;
+ const char *type_property;
+- const char *type_name;
++ struct gpio_desc *gpio;
+ int ret;
+
+ if (dmi_check_system(rfkill_gpio_deny_table))
--- /dev/null
+From 025e87f8ea2ae3a28bf1fe2b052bfa412c27ed4a Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Sat, 6 Sep 2025 23:43:34 +0900
+Subject: nilfs2: fix CFI failure when accessing /sys/fs/nilfs2/features/*
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 025e87f8ea2ae3a28bf1fe2b052bfa412c27ed4a upstream.
+
+When accessing one of the files under /sys/fs/nilfs2/features with
+CONFIG_CFI_CLANG enabled, there is a CFI violation:
+
+ CFI failure at kobj_attr_show+0x59/0x80 (target: nilfs_feature_revision_show+0x0/0x30; expected type: 0xfc392c4d)
+ ...
+ Call Trace:
+ <TASK>
+ sysfs_kf_seq_show+0x2a6/0x390
+ ? __cfi_kobj_attr_show+0x10/0x10
+ kernfs_seq_show+0x104/0x15b
+ seq_read_iter+0x580/0xe2b
+ ...
+
+When the kobject of the kset for /sys/fs/nilfs2 is initialized, its ktype
+is set to kset_ktype, which has a ->sysfs_ops of kobj_sysfs_ops. When
+nilfs_feature_attr_group is added to that kobject via
+sysfs_create_group(), the kernfs_ops of each file is sysfs_file_kfops_rw,
+which will call sysfs_kf_seq_show() when ->seq_show() is called.
+sysfs_kf_seq_show() in turn calls kobj_attr_show() through
+->sysfs_ops->show(). kobj_attr_show() casts the provided attribute out to
+a 'struct kobj_attribute' via container_of() and calls ->show(), resulting
+in the CFI violation since neither nilfs_feature_revision_show() nor
+nilfs_feature_README_show() match the prototype of ->show() in 'struct
+kobj_attribute'.
+
+Resolve the CFI violation by adjusting the second parameter in
+nilfs_feature_{revision,README}_show() from 'struct attribute' to 'struct
+kobj_attribute' to match the expected prototype.
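+
+For reference, kobj_attr_show() dispatches through the callback declared
+in 'struct kobj_attribute', sketched below (see <linux/kobject.h> for the
+authoritative definition), which is why the handlers must take a
+'struct kobj_attribute *' as their second parameter:
+
+  struct kobj_attribute {
+          struct attribute attr;
+          ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+                          char *buf);
+          ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+                           const char *buf, size_t count);
+  };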
+
+Link: https://lkml.kernel.org/r/20250906144410.22511-1-konishi.ryusuke@gmail.com
+Fixes: aebe17f68444 ("nilfs2: add /sys/fs/nilfs2/features group")
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Closes: https://lore.kernel.org/oe-lkp/202509021646.bc78d9ef-lkp@intel.com/
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/sysfs.c | 4 ++--
+ fs/nilfs2/sysfs.h | 8 ++++----
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/fs/nilfs2/sysfs.c
++++ b/fs/nilfs2/sysfs.c
+@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(str
+ ************************************************************************/
+
+ static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
+- struct attribute *attr, char *buf)
++ struct kobj_attribute *attr, char *buf)
+ {
+ return sysfs_emit(buf, "%d.%d\n",
+ NILFS_CURRENT_REV, NILFS_MINOR_REV);
+@@ -1087,7 +1087,7 @@ static const char features_readme_str[]
+ "(1) revision\n\tshow current revision of NILFS file system driver.\n";
+
+ static ssize_t nilfs_feature_README_show(struct kobject *kobj,
+- struct attribute *attr,
++ struct kobj_attribute *attr,
+ char *buf)
+ {
+ return sysfs_emit(buf, features_readme_str);
+--- a/fs/nilfs2/sysfs.h
++++ b/fs/nilfs2/sysfs.h
+@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
+ struct completion sg_segments_kobj_unregister;
+ };
+
+-#define NILFS_COMMON_ATTR_STRUCT(name) \
++#define NILFS_KOBJ_ATTR_STRUCT(name) \
+ struct nilfs_##name##_attr { \
+ struct attribute attr; \
+- ssize_t (*show)(struct kobject *, struct attribute *, \
++ ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
+ char *); \
+- ssize_t (*store)(struct kobject *, struct attribute *, \
++ ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
+ const char *, size_t); \
+ }
+
+-NILFS_COMMON_ATTR_STRUCT(feature);
++NILFS_KOBJ_ATTR_STRUCT(feature);
+
+ #define NILFS_DEV_ATTR_STRUCT(name) \
+ struct nilfs_##name##_attr { \
--- /dev/null
+From 539d7344d4feaea37e05863e9aa86bd31f28e46f Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Thu, 18 Sep 2025 19:43:36 +0800
+Subject: objtool/LoongArch: Mark special atomic instruction as INSN_BUG type
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit 539d7344d4feaea37e05863e9aa86bd31f28e46f upstream.
+
+When compiling with LLVM and CONFIG_RUST is set, the following objtool
+warning appears:
+
+ rust/compiler_builtins.o: warning: objtool: __rust__unordsf2(): unexpected end of section .text.unlikely.
+
+objdump shows that the end of section .text.unlikely is an atomic
+instruction:
+
+ amswap.w $zero, $ra, $zero
+
+According to the LoongArch Reference Manual, if the amswap.w atomic
+memory access instruction has the same register number for rd and rj,
+its execution will trigger an Instruction Non-defined Exception, so
+mark the above instruction as INSN_BUG type to fix the warning.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/arch/loongarch/include/asm/inst.h | 12 ++++++++++++
+ tools/objtool/arch/loongarch/decode.c | 21 +++++++++++++++++++++
+ 2 files changed, 33 insertions(+)
+
+--- a/tools/arch/loongarch/include/asm/inst.h
++++ b/tools/arch/loongarch/include/asm/inst.h
+@@ -51,6 +51,10 @@ enum reg2i16_op {
+ bgeu_op = 0x1b,
+ };
+
++enum reg3_op {
++ amswapw_op = 0x70c0,
++};
++
+ struct reg0i15_format {
+ unsigned int immediate : 15;
+ unsigned int opcode : 17;
+@@ -96,6 +100,13 @@ struct reg2i16_format {
+ unsigned int opcode : 6;
+ };
+
++struct reg3_format {
++ unsigned int rd : 5;
++ unsigned int rj : 5;
++ unsigned int rk : 5;
++ unsigned int opcode : 17;
++};
++
+ union loongarch_instruction {
+ unsigned int word;
+ struct reg0i15_format reg0i15_format;
+@@ -105,6 +116,7 @@ union loongarch_instruction {
+ struct reg2i12_format reg2i12_format;
+ struct reg2i14_format reg2i14_format;
+ struct reg2i16_format reg2i16_format;
++ struct reg3_format reg3_format;
+ };
+
+ #define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
+--- a/tools/objtool/arch/loongarch/decode.c
++++ b/tools/objtool/arch/loongarch/decode.c
+@@ -281,6 +281,25 @@ static bool decode_insn_reg2i16_fomat(un
+ return true;
+ }
+
++static bool decode_insn_reg3_fomat(union loongarch_instruction inst,
++ struct instruction *insn)
++{
++ switch (inst.reg3_format.opcode) {
++ case amswapw_op:
++ if (inst.reg3_format.rd == LOONGARCH_GPR_ZERO &&
++ inst.reg3_format.rk == LOONGARCH_GPR_RA &&
++ inst.reg3_format.rj == LOONGARCH_GPR_ZERO) {
++ /* amswap.w $zero, $ra, $zero */
++ insn->type = INSN_BUG;
++ }
++ break;
++ default:
++ return false;
++ }
++
++ return true;
++}
++
+ int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
+ unsigned long offset, unsigned int maxlen,
+ struct instruction *insn)
+@@ -312,6 +331,8 @@ int arch_decode_instruction(struct objto
+ return 0;
+ if (decode_insn_reg2i16_fomat(inst, insn))
+ return 0;
++ if (decode_insn_reg3_fomat(inst, insn))
++ return 0;
+
+ if (inst.word == 0) {
+ /* andi $zero, $zero, 0x0 */
--- /dev/null
+From baad7830ee9a56756b3857348452fe756cb0a702 Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Thu, 18 Sep 2025 19:43:36 +0800
+Subject: objtool/LoongArch: Mark types based on break immediate code
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit baad7830ee9a56756b3857348452fe756cb0a702 upstream.
+
+If the break immediate code is 0, the instruction type should be marked
+as INSN_TRAP. If the break immediate code is 1, the type should be
+marked as INSN_BUG.
+
+While at it, format the code style and add the code comment for nop.
+
+Cc: stable@vger.kernel.org
+Suggested-by: WANG Rui <wangrui@loongson.cn>
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/arch/loongarch/decode.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/tools/objtool/arch/loongarch/decode.c
++++ b/tools/objtool/arch/loongarch/decode.c
+@@ -313,10 +313,16 @@ int arch_decode_instruction(struct objto
+ if (decode_insn_reg2i16_fomat(inst, insn))
+ return 0;
+
+- if (inst.word == 0)
++ if (inst.word == 0) {
++ /* andi $zero, $zero, 0x0 */
+ insn->type = INSN_NOP;
+- else if (inst.reg0i15_format.opcode == break_op) {
+- /* break */
++ } else if (inst.reg0i15_format.opcode == break_op &&
++ inst.reg0i15_format.immediate == 0x0) {
++ /* break 0x0 */
++ insn->type = INSN_TRAP;
++ } else if (inst.reg0i15_format.opcode == break_op &&
++ inst.reg0i15_format.immediate == 0x1) {
++ /* break 0x1 */
+ insn->type = INSN_BUG;
+ } else if (inst.reg2_format.opcode == ertn_op) {
+ /* ertn */
--- /dev/null
+From 2c334d038466ac509468fbe06905a32d202117db Mon Sep 17 00:00:00 2001
+From: "H. Nikolaus Schaller" <hns@goldelico.com>
+Date: Sat, 23 Aug 2025 12:34:56 +0200
+Subject: power: supply: bq27xxx: fix error return in case of no bq27000 hdq battery
+
+From: H. Nikolaus Schaller <hns@goldelico.com>
+
+commit 2c334d038466ac509468fbe06905a32d202117db upstream.
+
+Since commit
+
+ commit f16d9fb6cf03 ("power: supply: bq27xxx: Retrieve again when busy")
+
+the console log of some devices with hdq enabled but no bq27000 battery
+(e.g. the Pandaboard) is flooded with messages like:
+
+[ 34.247833] power_supply bq27000-battery: driver failed to report 'status' property: -1
+
+as soon as user-space finds a /sys entry and tries to read the
+"status" property.
+
+It turns out that the offending commit changes the logic to now return the
+value of cache.flags if it is <0, likely under the assumption that it is
+an error number. For normal errors from bq27xxx_read() this is indeed
+the case.
+
+But there is special code to detect whether no bq27000 is installed or
+accessible through hdq/1wire and to report this. In that case, cache.flags
+has historically been set by
+
+ commit 3dd843e1c26a ("bq27000: report missing device better.")
+
+to the constant -1, which made reading properties return -ENODEV. So
+everything appeared to be fine before the return value was passed upwards.
+
+Now the -1 is returned as -EPERM instead of -ENODEV, triggering the error
+condition in power_supply_format_property() which then floods the console log.
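+
+The mapping is easy to see from the errno values (EPERM is 1, ENODEV is
+19), so the old and new sentinels behave as follows (illustrative
+comments only):
+
+  cache.flags = -1;        /* old sentinel: propagated as -EPERM (EPERM == 1) */
+  cache.flags = -ENODEV;   /* new sentinel: callers see the intended -ENODEV */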
+
+So we change the detection of a missing bq27000 battery to simply set
+
+ cache.flags = -ENODEV
+
+instead of -1.
+
+Fixes: f16d9fb6cf03 ("power: supply: bq27xxx: Retrieve again when busy")
+Cc: Jerry Lv <Jerry.Lv@axis.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: H. Nikolaus Schaller <hns@goldelico.com>
+Link: https://lore.kernel.org/r/692f79eb6fd541adb397038ea6e750d4de2deddf.1755945297.git.hns@goldelico.com
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/bq27xxx_battery.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1910,7 +1910,7 @@ static void bq27xxx_battery_update_unloc
+
+ cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+ if ((cache.flags & 0xff) == 0xff)
+- cache.flags = -1; /* read error */
++ cache.flags = -ENODEV; /* read error */
+ if (cache.flags >= 0) {
+ cache.capacity = bq27xxx_battery_read_soc(di);
+
--- /dev/null
+From 1e451977e1703b6db072719b37cd1b8e250b9cc9 Mon Sep 17 00:00:00 2001
+From: "H. Nikolaus Schaller" <hns@goldelico.com>
+Date: Sat, 23 Aug 2025 12:34:57 +0200
+Subject: power: supply: bq27xxx: restrict no-battery detection to bq27000
+
+From: H. Nikolaus Schaller <hns@goldelico.com>
+
+commit 1e451977e1703b6db072719b37cd1b8e250b9cc9 upstream.
+
+There are fuel gauges in the bq27xxx series (e.g. bq27z561) which may in some
+cases report 0xff as the value of BQ27XXX_REG_FLAGS; this should not be
+interpreted as "no battery" as it would be for a disconnected battery with a
+built-in bq27000 chip.
+
+So restrict the no-battery detection originally introduced by
+
+ commit 3dd843e1c26a ("bq27000: report missing device better.")
+
+to the bq27000.
+
+There is no need to backport further because this was hidden before
+
+ commit f16d9fb6cf03 ("power: supply: bq27xxx: Retrieve again when busy")
+
+Fixes: f16d9fb6cf03 ("power: supply: bq27xxx: Retrieve again when busy")
+Suggested-by: Jerry Lv <Jerry.Lv@axis.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: H. Nikolaus Schaller <hns@goldelico.com>
+Link: https://lore.kernel.org/r/dd979fa6855fd051ee5117016c58daaa05966e24.1755945297.git.hns@goldelico.com
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/bq27xxx_battery.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1909,8 +1909,8 @@ static void bq27xxx_battery_update_unloc
+ bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+
+ cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+- if ((cache.flags & 0xff) == 0xff)
+- cache.flags = -ENODEV; /* read error */
++ if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
++ cache.flags = -ENODEV; /* bq27000 hdq read error */
+ if (cache.flags >= 0) {
+ cache.capacity = bq27xxx_battery_read_soc(di);
+
--- /dev/null
+From 4351ca3fcb3ffecf12631b4996bf085a2dad0db6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?H=C3=A5kon=20Bugge?= <haakon.bugge@oracle.com>
+Date: Thu, 11 Sep 2025 15:33:34 +0200
+Subject: rds: ib: Increment i_fastreg_wrs before bailing out
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Håkon Bugge <haakon.bugge@oracle.com>
+
+commit 4351ca3fcb3ffecf12631b4996bf085a2dad0db6 upstream.
+
+We need to increment i_fastreg_wrs before we bail out from
+rds_ib_post_reg_frmr().
+
+We have a fixed budget for how many FRWR operations can be outstanding
+on the dedicated QP used for memory registrations and de-registrations.
+This budget is enforced by the atomic_t
+i_fastreg_wrs. If we bail out early in rds_ib_post_reg_frmr(), we will
+"leak" the possibility of posting an FRWR operation, and if that
+accumulates, no FRWR operation can be carried out.
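+
+The pattern being restored can be sketched generically as follows (the
+names here are illustrative, not the actual RDS code):
+
+  if (atomic_dec_return(&budget) < 0) {
+          atomic_inc(&budget);    /* no credit available, give it back */
+          return -EAGAIN;
+  }
+
+  ret = post_work();
+  if (ret) {
+          atomic_inc(&budget);    /* every early exit must return the credit */
+          return ret;
+  }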
+
+Fixes: 1659185fb4d0 ("RDS: IB: Support Fastreg MR (FRMR) memory registration mode")
+Fixes: 3a2886cca703 ("net/rds: Keep track of and wait for FRWR segments in use upon shutdown")
+Cc: stable@vger.kernel.org
+Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
+Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
+Link: https://patch.msgid.link/20250911133336.451212-1-haakon.bugge@oracle.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rds/ib_frmr.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/net/rds/ib_frmr.c
++++ b/net/rds/ib_frmr.c
+@@ -133,12 +133,15 @@ static int rds_ib_post_reg_frmr(struct r
+
+ ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
+ &off, PAGE_SIZE);
+- if (unlikely(ret != ibmr->sg_dma_len))
+- return ret < 0 ? ret : -EINVAL;
++ if (unlikely(ret != ibmr->sg_dma_len)) {
++ ret = ret < 0 ? ret : -EINVAL;
++ goto out_inc;
++ }
+
+- if (cmpxchg(&frmr->fr_state,
+- FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
+- return -EBUSY;
++ if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) {
++ ret = -EBUSY;
++ goto out_inc;
++ }
+
+ atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
+
+@@ -166,11 +169,10 @@ static int rds_ib_post_reg_frmr(struct r
+ /* Failure here can be because of -ENOMEM as well */
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
+
+- atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ if (printk_ratelimit())
+ pr_warn("RDS/IB: %s returned error(%d)\n",
+ __func__, ret);
+- goto out;
++ goto out_inc;
+ }
+
+ /* Wait for the registration to complete in order to prevent an invalid
+@@ -179,8 +181,10 @@ static int rds_ib_post_reg_frmr(struct r
+ */
+ wait_event(frmr->fr_reg_done, !frmr->fr_reg);
+
+-out:
++ return ret;
+
++out_inc:
++ atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ return ret;
+ }
+
--- /dev/null
+From 8708c5d8b3fb3f6d5d3b9e6bfe01a505819f519a Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri, 12 Sep 2025 14:25:52 +0200
+Subject: selftests: mptcp: avoid spurious errors on TCP disconnect
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 8708c5d8b3fb3f6d5d3b9e6bfe01a505819f519a upstream.
+
+The disconnect test-case with 'plain' TCP sockets generates spurious
+errors, e.g.
+
+ 07 ns1 TCP -> ns1 (dead:beef:1::1:10006) MPTCP
+ read: Connection reset by peer
+ read: Connection reset by peer
+ (duration 155ms) [FAIL] client exit code 3, server 3
+
+ netns ns1-FloSdv (listener) socket stat for 10006:
+ TcpActiveOpens 2 0.0
+ TcpPassiveOpens 2 0.0
+ TcpEstabResets 2 0.0
+ TcpInSegs 274 0.0
+ TcpOutSegs 276 0.0
+ TcpOutRsts 3 0.0
+ TcpExtPruneCalled 2 0.0
+ TcpExtRcvPruned 1 0.0
+ TcpExtTCPPureAcks 104 0.0
+ TcpExtTCPRcvCollapsed 2 0.0
+ TcpExtTCPBacklogCoalesce 42 0.0
+ TcpExtTCPRcvCoalesce 43 0.0
+ TcpExtTCPChallengeACK 1 0.0
+ TcpExtTCPFromZeroWindowAdv 42 0.0
+ TcpExtTCPToZeroWindowAdv 41 0.0
+ TcpExtTCPWantZeroWindowAdv 13 0.0
+ TcpExtTCPOrigDataSent 164 0.0
+ TcpExtTCPDelivered 165 0.0
+ TcpExtTCPRcvQDrop 1 0.0
+
+In the failing scenarios (TCP -> MPTCP), the involved sockets are
+actually plain TCP ones, as fallbacks for passive sockets at 2WHS time
+cause the MPTCP listeners to create 'plain' TCP sockets.
+
+Similar to commit 218cc166321f ("selftests: mptcp: avoid spurious errors
+on disconnect"), the root cause is in the user-space bits: the test
+program tries to disconnect as soon as all the pending data has been
+spooled, generating an RST. If that RST reaches the peer before the
+connection has reached the closed status, the TCP socket will report an
+error to the user-space, as per protocol specification, causing the
+above failure. Note that it looks like this issue got more visible since
+the "tcp: receiver changes" series from commit 06baf9bfa6ca ("Merge
+branch 'tcp-receiver-changes'").
+
+Address the issue by explicitly waiting for the TCP sockets (-t) to
+reach a closed status before performing the disconnect. More precisely,
+the test program now waits for plain TCP sockets or TCP subflows in
+addition to the MPTCP sockets that were already monitored.
+
+While at it, use 'ss' with '-n' to avoid resolving service names, which
+is not needed here.
+
+Fixes: 218cc166321f ("selftests: mptcp: avoid spurious errors on disconnect")
+Cc: stable@vger.kernel.org
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-fix-sft-connect-v1-3-d40e77cbbf02@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_connect.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1234,7 +1234,7 @@ void xdisconnect(int fd)
+ else
+ xerror("bad family");
+
+- strcpy(cmd, "ss -M | grep -q ");
++ strcpy(cmd, "ss -Mnt | grep -q ");
+ cmdlen = strlen(cmd);
+ if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
+ sizeof(cmd) - cmdlen))
+@@ -1244,7 +1244,7 @@ void xdisconnect(int fd)
+
+ /*
+ * wait until the pending data is completely flushed and all
+- * the MPTCP sockets reached the closed status.
++ * the sockets reached the closed status.
+ * disconnect will bypass/ignore/drop any pending data.
+ */
+ for (i = 0; ; i += msec_sleep) {
--- /dev/null
+From 14e22b43df25dbd4301351b882486ea38892ae4f Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri, 12 Sep 2025 14:25:51 +0200
+Subject: selftests: mptcp: connect: catch IO errors on listen side
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 14e22b43df25dbd4301351b882486ea38892ae4f upstream.
+
+IO errors were correctly printed to stderr, and propagated up to the
+main loop for the server side, but the returned value was ignored. As a
+consequence, the program for the listener side was no longer exiting
+with an error code in case of IO issues.
+
+Because of that, some issues might not have been seen. But very likely,
+most issues either had an effect on the client side, or the file
+transfer was not the expected one, e.g. the connection got reset before
+the end. Still, it is better to fix this.
+
+The main consequence of this issue is the error that was reported by the
+selftests: the received and sent files were different, and the MIB
+counters were not printed. Also, when such errors happened during the
+'disconnect' tests, the program tried to continue until the timeout.
+
+Now when an IO error is detected, the program exits directly with an
+error.
+
+Fixes: 05be5e273c84 ("selftests: mptcp: add disconnect tests")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-fix-sft-connect-v1-2-d40e77cbbf02@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_connect.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1079,6 +1079,7 @@ int main_loop_s(int listensock)
+ struct pollfd polls;
+ socklen_t salen;
+ int remotesock;
++ int err = 0;
+ int fd = 0;
+
+ again:
+@@ -1111,7 +1112,7 @@ again:
+ SOCK_TEST_TCPULP(remotesock, 0);
+
+ memset(&winfo, 0, sizeof(winfo));
+- copyfd_io(fd, remotesock, 1, true, &winfo);
++ err = copyfd_io(fd, remotesock, 1, true, &winfo);
+ } else {
+ perror("accept");
+ return 1;
+@@ -1120,10 +1121,10 @@ again:
+ if (cfg_input)
+ close(fd);
+
+- if (--cfg_repeat > 0)
++ if (!err && --cfg_repeat > 0)
+ goto again;
+
+- return 0;
++ return err;
+ }
+
+ static void init_rng(void)
cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
octeontx2-pf-fix-use-after-free-bugs-in-otx2_sync_ts.patch
perf-x86-intel-fix-crash-in-icl_update_topdown_event.patch
+ksmbd-smbdirect-validate-data_offset-and-data_length-field-of-smb_direct_data_transfer.patch
+ksmbd-smbdirect-verify-remaining_data_length-respects-max_fragmented_recv_size.patch
+nilfs2-fix-cfi-failure-when-accessing-sys-fs-nilfs2-features.patch
+crypto-af_alg-disallow-concurrent-writes-in-af_alg_sendmsg.patch
+power-supply-bq27xxx-fix-error-return-in-case-of-no-bq27000-hdq-battery.patch
+power-supply-bq27xxx-restrict-no-battery-detection-to-bq27000.patch
+dm-raid-don-t-set-io_min-and-io_opt-for-raid1.patch
+dm-stripe-fix-a-possible-integer-overflow.patch
+gup-optimize-longterm-pin_user_pages-for-large-folio.patch
+mm-revert-mm-vmscan.c-fix-oom-on-swap-stress-test.patch
+loongarch-update-help-info-of-arch_strict_align.patch
+objtool-loongarch-mark-types-based-on-break-immediate-code.patch
+objtool-loongarch-mark-special-atomic-instruction-as-insn_bug-type.patch
+loongarch-fix-unreliable-stack-for-live-patching.patch
+loongarch-vdso-check-kcalloc-result-in-init_vdso.patch
+loongarch-align-acpi-structures-if-arch_strict_align-enabled.patch
+loongarch-check-the-return-value-when-creating-kobj.patch
+iommu-vt-d-fix-__domain_mapping-s-usage-of-switch_to_super_page.patch
+iommu-amd-pgtbl-fix-possible-race-while-increase-page-table-level.patch
+btrfs-tree-checker-fix-the-incorrect-inode-ref-size-check.patch
+asoc-qcom-audioreach-fix-lpaif_type-configuration-for-the-i2s-interface.patch
+asoc-qcom-q6apm-lpass-dais-fix-null-pointer-dereference-if-source-graph-failed.patch
+asoc-qcom-q6apm-lpass-dais-fix-missing-set_fmt-dai-op-for-i2s.patch
+mmc-mvsdio-fix-dma_unmap_sg-nents-value.patch
+kvm-svm-sync-tpr-from-lapic-into-vmcb-v_tpr-even-if-avic-is-active.patch
+drm-amd-display-allow-rx6xxx-rx7700-to-invoke-amdgpu_irq_get-put.patch
+net-rfkill-gpio-fix-crash-due-to-dereferencering-uninitialized-pointer.patch
+rds-ib-increment-i_fastreg_wrs-before-bailing-out.patch
+mptcp-propagate-shutdown-to-subflows-when-possible.patch
+selftests-mptcp-connect-catch-io-errors-on-listen-side.patch
+selftests-mptcp-avoid-spurious-errors-on-tcp-disconnect.patch
+alsa-hda-realtek-fix-mute-led-for-hp-laptop-15-dw4xx.patch
+io_uring-cmd-let-cmds-to-know-about-dying-task.patch
+io_uring-backport-io_should_terminate_tw.patch
+io_uring-include-dying-ring-in-task_work-should-cancel-state.patch
+io_uring-msg_ring-kill-alloc_cache-for-io_kiocb-allocations.patch
+io_uring-kbuf-drop-warn_on_once-from-incremental-length-check.patch