--- /dev/null
+From stable+bounces-164960-greg=kroah.com@vger.kernel.org Mon Jul 28 16:22:53 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 10:16:34 -0400
+Subject: ARM: 9448/1: Use an absolute path to unified.h in KBUILD_AFLAGS
+To: stable@vger.kernel.org
+Cc: Nathan Chancellor <nathan@kernel.org>, KernelCI bot <bot@kernelci.org>, Masahiro Yamada <masahiroy@kernel.org>, Russell King <rmk+kernel@armlinux.org.uk>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250728141634.2334125-1-sashal@kernel.org>
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit 87c4e1459e80bf65066f864c762ef4dc932fad4b ]
+
+After commit d5c8d6e0fa61 ("kbuild: Update assembler calls to use proper
+flags and language target"), which updated as-instr to use the
+'assembler-with-cpp' language option, the Kbuild version of as-instr
+always fails internally for arch/arm with
+
+ <command-line>: fatal error: asm/unified.h: No such file or directory
+ compilation terminated.
+
+because '-include' flags are now taken into account by the compiler
+driver and as-instr does not have '$(LINUXINCLUDE)', so unified.h is not
+found.
+
+This went unnoticed at the time of the Kbuild change because the last
+use of as-instr in Kbuild that arch/arm could reach was removed in 5.7
+by commit 541ad0150ca4 ("arm: Remove 32bit KVM host support") but a
+stable backport of the Kbuild change to before that point exposed this
+potential issue if one were to be reintroduced.
+
+Follow the general pattern of '-include' paths throughout the tree and
+make unified.h absolute using '$(srctree)' to ensure KBUILD_AFLAGS can
+be used independently.
+
+Closes: https://lore.kernel.org/CACo-S-1qbCX4WAVFA63dWfHtrRHZBTyyr2js8Lx=Az03XHTTHg@mail.gmail.com/
+
+Cc: stable@vger.kernel.org
+Fixes: d5c8d6e0fa61 ("kbuild: Update assembler calls to use proper flags and language target")
+Reported-by: KernelCI bot <bot@kernelci.org>
+Reviewed-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+[ No KBUILD_RUSTFLAGS in 6.12 ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -149,7 +149,7 @@ endif
+ # Need -Uarm for gcc < 3.x
+ KBUILD_CPPFLAGS +=$(cpp-y)
+ KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
+-KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include asm/unified.h -msoft-float
++KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include $(srctree)/arch/arm/include/asm/unified.h -msoft-float
+
+ CHECKFLAGS += -D__arm__
+
--- /dev/null
+From stable+bounces-164614-greg=kroah.com@vger.kernel.org Thu Jul 24 16:11:09 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 10:09:23 -0400
+Subject: arm64: dts: qcom: x1-crd: Fix vreg_l2j_1p2 voltage
+To: stable@vger.kernel.org
+Cc: Stephan Gerhold <stephan.gerhold@linaro.org>, Johan Hovold <johan+linaro@kernel.org>, Abel Vesa <abel.vesa@linaro.org>, Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>, Bjorn Andersson <andersson@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724140923.1305618-1-sashal@kernel.org>
+
+From: Stephan Gerhold <stephan.gerhold@linaro.org>
+
+[ Upstream commit 5ce920e6a8db40e4b094c0d863cbd19fdcfbbb7a ]
+
+In the ACPI DSDT table, PPP_RESOURCE_ID_LDO2_J is configured with 1256000
+uV instead of the 1200000 uV we have currently in the device tree. Use the
+same for consistency and correctness.
+
+Cc: stable@vger.kernel.org
+Fixes: bd50b1f5b6f3 ("arm64: dts: qcom: x1e80100: Add Compute Reference Device")
+Signed-off-by: Stephan Gerhold <stephan.gerhold@linaro.org>
+Reviewed-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Abel Vesa <abel.vesa@linaro.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250423-x1e-vreg-l2j-voltage-v1-1-24b6a2043025@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+[ Change x1e80100-crd.dts instead ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/x1e80100-crd.dts | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+@@ -659,8 +659,8 @@
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <1200000>;
++ regulator-min-microvolt = <1256000>;
++ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
--- /dev/null
+From stable+bounces-164529-greg=kroah.com@vger.kernel.org Thu Jul 24 03:49:50 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 21:49:38 -0400
+Subject: arm64: dts: qcom: x1e78100-t14s: mark l12b and l15b always-on
+To: stable@vger.kernel.org
+Cc: Johan Hovold <johan+linaro@kernel.org>, Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>, Bjorn Andersson <andersson@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724014938.1251643-1-sashal@kernel.org>
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 673fa129e558c5f1196adb27d97ac90ddfe4f19c ]
+
+The l12b and l15b supplies are used by components that are not (fully)
+described (and some never will be) and must never be disabled.
+
+Mark the regulators as always-on to prevent them from being disabled,
+for example, when consumers probe defer or suspend.
+
+Fixes: 7d1cbe2f4985 ("arm64: dts: qcom: Add X1E78100 ThinkPad T14s Gen 6")
+Cc: stable@vger.kernel.org # 6.12
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20250314145440.11371-3-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+[ applied changes to .dts file instead of .dtsi ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
++++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+@@ -232,6 +232,7 @@
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++ regulator-always-on;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+@@ -253,6 +254,7 @@
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++ regulator-always-on;
+ };
+
+ vreg_l17b_2p5: ldo17 {
--- /dev/null
+From 1b98304c09a0192598d0767f1eb8c83d7e793091 Mon Sep 17 00:00:00 2001
+From: Ian Abbott <abbotti@mev.co.uk>
+Date: Tue, 8 Jul 2025 14:06:27 +0100
+Subject: comedi: comedi_test: Fix possible deletion of uninitialized timers
+
+From: Ian Abbott <abbotti@mev.co.uk>
+
+commit 1b98304c09a0192598d0767f1eb8c83d7e793091 upstream.
+
+In `waveform_common_attach()`, the two timers `&devpriv->ai_timer` and
+`&devpriv->ao_timer` are initialized after the allocation of the device
+private data by `comedi_alloc_devpriv()` and the subdevices by
+`comedi_alloc_subdevices()`. The function may return with an error
+between those function calls. In that case, `waveform_detach()` will be
+called by the Comedi core to clean up. The check that
+`waveform_detach()` uses to decide whether to delete the timers is
+incorrect. It only checks that the device private data was allocated,
+but that does not guarantee that the timers were initialized. It also
+needs to check that the subdevices were allocated. Fix it.
+
+Fixes: 73e0e4dfed4c ("staging: comedi: comedi_test: fix timer lock-up")
+Cc: stable@vger.kernel.org # 6.15+
+Signed-off-by: Ian Abbott <abbotti@mev.co.uk>
+Link: https://lore.kernel.org/r/20250708130627.21743-1-abbotti@mev.co.uk
+[ changed timer_delete_sync() to del_timer_sync() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/comedi/drivers/comedi_test.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/comedi/drivers/comedi_test.c
++++ b/drivers/comedi/drivers/comedi_test.c
+@@ -790,7 +790,7 @@ static void waveform_detach(struct comed
+ {
+ struct waveform_private *devpriv = dev->private;
+
+- if (devpriv) {
++ if (devpriv && dev->n_subdevices) {
+ del_timer_sync(&devpriv->ai_timer);
+ del_timer_sync(&devpriv->ao_timer);
+ }
--- /dev/null
+From stable+bounces-164532-greg=kroah.com@vger.kernel.org Thu Jul 24 04:49:32 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:49:21 -0400
+Subject: crypto: powerpc/poly1305 - add depends on BROKEN for now
+To: stable@vger.kernel.org
+Cc: Eric Biggers <ebiggers@google.com>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724024921.1276399-1-sashal@kernel.org>
+
+From: Eric Biggers <ebiggers@google.com>
+
+[ Upstream commit bc8169003b41e89fe7052e408cf9fdbecb4017fe ]
+
+As discussed in the thread containing
+https://lore.kernel.org/linux-crypto/20250510053308.GB505731@sol/, the
+Power10-optimized Poly1305 code is currently not safe to call in softirq
+context. Disable it for now. It can be re-enabled once it is fixed.
+
+Fixes: ba8f8624fde2 ("crypto: poly1305-p10 - Glue code for optmized Poly1305 implementation for ppc64le")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ applied to arch/powerpc/crypto/Kconfig ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/crypto/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/crypto/Kconfig
++++ b/arch/powerpc/crypto/Kconfig
+@@ -143,6 +143,7 @@ config CRYPTO_CHACHA20_P10
+ config CRYPTO_POLY1305_P10
+ tristate "Hash functions: Poly1305 (P10 or later)"
+ depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
++ depends on BROKEN # Needs to be fixed to work in softirq context
+ select CRYPTO_HASH
+ select CRYPTO_LIB_POLY1305_GENERIC
+ help
--- /dev/null
+From 65995e97a1caacf0024bebda3332b8d1f0f443c4 Mon Sep 17 00:00:00 2001
+From: Naman Jain <namjain@linux.microsoft.com>
+Date: Fri, 2 May 2025 13:18:11 +0530
+Subject: Drivers: hv: Make the sysfs node size for the ring buffer dynamic
+
+From: Naman Jain <namjain@linux.microsoft.com>
+
+commit 65995e97a1caacf0024bebda3332b8d1f0f443c4 upstream.
+
+The ring buffer size varies across VMBus channels. The size of sysfs
+node for the ring buffer is currently hardcoded to 4 MB. Userspace
+clients either use fstat() or hardcode this size for doing mmap().
+To address this, make the sysfs node size dynamic to reflect the
+actual ring buffer size for each channel. This will ensure that
+fstat() on ring sysfs node always returns the correct size of
+ring buffer.
+
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Tested-by: Michael Kelley <mhklinux@outlook.com>
+Reviewed-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: Naman Jain <namjain@linux.microsoft.com>
+Link: https://lore.kernel.org/r/20250502074811.2022-3-namjain@linux.microsoft.com
+[ The structure "struct attribute_group" does not have bin_size field in
+ v6.12.x kernel so the logic of configuring size of sysfs node for ring buffer
+ has been moved to vmbus_chan_bin_attr_is_visible().
+ Original change was not a fix, but it needs to be backported to fix size
+ related discrepancy caused by the commit mentioned in Fixes tag. ]
+Signed-off-by: Naman Jain <namjain@linux.microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/vmbus_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1810,7 +1810,6 @@ static struct bin_attribute chan_attr_ri
+ .name = "ring",
+ .mode = 0600,
+ },
+- .size = 2 * SZ_2M,
+ .mmap = hv_mmap_ring_buffer_wrapper,
+ };
+ static struct attribute *vmbus_chan_attrs[] = {
+@@ -1866,6 +1865,7 @@ static umode_t vmbus_chan_bin_attr_is_vi
+ /* Hide ring attribute if channel's ring_sysfs_visible is set to false */
+ if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
+ return 0;
++ attr->size = channel->ringbuffer_pagecount << PAGE_SHIFT;
+
+ return attr->attr.mode;
+ }
--- /dev/null
+From stable+bounces-164451-greg=kroah.com@vger.kernel.org Wed Jul 23 15:50:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 09:50:08 -0400
+Subject: erofs: clean up header parsing for ztailpacking and fragments
+To: stable@vger.kernel.org
+Cc: Gao Xiang <hsiangkao@linux.alibaba.com>, Chao Yu <chao@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723135009.1089152-5-sashal@kernel.org>
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 540787d38b10dbc16a7d2bc2845752ab1605403a ]
+
+Simplify the logic in z_erofs_fill_inode_lazy() by combining the
+handling of ztailpacking and fragments, as they are mutually exclusive.
+
+Note that `h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT` is handled
+above, so no need to duplicate the check.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20250224123747.1387122-2-hsiangkao@linux.alibaba.com
+Stable-dep-of: b44686c8391b ("erofs: fix large fragment handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/zmap.c | 39 ++++++++++++++-------------------------
+ 1 file changed, 14 insertions(+), 25 deletions(-)
+
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -394,7 +394,8 @@ static int z_erofs_get_extent_decompress
+ static int z_erofs_do_map_blocks(struct inode *inode,
+ struct erofs_map_blocks *map, int flags)
+ {
+- struct erofs_inode *const vi = EROFS_I(inode);
++ struct erofs_inode *vi = EROFS_I(inode);
++ struct super_block *sb = inode->i_sb;
+ bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
+ bool ztailpacking = vi->z_idata_size;
+ struct z_erofs_maprecorder m = {
+@@ -438,7 +439,7 @@ static int z_erofs_do_map_blocks(struct
+ }
+ /* m.lcn should be >= 1 if endoff < m.clusterofs */
+ if (!m.lcn) {
+- erofs_err(inode->i_sb, "invalid logical cluster 0 at nid %llu",
++ erofs_err(sb, "invalid logical cluster 0 at nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+@@ -454,7 +455,7 @@ static int z_erofs_do_map_blocks(struct
+ goto unmap_out;
+ break;
+ default:
+- erofs_err(inode->i_sb, "unknown type %u @ offset %llu of nid %llu",
++ erofs_err(sb, "unknown type %u @ offset %llu of nid %llu",
+ m.type, ofs, vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_out;
+@@ -473,10 +474,16 @@ static int z_erofs_do_map_blocks(struct
+ map->m_flags |= EROFS_MAP_META;
+ map->m_pa = vi->z_fragmentoff;
+ map->m_plen = vi->z_idata_size;
++ if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
++ erofs_err(sb, "invalid tail-packing pclustersize %llu",
++ map->m_plen);
++ err = -EFSCORRUPTED;
++ goto unmap_out;
++ }
+ } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
+ map->m_flags |= EROFS_MAP_FRAGMENT;
+ } else {
+- map->m_pa = erofs_pos(inode->i_sb, m.pblk);
++ map->m_pa = erofs_pos(sb, m.pblk);
+ err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
+ if (err)
+ goto unmap_out;
+@@ -495,7 +502,7 @@ static int z_erofs_do_map_blocks(struct
+ afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
+ vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+ if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+- erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
++ erofs_err(sb, "inconsistent algorithmtype %u for nid %llu",
+ afmt, vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+@@ -596,26 +603,8 @@ static int z_erofs_fill_inode_lazy(struc
+ goto out_put_metabuf;
+ }
+
+- if (vi->z_idata_size) {
+- struct erofs_map_blocks map = {
+- .buf = __EROFS_BUF_INITIALIZER
+- };
+-
+- err = z_erofs_do_map_blocks(inode, &map,
+- EROFS_GET_BLOCKS_FINDTAIL);
+- erofs_put_metabuf(&map.buf);
+-
+- if (erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
+- erofs_err(sb, "invalid tail-packing pclustersize %llu",
+- map.m_plen);
+- err = -EFSCORRUPTED;
+- }
+- if (err < 0)
+- goto out_put_metabuf;
+- }
+-
+- if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
+- !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
++ if (vi->z_idata_size ||
++ (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+ struct erofs_map_blocks map = {
+ .buf = __EROFS_BUF_INITIALIZER
+ };
--- /dev/null
+From stable+bounces-164452-greg=kroah.com@vger.kernel.org Wed Jul 23 15:50:31 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 09:50:09 -0400
+Subject: erofs: fix large fragment handling
+To: stable@vger.kernel.org
+Cc: Gao Xiang <hsiangkao@linux.alibaba.com>, Axel Fontaine <axel@axelfontaine.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723135009.1089152-6-sashal@kernel.org>
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit b44686c8391b427fb1c85a31c35077e6947c6d90 ]
+
+Fragments aren't limited by Z_EROFS_PCLUSTER_MAX_DSIZE. However, if
+a fragment's logical length is larger than Z_EROFS_PCLUSTER_MAX_DSIZE
+but the fragment is not the whole inode, it currently returns
+-EOPNOTSUPP because m_flags has the wrong EROFS_MAP_ENCODED flag set.
+It is not intended by design but should be rare, as it can only be
+reproduced by mkfs with `-Eall-fragments` in a specific case.
+
+Let's normalize fragment m_flags using the new EROFS_MAP_FRAGMENT.
+
+Reported-by: Axel Fontaine <axel@axelfontaine.com>
+Closes: https://github.com/erofs/erofs-utils/issues/23
+Fixes: 7c3ca1838a78 ("erofs: restrict pcluster size limitations")
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20250711195826.3601157-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/internal.h | 4 +++-
+ fs/erofs/zdata.c | 2 +-
+ fs/erofs/zmap.c | 7 +++----
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -324,10 +324,12 @@ static inline struct folio *erofs_grab_f
+ /* The length of extent is full */
+ #define EROFS_MAP_FULL_MAPPED 0x0008
+ /* Located in the special packed inode */
+-#define EROFS_MAP_FRAGMENT 0x0010
++#define __EROFS_MAP_FRAGMENT 0x0010
+ /* The extent refers to partial decompressed data */
+ #define EROFS_MAP_PARTIAL_REF 0x0020
+
++#define EROFS_MAP_FRAGMENT (EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT)
++
+ struct erofs_map_blocks {
+ struct erofs_buf buf;
+
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1016,7 +1016,7 @@ static int z_erofs_scan_folio(struct z_e
+ if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ folio_zero_segment(folio, cur, end);
+ tight = false;
+- } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
++ } else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
+ erofs_off_t fpos = offset + cur - map->m_la;
+
+ err = z_erofs_read_fragment(inode->i_sb, folio, cur,
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -481,7 +481,7 @@ static int z_erofs_do_map_blocks(struct
+ goto unmap_out;
+ }
+ } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
+- map->m_flags |= EROFS_MAP_FRAGMENT;
++ map->m_flags = EROFS_MAP_FRAGMENT;
+ } else {
+ map->m_pa = erofs_pos(sb, m.pblk);
+ err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
+@@ -644,8 +644,7 @@ int z_erofs_map_blocks_iter(struct inode
+ !vi->z_tailextent_headlcn) {
+ map->m_la = 0;
+ map->m_llen = inode->i_size;
+- map->m_flags = EROFS_MAP_MAPPED |
+- EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
++ map->m_flags = EROFS_MAP_FRAGMENT;
+ } else {
+ err = z_erofs_do_map_blocks(inode, map, flags);
+ }
+@@ -678,7 +677,7 @@ static int z_erofs_iomap_begin_report(st
+ iomap->length = map.m_llen;
+ if (map.m_flags & EROFS_MAP_MAPPED) {
+ iomap->type = IOMAP_MAPPED;
+- iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
++ iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
+ IOMAP_NULL_ADDR : map.m_pa;
+ } else {
+ iomap->type = IOMAP_HOLE;
--- /dev/null
+From stable+bounces-164447-greg=kroah.com@vger.kernel.org Wed Jul 23 15:50:23 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 09:50:05 -0400
+Subject: erofs: refine z_erofs_get_extent_compressedlen()
+To: stable@vger.kernel.org
+Cc: Gao Xiang <hsiangkao@linux.alibaba.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723135009.1089152-2-sashal@kernel.org>
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 8f9530aeeb4f756bdfa70510b40e5d28ea3c742e ]
+
+ - Set `compressedblks = 1` directly for non-bigpcluster cases. This
+ simplifies the logic a bit since lcluster sizes larger than one block
+ are unsupported and the details remain unclear.
+
+ - For Z_EROFS_LCLUSTER_TYPE_PLAIN pclusters, avoid assuming
+ `compressedblks = 1` by default. Instead, check if
+ Z_EROFS_ADVISE_BIG_PCLUSTER_2 is set.
+
+It basically has no impact to existing valid images, but it's useful to
+find the gap to prepare for large PLAIN pclusters.
+
+Link: https://lore.kernel.org/r/20250123090109.973463-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Stable-dep-of: b44686c8391b ("erofs: fix large fragment handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/zmap.c | 36 ++++++++++++++++--------------------
+ 1 file changed, 16 insertions(+), 20 deletions(-)
+
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -294,27 +294,23 @@ err_bogus:
+ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
+ unsigned int initial_lcn)
+ {
+- struct super_block *sb = m->inode->i_sb;
+- struct erofs_inode *const vi = EROFS_I(m->inode);
+- struct erofs_map_blocks *const map = m->map;
+- const unsigned int lclusterbits = vi->z_logical_clusterbits;
+- unsigned long lcn;
++ struct inode *inode = m->inode;
++ struct super_block *sb = inode->i_sb;
++ struct erofs_inode *vi = EROFS_I(inode);
++ bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
++ bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
++ unsigned long lcn = m->lcn + 1;
+ int err;
+
+- DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
+- m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
+- m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
++ DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
+ DBG_BUGON(m->type != m->headtype);
+
+- if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+- ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
+- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
+- ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
+- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
+- map->m_plen = 1ULL << lclusterbits;
+- return 0;
+- }
+- lcn = m->lcn + 1;
++ if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
++ ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
++ m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
++ (lcn << vi->z_logical_clusterbits) >= inode->i_size)
++ m->compressedblks = 1;
++
+ if (m->compressedblks)
+ goto out;
+
+@@ -339,9 +335,9 @@ static int z_erofs_get_extent_compressed
+ case Z_EROFS_LCLUSTER_TYPE_HEAD2:
+ /*
+ * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
+- * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
++ * rather than CBLKCNT, it's a 1 block-sized pcluster.
+ */
+- m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
++ m->compressedblks = 1;
+ break;
+ case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
+ if (m->delta[0] != 1)
+@@ -356,7 +352,7 @@ static int z_erofs_get_extent_compressed
+ return -EFSCORRUPTED;
+ }
+ out:
+- map->m_plen = erofs_pos(sb, m->compressedblks);
++ m->map->m_plen = erofs_pos(sb, m->compressedblks);
+ return 0;
+ err_bonus_cblkcnt:
+ erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
--- /dev/null
+From stable+bounces-164450-greg=kroah.com@vger.kernel.org Wed Jul 23 15:50:58 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 09:50:07 -0400
+Subject: erofs: simplify tail inline pcluster handling
+To: stable@vger.kernel.org
+Cc: Gao Xiang <hsiangkao@linux.alibaba.com>, Chao Yu <chao@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723135009.1089152-4-sashal@kernel.org>
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit b7710262d743aca112877d12abed61ce8a5d0d98 ]
+
+Use `z_idata_size != 0` to indicate that ztailpacking is enabled.
+`Z_EROFS_ADVISE_INLINE_PCLUSTER` cannot be ignored, as `h_idata_size`
+could be non-zero prior to erofs-utils 1.6 [1].
+
+Additionally, merge `z_idataoff` and `z_fragmentoff` since these two
+features are mutually exclusive for a given inode.
+
+[1] https://git.kernel.org/xiang/erofs-utils/c/547bea3cb71a
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20250225114038.3259726-1-hsiangkao@linux.alibaba.com
+Stable-dep-of: b44686c8391b ("erofs: fix large fragment handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/internal.h | 9 ++-------
+ fs/erofs/zmap.c | 20 ++++++++++----------
+ 2 files changed, 12 insertions(+), 17 deletions(-)
+
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -277,13 +277,8 @@ struct erofs_inode {
+ unsigned char z_algorithmtype[2];
+ unsigned char z_logical_clusterbits;
+ unsigned long z_tailextent_headlcn;
+- union {
+- struct {
+- erofs_off_t z_idataoff;
+- unsigned short z_idata_size;
+- };
+- erofs_off_t z_fragmentoff;
+- };
++ erofs_off_t z_fragmentoff;
++ unsigned short z_idata_size;
+ };
+ #endif /* CONFIG_EROFS_FS_ZIP */
+ };
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -395,8 +395,8 @@ static int z_erofs_do_map_blocks(struct
+ struct erofs_map_blocks *map, int flags)
+ {
+ struct erofs_inode *const vi = EROFS_I(inode);
+- bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
+ bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
++ bool ztailpacking = vi->z_idata_size;
+ struct z_erofs_maprecorder m = {
+ .inode = inode,
+ .map = map,
+@@ -415,9 +415,8 @@ static int z_erofs_do_map_blocks(struct
+ if (err)
+ goto unmap_out;
+
+- if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
+- vi->z_idataoff = m.nextpackoff;
+-
++ if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
++ vi->z_fragmentoff = m.nextpackoff;
+ map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
+ end = (m.lcn + 1ULL) << lclusterbits;
+
+@@ -472,7 +471,7 @@ static int z_erofs_do_map_blocks(struct
+ }
+ if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
+ map->m_flags |= EROFS_MAP_META;
+- map->m_pa = vi->z_idataoff;
++ map->m_pa = vi->z_fragmentoff;
+ map->m_plen = vi->z_idata_size;
+ } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
+ map->m_flags |= EROFS_MAP_FRAGMENT;
+@@ -565,6 +564,10 @@ static int z_erofs_fill_inode_lazy(struc
+ vi->z_advise = le16_to_cpu(h->h_advise);
+ vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
+ vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
++ if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
++ vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
++ else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
++ vi->z_idata_size = le16_to_cpu(h->h_idata_size);
+
+ headnr = 0;
+ if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
+@@ -593,18 +596,16 @@ static int z_erofs_fill_inode_lazy(struc
+ goto out_put_metabuf;
+ }
+
+- if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
++ if (vi->z_idata_size) {
+ struct erofs_map_blocks map = {
+ .buf = __EROFS_BUF_INITIALIZER
+ };
+
+- vi->z_idata_size = le16_to_cpu(h->h_idata_size);
+ err = z_erofs_do_map_blocks(inode, &map,
+ EROFS_GET_BLOCKS_FINDTAIL);
+ erofs_put_metabuf(&map.buf);
+
+- if (!map.m_plen ||
+- erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
++ if (erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
+ erofs_err(sb, "invalid tail-packing pclustersize %llu",
+ map.m_plen);
+ err = -EFSCORRUPTED;
+@@ -619,7 +620,6 @@ static int z_erofs_fill_inode_lazy(struc
+ .buf = __EROFS_BUF_INITIALIZER
+ };
+
+- vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
+ err = z_erofs_do_map_blocks(inode, &map,
+ EROFS_GET_BLOCKS_FINDTAIL);
+ erofs_put_metabuf(&map.buf);
--- /dev/null
+From stable+bounces-164448-greg=kroah.com@vger.kernel.org Wed Jul 23 15:50:25 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 09:50:04 -0400
+Subject: erofs: simplify z_erofs_load_compact_lcluster()
+To: stable@vger.kernel.org
+Cc: Gao Xiang <hsiangkao@linux.alibaba.com>, Chao Yu <chao@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723135009.1089152-1-sashal@kernel.org>
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 2a810ea79cd7a6d5f134ea69ca2ba726e600cbc4 ]
+
+ - Get rid of unpack_compacted_index() and fold it into
+ z_erofs_load_compact_lcluster();
+
+ - Avoid a goto.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20250114034429.431408-1-hsiangkao@linux.alibaba.com
+Stable-dep-of: b44686c8391b ("erofs: fix large fragment handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/zmap.c | 89 ++++++++++++++++++++++----------------------------------
+ 1 file changed, 36 insertions(+), 53 deletions(-)
+
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -97,17 +97,48 @@ static int get_compacted_la_distance(uns
+ return d1;
+ }
+
+-static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+- unsigned int amortizedshift,
+- erofs_off_t pos, bool lookahead)
++static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
++ unsigned long lcn, bool lookahead)
+ {
+- struct erofs_inode *const vi = EROFS_I(m->inode);
++ struct inode *const inode = m->inode;
++ struct erofs_inode *const vi = EROFS_I(inode);
++ const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
++ ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
++ const unsigned int totalidx = erofs_iblks(inode);
++ unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
+ unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
+- bool big_pcluster;
++ bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
++ erofs_off_t pos;
+ u8 *in, type;
+ int i;
+
++ if (lcn >= totalidx || lclusterbits > 14)
++ return -EINVAL;
++
++ m->lcn = lcn;
++ /* used to align to 32-byte (compacted_2b) alignment */
++ compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
++ compacted_2b = 0;
++ if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
++ compacted_4b_initial < totalidx)
++ compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
++
++ pos = ebase;
++ amortizedshift = 2; /* compact_4b */
++ if (lcn >= compacted_4b_initial) {
++ pos += compacted_4b_initial * 4;
++ lcn -= compacted_4b_initial;
++ if (lcn < compacted_2b) {
++ amortizedshift = 1;
++ } else {
++ pos += compacted_2b * 2;
++ lcn -= compacted_2b;
++ }
++ }
++ pos += lcn * (1 << amortizedshift);
++
++ /* figure out the lcluster count in this pack */
+ if (1 << amortizedshift == 4 && lclusterbits <= 14)
+ vcnt = 2;
+ else if (1 << amortizedshift == 2 && lclusterbits <= 12)
+@@ -122,7 +153,6 @@ static int unpack_compacted_index(struct
+ /* it doesn't equal to round_up(..) */
+ m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
+ (vcnt << amortizedshift);
+- big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
+ encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
+ bytes = pos & ((vcnt << amortizedshift) - 1);
+@@ -207,53 +237,6 @@ static int unpack_compacted_index(struct
+ return 0;
+ }
+
+-static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+- unsigned long lcn, bool lookahead)
+-{
+- struct inode *const inode = m->inode;
+- struct erofs_inode *const vi = EROFS_I(inode);
+- const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
+- ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+- unsigned int totalidx = erofs_iblks(inode);
+- unsigned int compacted_4b_initial, compacted_2b;
+- unsigned int amortizedshift;
+- erofs_off_t pos;
+-
+- if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
+- return -EINVAL;
+-
+- m->lcn = lcn;
+- /* used to align to 32-byte (compacted_2b) alignment */
+- compacted_4b_initial = (32 - ebase % 32) / 4;
+- if (compacted_4b_initial == 32 / 4)
+- compacted_4b_initial = 0;
+-
+- if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+- compacted_4b_initial < totalidx)
+- compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
+- else
+- compacted_2b = 0;
+-
+- pos = ebase;
+- if (lcn < compacted_4b_initial) {
+- amortizedshift = 2;
+- goto out;
+- }
+- pos += compacted_4b_initial * 4;
+- lcn -= compacted_4b_initial;
+-
+- if (lcn < compacted_2b) {
+- amortizedshift = 1;
+- goto out;
+- }
+- pos += compacted_2b * 2;
+- lcn -= compacted_2b;
+- amortizedshift = 2;
+-out:
+- pos += lcn * (1 << amortizedshift);
+- return unpack_compacted_index(m, amortizedshift, pos, lookahead);
+-}
+-
+ static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int lcn, bool lookahead)
+ {
--- /dev/null
+From stable+bounces-164449-greg=kroah.com@vger.kernel.org Wed Jul 23 15:50:26 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 09:50:06 -0400
+Subject: erofs: use Z_EROFS_LCLUSTER_TYPE_MAX to simplify switches
+To: stable@vger.kernel.org
+Cc: Hongzhen Luo <hongzhen@linux.alibaba.com>, Gao Xiang <hsiangkao@linux.alibaba.com>, Chao Yu <chao@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723135009.1089152-3-sashal@kernel.org>
+
+From: Hongzhen Luo <hongzhen@linux.alibaba.com>
+
+[ Upstream commit 3b7781aeaefb627d4e07c1af9be923f9e8047d8b ]
+
+There's no need to enumerate each type. No logic changes.
+
+Signed-off-by: Hongzhen Luo <hongzhen@linux.alibaba.com>
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20250210032923.3382136-1-hongzhen@linux.alibaba.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Stable-dep-of: b44686c8391b ("erofs: fix large fragment handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/zmap.c | 63 +++++++++++++++++++++-----------------------------------
+ 1 file changed, 24 insertions(+), 39 deletions(-)
+
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -265,26 +265,22 @@ static int z_erofs_extent_lookback(struc
+ if (err)
+ return err;
+
+- switch (m->type) {
+- case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
++ if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
++ erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
++ m->type, lcn, vi->nid);
++ DBG_BUGON(1);
++ return -EOPNOTSUPP;
++ } else if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ lookback_distance = m->delta[0];
+ if (!lookback_distance)
+- goto err_bogus;
++ break;
+ continue;
+- case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+- case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+- case Z_EROFS_LCLUSTER_TYPE_HEAD2:
++ } else {
+ m->headtype = m->type;
+ m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ return 0;
+- default:
+- erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
+- m->type, lcn, vi->nid);
+- DBG_BUGON(1);
+- return -EOPNOTSUPP;
+ }
+ }
+-err_bogus:
+ erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
+ lookback_distance, m->lcn, vi->nid);
+ DBG_BUGON(1);
+@@ -329,35 +325,28 @@ static int z_erofs_get_extent_compressed
+ DBG_BUGON(lcn == initial_lcn &&
+ m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
+
+- switch (m->type) {
+- case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+- case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+- case Z_EROFS_LCLUSTER_TYPE_HEAD2:
++ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
++ if (m->delta[0] != 1) {
++ erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
++ DBG_BUGON(1);
++ return -EFSCORRUPTED;
++ }
++ if (m->compressedblks)
++ goto out;
++ } else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
+ /*
+ * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
+ * rather than CBLKCNT, it's a 1 block-sized pcluster.
+ */
+ m->compressedblks = 1;
+- break;
+- case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
+- if (m->delta[0] != 1)
+- goto err_bonus_cblkcnt;
+- if (m->compressedblks)
+- break;
+- fallthrough;
+- default:
+- erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn,
+- vi->nid);
+- DBG_BUGON(1);
+- return -EFSCORRUPTED;
++ goto out;
+ }
++ erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
++ DBG_BUGON(1);
++ return -EFSCORRUPTED;
+ out:
+ m->map->m_plen = erofs_pos(sb, m->compressedblks);
+ return 0;
+-err_bonus_cblkcnt:
+- erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
+- DBG_BUGON(1);
+- return -EFSCORRUPTED;
+ }
+
+ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
+@@ -386,9 +375,7 @@ static int z_erofs_get_extent_decompress
+ m->delta[1] = 1;
+ DBG_BUGON(1);
+ }
+- } else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+- m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
+- m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
++ } else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
+ if (lcn != headlcn)
+ break; /* ends at the next HEAD lcluster */
+ m->delta[1] = 1;
+@@ -452,8 +439,7 @@ static int z_erofs_do_map_blocks(struct
+ }
+ /* m.lcn should be >= 1 if endoff < m.clusterofs */
+ if (!m.lcn) {
+- erofs_err(inode->i_sb,
+- "invalid logical cluster 0 at nid %llu",
++ erofs_err(inode->i_sb, "invalid logical cluster 0 at nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+@@ -469,8 +455,7 @@ static int z_erofs_do_map_blocks(struct
+ goto unmap_out;
+ break;
+ default:
+- erofs_err(inode->i_sb,
+- "unknown type %u @ offset %llu of nid %llu",
++ erofs_err(inode->i_sb, "unknown type %u @ offset %llu of nid %llu",
+ m.type, ofs, vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_out;
--- /dev/null
+From stable+bounces-164543-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:44 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:17 -0400
+Subject: ext4: correct the error handle in ext4_fallocate()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Baokun Li <libaokun1@huawei.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-10-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 129245cfbd6d79c6d603f357f428010ccc0f0ee7 ]
+
+The error out label of file_modified() should be out_inode_lock in
+ext4_fallocate().
+
+Fixes: 2890e5e0f49e ("ext4: move out common parts into ext4_fallocate()")
+Reported-by: Baokun Li <libaokun1@huawei.com>
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Link: https://patch.msgid.link/20250319023557.2785018-1-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4745,7 +4745,7 @@ long ext4_fallocate(struct file *file, i
+
+ ret = file_modified(file);
+ if (ret)
+- return ret;
++ goto out_inode_lock;
+
+ if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
+ ret = ext4_do_fallocate(file, offset, len, mode);
--- /dev/null
+From stable+bounces-164534-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:08 -0400
+Subject: ext4: don't explicit update times in ext4_fallocate()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-1-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 73ae756ecdfa9684446134590eef32b0f067249c ]
+
+After commit ad5cd4f4ee4d ("ext4: fix fallocate to use file_modified to
+update permissions consistently"), we can update mtime and ctime
+appropriately through file_modified() when doing zero range, collapse
+range, insert range and punch hole, hence there is no need to explicitly
+update times in those paths, just drop them.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-3-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 5 -----
+ fs/ext4/inode.c | 1 -
+ 2 files changed, 6 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4675,8 +4675,6 @@ static long ext4_zero_range(struct file
+ goto out_mutex;
+ }
+
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+-
+ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+ flags);
+ filemap_invalidate_unlock(mapping);
+@@ -4700,7 +4698,6 @@ static long ext4_zero_range(struct file
+ goto out_mutex;
+ }
+
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ if (new_size)
+ ext4_update_inode_size(inode, new_size);
+ ret = ext4_mark_inode_dirty(handle, inode);
+@@ -5431,7 +5428,6 @@ static int ext4_collapse_range(struct fi
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = ext4_mark_inode_dirty(handle, inode);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+@@ -5541,7 +5537,6 @@ static int ext4_insert_range(struct file
+ /* Expand file to avoid data loss if there is error while shifting */
+ inode->i_size += len;
+ EXT4_I(inode)->i_disksize += len;
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (ret)
+ goto out_stop;
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4113,7 +4113,6 @@ int ext4_punch_hole(struct file *file, l
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret2))
+ ret = ret2;
--- /dev/null
+From stable+bounces-164539-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:37 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:13 -0400
+Subject: ext4: factor out ext4_do_fallocate()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-6-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit fd2f764826df5489b849a8937b5a093aae5b1816 ]
+
+Now the real job of normal fallocate is open coded in ext4_fallocate(),
+factor out a new helper ext4_do_fallocate() to do the real job, like
+other functions (e.g. ext4_zero_range()) in ext4_fallocate() do, this
+can make the code more clear, no functional changes.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-9-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 125 +++++++++++++++++++++++++-----------------------------
+ 1 file changed, 60 insertions(+), 65 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4690,6 +4690,58 @@ out:
+ return ret;
+ }
+
++static long ext4_do_fallocate(struct file *file, loff_t offset,
++ loff_t len, int mode)
++{
++ struct inode *inode = file_inode(file);
++ loff_t end = offset + len;
++ loff_t new_size = 0;
++ ext4_lblk_t start_lblk, len_lblk;
++ int ret;
++
++ trace_ext4_fallocate_enter(inode, offset, len, mode);
++
++ start_lblk = offset >> inode->i_blkbits;
++ len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
++
++ inode_lock(inode);
++
++ /* We only support preallocation for extent-based files only. */
++ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
++
++ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
++ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
++ new_size = end;
++ ret = inode_newsize_ok(inode, new_size);
++ if (ret)
++ goto out;
++ }
++
++ /* Wait all existing dio workers, newcomers will block on i_rwsem */
++ inode_dio_wait(inode);
++
++ ret = file_modified(file);
++ if (ret)
++ goto out;
++
++ ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
++ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++ if (ret)
++ goto out;
++
++ if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
++ ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
++ EXT4_I(inode)->i_sync_tid);
++ }
++out:
++ inode_unlock(inode);
++ trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
++ return ret;
++}
++
+ /*
+ * preallocate space for a file. This implements ext4's fallocate file
+ * operation, which gets called from sys_fallocate system call.
+@@ -4700,12 +4752,7 @@ out:
+ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ {
+ struct inode *inode = file_inode(file);
+- loff_t new_size = 0;
+- unsigned int max_blocks;
+- int ret = 0;
+- int flags;
+- ext4_lblk_t lblk;
+- unsigned int blkbits = inode->i_blkbits;
++ int ret;
+
+ /*
+ * Encrypted inodes can't handle collapse range or insert
+@@ -4727,71 +4774,19 @@ long ext4_fallocate(struct file *file, i
+ ret = ext4_convert_inline_data(inode);
+ inode_unlock(inode);
+ if (ret)
+- goto exit;
++ return ret;
+
+- if (mode & FALLOC_FL_PUNCH_HOLE) {
++ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = ext4_punch_hole(file, offset, len);
+- goto exit;
+- }
+-
+- if (mode & FALLOC_FL_COLLAPSE_RANGE) {
++ else if (mode & FALLOC_FL_COLLAPSE_RANGE)
+ ret = ext4_collapse_range(file, offset, len);
+- goto exit;
+- }
+-
+- if (mode & FALLOC_FL_INSERT_RANGE) {
++ else if (mode & FALLOC_FL_INSERT_RANGE)
+ ret = ext4_insert_range(file, offset, len);
+- goto exit;
+- }
+-
+- if (mode & FALLOC_FL_ZERO_RANGE) {
++ else if (mode & FALLOC_FL_ZERO_RANGE)
+ ret = ext4_zero_range(file, offset, len, mode);
+- goto exit;
+- }
+- trace_ext4_fallocate_enter(inode, offset, len, mode);
+- lblk = offset >> blkbits;
+-
+- max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
+- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+-
+- inode_lock(inode);
+-
+- /*
+- * We only support preallocation for extent-based files only
+- */
+- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
+-
+- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+- (offset + len > inode->i_size ||
+- offset + len > EXT4_I(inode)->i_disksize)) {
+- new_size = offset + len;
+- ret = inode_newsize_ok(inode, new_size);
+- if (ret)
+- goto out;
+- }
+-
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- goto out;
++ else
++ ret = ext4_do_fallocate(file, offset, len, mode);
+
+- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
+- if (ret)
+- goto out;
+-
+- if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
+- ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
+- EXT4_I(inode)->i_sync_tid);
+- }
+-out:
+- inode_unlock(inode);
+- trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
+-exit:
+ return ret;
+ }
+
--- /dev/null
+From stable+bounces-164542-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:44 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:16 -0400
+Subject: ext4: fix incorrect punch max_end
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Baokun Li <libaokun1@huawei.com>, Theodore Ts'o <tytso@mit.edu>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-9-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 29ec9bed2395061350249ae356fb300dd82a78e7 ]
+
+For the extents based inodes, the maxbytes should be sb->s_maxbytes
+instead of sbi->s_bitmap_maxbytes. Additionally, for the calculation of
+max_end, the -sb->s_blocksize operation is necessary only for
+indirect-block based inodes. Correct the maxbytes and max_end value to
+correct the behavior of punch hole.
+
+Fixes: 2da376228a24 ("ext4: limit length to bitmap_maxbytes - blocksize in punch_hole")
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Link: https://patch.msgid.link/20250506012009.3896990-2-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/inode.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3992,7 +3992,7 @@ int ext4_punch_hole(struct file *file, l
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ ext4_lblk_t start_lblk, end_lblk;
+- loff_t max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
++ loff_t max_end = sb->s_maxbytes;
+ loff_t end = offset + length;
+ handle_t *handle;
+ unsigned int credits;
+@@ -4001,14 +4001,20 @@ int ext4_punch_hole(struct file *file, l
+ trace_ext4_punch_hole(inode, offset, length, 0);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
++ /*
++ * For indirect-block based inodes, make sure that the hole within
++ * one block before last range.
++ */
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
++
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+ return 0;
+
+ /*
+ * If the hole extends beyond i_size, set the hole to end after
+- * the page that contains i_size, and also make sure that the hole
+- * within one block before last range.
++ * the page that contains i_size.
+ */
+ if (end > inode->i_size)
+ end = round_up(inode->i_size, PAGE_SIZE);
--- /dev/null
+From stable+bounces-164544-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:45 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:18 -0400
+Subject: ext4: fix out of bounds punch offset
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Liebes Wang <wanghaichi0403@gmail.com>, Jan Kara <jack@suse.cz>, Baokun Li <libaokun1@huawei.com>, Theodore Ts'o <tytso@mit.edu>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-11-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit b5e58bcd79625423487fa3ecba8e8411b5396327 ]
+
+Punching a hole with a start offset that exceeds max_end is not
+permitted and will result in a negative length in the
+truncate_inode_partial_folio() function while truncating the page cache,
+potentially leading to undesirable consequences.
+
+A simple reproducer:
+
+ truncate -s 9895604649994 /mnt/foo
+ xfs_io -c "pwrite 8796093022208 4096" /mnt/foo
+ xfs_io -c "fpunch 8796093022213 25769803777" /mnt/foo
+
+ kernel BUG at include/linux/highmem.h:275!
+ Oops: invalid opcode: 0000 [#1] SMP PTI
+ CPU: 3 UID: 0 PID: 710 Comm: xfs_io Not tainted 6.15.0-rc3
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-2.fc40 04/01/2014
+ RIP: 0010:zero_user_segments.constprop.0+0xd7/0x110
+ RSP: 0018:ffffc90001cf3b38 EFLAGS: 00010287
+ RAX: 0000000000000005 RBX: ffffea0001485e40 RCX: 0000000000001000
+ RDX: 000000000040b000 RSI: 0000000000000005 RDI: 000000000040b000
+ RBP: 000000000040affb R08: ffff888000000000 R09: ffffea0000000000
+ R10: 0000000000000003 R11: 00000000fffc7fc5 R12: 0000000000000005
+ R13: 000000000040affb R14: ffffea0001485e40 R15: ffff888031cd3000
+ FS: 00007f4f63d0b780(0000) GS:ffff8880d337d000(0000)
+ knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000000001ae0b038 CR3: 00000000536aa000 CR4: 00000000000006f0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ <TASK>
+ truncate_inode_partial_folio+0x3dd/0x620
+ truncate_inode_pages_range+0x226/0x720
+ ? bdev_getblk+0x52/0x3e0
+ ? ext4_get_group_desc+0x78/0x150
+ ? crc32c_arch+0xfd/0x180
+ ? __ext4_get_inode_loc+0x18c/0x840
+ ? ext4_inode_csum+0x117/0x160
+ ? jbd2_journal_dirty_metadata+0x61/0x390
+ ? __ext4_handle_dirty_metadata+0xa0/0x2b0
+ ? kmem_cache_free+0x90/0x5a0
+ ? jbd2_journal_stop+0x1d5/0x550
+ ? __ext4_journal_stop+0x49/0x100
+ truncate_pagecache_range+0x50/0x80
+ ext4_truncate_page_cache_block_range+0x57/0x3a0
+ ext4_punch_hole+0x1fe/0x670
+ ext4_fallocate+0x792/0x17d0
+ ? __count_memcg_events+0x175/0x2a0
+ vfs_fallocate+0x121/0x560
+ ksys_fallocate+0x51/0xc0
+ __x64_sys_fallocate+0x24/0x40
+ x64_sys_call+0x18d2/0x4170
+ do_syscall_64+0xa7/0x220
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by filtering out cases where the punching start offset exceeds
+max_end.
+
+Fixes: 982bf37da09d ("ext4: refactor ext4_punch_hole()")
+Reported-by: Liebes Wang <wanghaichi0403@gmail.com>
+Closes: https://lore.kernel.org/linux-ext4/ac3a58f6-e686-488b-a9ee-fc041024e43d@huawei.com/
+Tested-by: Liebes Wang <wanghaichi0403@gmail.com>
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Link: https://patch.msgid.link/20250506012009.3896990-1-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4009,7 +4009,7 @@ int ext4_punch_hole(struct file *file, l
+ max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
+
+ /* No need to punch hole beyond i_size */
+- if (offset >= inode->i_size)
++ if (offset >= inode->i_size || offset >= max_end)
+ return 0;
+
+ /*
--- /dev/null
+From stable+bounces-164541-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:39 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:15 -0400
+Subject: ext4: move out common parts into ext4_fallocate()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-8-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 2890e5e0f49e10f3dadc5f7b7ea434e3e77e12a6 ]
+
+Currently, all zeroing ranges, punch holes, collapse ranges, and insert
+ranges first wait for all existing direct I/O workers to complete, and
+then they acquire the mapping's invalidate lock before performing the
+actual work. These common components are nearly identical, so we can
+simplify the code by factoring them out into the ext4_fallocate().
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-11-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 124 ++++++++++++++++++------------------------------------
+ fs/ext4/inode.c | 25 +---------
+ 2 files changed, 45 insertions(+), 104 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4569,7 +4569,6 @@ static long ext4_zero_range(struct file
+ loff_t len, int mode)
+ {
+ struct inode *inode = file_inode(file);
+- struct address_space *mapping = file->f_mapping;
+ handle_t *handle = NULL;
+ loff_t new_size = 0;
+ loff_t end = offset + len;
+@@ -4593,23 +4592,6 @@ static long ext4_zero_range(struct file
+ return ret;
+ }
+
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released
+- * from page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+-
+ flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+ /* Preallocate the range including the unaligned edges */
+ if (!IS_ALIGNED(offset | end, blocksize)) {
+@@ -4619,17 +4601,17 @@ static long ext4_zero_range(struct file
+ ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
+ new_size, flags);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+ }
+
+ ret = ext4_update_disksize_before_punch(inode, offset, len);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ /* Now release the pages and zero block aligned part of pages */
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ /* Zero range excluding the unaligned edges */
+ start_lblk = EXT4_B_TO_LBLK(inode, offset);
+@@ -4641,11 +4623,11 @@ static long ext4_zero_range(struct file
+ ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
+ new_size, flags);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+ }
+ /* Finish zeroing out if it doesn't contain partial block */
+ if (IS_ALIGNED(offset | end, blocksize))
+- goto out_invalidate_lock;
++ return ret;
+
+ /*
+ * In worst case we have to writeout two nonadjacent unwritten
+@@ -4658,7 +4640,7 @@ static long ext4_zero_range(struct file
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(inode->i_sb, ret);
+- goto out_invalidate_lock;
++ return ret;
+ }
+
+ /* Zero out partial block at the edges of the range */
+@@ -4678,8 +4660,6 @@ static long ext4_zero_range(struct file
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
+@@ -4712,13 +4692,6 @@ static long ext4_do_fallocate(struct fil
+ goto out;
+ }
+
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- goto out;
+-
+ ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
+ if (ret)
+@@ -4743,6 +4716,7 @@ out:
+ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ {
+ struct inode *inode = file_inode(file);
++ struct address_space *mapping = file->f_mapping;
+ int ret;
+
+ /*
+@@ -4766,6 +4740,29 @@ long ext4_fallocate(struct file *file, i
+ if (ret)
+ goto out_inode_lock;
+
++ /* Wait all existing dio workers, newcomers will block on i_rwsem */
++ inode_dio_wait(inode);
++
++ ret = file_modified(file);
++ if (ret)
++ return ret;
++
++ if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
++ ret = ext4_do_fallocate(file, offset, len, mode);
++ goto out_inode_lock;
++ }
++
++ /*
++ * Follow-up operations will drop page cache, hold invalidate lock
++ * to prevent page faults from reinstantiating pages we have
++ * released from page cache.
++ */
++ filemap_invalidate_lock(mapping);
++
++ ret = ext4_break_layouts(inode);
++ if (ret)
++ goto out_invalidate_lock;
++
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = ext4_punch_hole(file, offset, len);
+ else if (mode & FALLOC_FL_COLLAPSE_RANGE)
+@@ -4775,7 +4772,10 @@ long ext4_fallocate(struct file *file, i
+ else if (mode & FALLOC_FL_ZERO_RANGE)
+ ret = ext4_zero_range(file, offset, len, mode);
+ else
+- ret = ext4_do_fallocate(file, offset, len, mode);
++ ret = -EOPNOTSUPP;
++
++out_invalidate_lock:
++ filemap_invalidate_unlock(mapping);
+ out_inode_lock:
+ inode_unlock(inode);
+ return ret;
+@@ -5297,23 +5297,6 @@ static int ext4_collapse_range(struct fi
+ if (end >= inode->i_size)
+ return -EINVAL;
+
+- /* Wait for existing dio to complete */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released from
+- * page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+-
+ /*
+ * Write tail of the last page before removed range and data that
+ * will be shifted since they will get removed from the page cache
+@@ -5327,16 +5310,15 @@ static int ext4_collapse_range(struct fi
+ if (!ret)
+ ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+- if (IS_ERR(handle)) {
+- ret = PTR_ERR(handle);
+- goto out_invalidate_lock;
+- }
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
+ start_lblk = offset >> inode->i_blkbits;
+@@ -5375,8 +5357,6 @@ static int ext4_collapse_range(struct fi
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
+@@ -5417,23 +5397,6 @@ static int ext4_insert_range(struct file
+ if (len > inode->i_sb->s_maxbytes - inode->i_size)
+ return -EFBIG;
+
+- /* Wait for existing dio to complete */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released from
+- * page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+-
+ /*
+ * Write out all dirty pages. Need to round down to align start offset
+ * to page size boundary for page size > block size.
+@@ -5441,16 +5404,15 @@ static int ext4_insert_range(struct file
+ start = round_down(offset, PAGE_SIZE);
+ ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+- if (IS_ERR(handle)) {
+- ret = PTR_ERR(handle);
+- goto out_invalidate_lock;
+- }
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
+ /* Expand file to avoid data loss if there is error while shifting */
+@@ -5521,8 +5483,6 @@ static int ext4_insert_range(struct file
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3992,7 +3992,6 @@ int ext4_punch_hole(struct file *file, l
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ ext4_lblk_t start_lblk, end_lblk;
+- struct address_space *mapping = inode->i_mapping;
+ loff_t max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
+ loff_t end = offset + length;
+ handle_t *handle;
+@@ -4027,31 +4026,15 @@ int ext4_punch_hole(struct file *file, l
+ return ret;
+ }
+
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released from
+- * page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+
+ ret = ext4_update_disksize_before_punch(inode, offset, length);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ /* Now release the pages and zero block aligned part of pages*/
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ credits = ext4_writepage_trans_blocks(inode);
+@@ -4061,7 +4044,7 @@ int ext4_punch_hole(struct file *file, l
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(sb, ret);
+- goto out_invalidate_lock;
++ return ret;
+ }
+
+ ret = ext4_zero_partial_blocks(handle, inode, offset, length);
+@@ -4106,8 +4089,6 @@ int ext4_punch_hole(struct file *file, l
+ ext4_handle_sync(handle);
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
--- /dev/null
+From stable+bounces-164540-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:39 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:14 -0400
+Subject: ext4: move out inode_lock into ext4_fallocate()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-7-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit ea3f17efd36b56c5839289716ba83eaa85893590 ]
+
+Currently, all five sub-functions of ext4_fallocate() acquire the
+inode's i_rwsem at the beginning and release it before exiting. This
+process can be simplified by factoring out the management of i_rwsem
+into the ext4_fallocate() function.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-10-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 90 ++++++++++++++++--------------------------------------
+ fs/ext4/inode.c | 13 +++----
+ 2 files changed, 33 insertions(+), 70 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4579,23 +4579,18 @@ static long ext4_zero_range(struct file
+ int ret, flags, credits;
+
+ trace_ext4_zero_range(inode, offset, len, mode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+- inode_lock(inode);
+-
+- /*
+- * Indirect files do not support unwritten extents
+- */
+- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
++ /* Indirect files do not support unwritten extents */
++ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
++ return -EOPNOTSUPP;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
+ new_size = end;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+- goto out;
++ return ret;
+ }
+
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+@@ -4603,7 +4598,7 @@ static long ext4_zero_range(struct file
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released
+@@ -4685,8 +4680,6 @@ out_handle:
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
+@@ -4700,12 +4693,11 @@ static long ext4_do_fallocate(struct fil
+ int ret;
+
+ trace_ext4_fallocate_enter(inode, offset, len, mode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ start_lblk = offset >> inode->i_blkbits;
+ len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
+
+- inode_lock(inode);
+-
+ /* We only support preallocation for extent-based files only. */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+@@ -4737,7 +4729,6 @@ static long ext4_do_fallocate(struct fil
+ EXT4_I(inode)->i_sync_tid);
+ }
+ out:
+- inode_unlock(inode);
+ trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
+ return ret;
+ }
+@@ -4772,9 +4763,8 @@ long ext4_fallocate(struct file *file, i
+
+ inode_lock(inode);
+ ret = ext4_convert_inline_data(inode);
+- inode_unlock(inode);
+ if (ret)
+- return ret;
++ goto out_inode_lock;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = ext4_punch_hole(file, offset, len);
+@@ -4786,7 +4776,8 @@ long ext4_fallocate(struct file *file, i
+ ret = ext4_zero_range(file, offset, len, mode);
+ else
+ ret = ext4_do_fallocate(file, offset, len, mode);
+-
++out_inode_lock:
++ inode_unlock(inode);
+ return ret;
+ }
+
+@@ -5291,36 +5282,27 @@ static int ext4_collapse_range(struct fi
+ int ret;
+
+ trace_ext4_collapse_range(inode, offset, len);
+-
+- inode_lock(inode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
+-
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ return -EOPNOTSUPP;
+ /* Collapse range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
+- ret = -EINVAL;
+- goto out;
+- }
+-
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
++ return -EINVAL;
+ /*
+ * There is no need to overlap collapse range with EOF, in which case
+ * it is effectively a truncate operation
+ */
+- if (end >= inode->i_size) {
+- ret = -EINVAL;
+- goto out;
+- }
++ if (end >= inode->i_size)
++ return -EINVAL;
+
+ /* Wait for existing dio to complete */
+ inode_dio_wait(inode);
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5395,8 +5377,6 @@ out_handle:
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
+@@ -5422,39 +5402,27 @@ static int ext4_insert_range(struct file
+ loff_t start;
+
+ trace_ext4_insert_range(inode, offset, len);
+-
+- inode_lock(inode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
+-
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ return -EOPNOTSUPP;
+ /* Insert range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
+- ret = -EINVAL;
+- goto out;
+- }
+-
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
++ return -EINVAL;
+ /* Offset must be less than i_size */
+- if (offset >= inode->i_size) {
+- ret = -EINVAL;
+- goto out;
+- }
+-
++ if (offset >= inode->i_size)
++ return -EINVAL;
+ /* Check whether the maximum file size would be exceeded */
+- if (len > inode->i_sb->s_maxbytes - inode->i_size) {
+- ret = -EFBIG;
+- goto out;
+- }
++ if (len > inode->i_sb->s_maxbytes - inode->i_size)
++ return -EFBIG;
+
+ /* Wait for existing dio to complete */
+ inode_dio_wait(inode);
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5555,8 +5523,6 @@ out_handle:
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3997,15 +3997,14 @@ int ext4_punch_hole(struct file *file, l
+ loff_t end = offset + length;
+ handle_t *handle;
+ unsigned int credits;
+- int ret = 0;
++ int ret;
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+-
+- inode_lock(inode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+- goto out;
++ return 0;
+
+ /*
+ * If the hole extends beyond i_size, set the hole to end after
+@@ -4025,7 +4024,7 @@ int ext4_punch_hole(struct file *file, l
+ if (!IS_ALIGNED(offset | end, sb->s_blocksize)) {
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+- goto out;
++ return ret;
+ }
+
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+@@ -4033,7 +4032,7 @@ int ext4_punch_hole(struct file *file, l
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -4109,8 +4108,6 @@ out_handle:
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
--- /dev/null
+From stable+bounces-164537-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:34 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:11 -0400
+Subject: ext4: refactor ext4_collapse_range()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-4-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 162e3c5ad1672ef41dccfb28ad198c704b8aa9e7 ]
+
+Simplify ext4_collapse_range() and align its code style with that of
+ext4_zero_range() and ext4_punch_hole(). Refactor it by: a) renaming
+variables, b) removing redundant input parameter checks and moving
+the remaining checks under i_rwsem in preparation for future
+refactoring, and c) renaming the three stale error tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-7-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 103 +++++++++++++++++++++++++-----------------------------
+ 1 file changed, 48 insertions(+), 55 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5288,43 +5288,36 @@ static int ext4_collapse_range(struct fi
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ struct address_space *mapping = inode->i_mapping;
+- ext4_lblk_t punch_start, punch_stop;
++ loff_t end = offset + len;
++ ext4_lblk_t start_lblk, end_lblk;
+ handle_t *handle;
+ unsigned int credits;
+- loff_t new_size, ioffset;
++ loff_t start, new_size;
+ int ret;
+
+- /*
+- * We need to test this early because xfstests assumes that a
+- * collapse range of (0, 1) will return EOPNOTSUPP if the file
+- * system does not support collapse range.
+- */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- return -EOPNOTSUPP;
++ trace_ext4_collapse_range(inode, offset, len);
+
+- /* Collapse range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
+- return -EINVAL;
++ inode_lock(inode);
+
+- trace_ext4_collapse_range(inode, offset, len);
++ /* Currently just for extent based files */
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
+
+- punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+- punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
++ /* Collapse range works only on fs cluster size aligned regions. */
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+- inode_lock(inode);
+ /*
+ * There is no need to overlap collapse range with EOF, in which case
+ * it is effectively a truncate operation
+ */
+- if (offset + len >= inode->i_size) {
++ if (end >= inode->i_size) {
+ ret = -EINVAL;
+- goto out_mutex;
+- }
+-
+- /* Currently just for extent based files */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- ret = -EOPNOTSUPP;
+- goto out_mutex;
++ goto out;
+ }
+
+ /* Wait for existing dio to complete */
+@@ -5332,7 +5325,7 @@ static int ext4_collapse_range(struct fi
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
++ goto out;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5342,55 +5335,52 @@ static int ext4_collapse_range(struct fi
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+- goto out_mmap;
++ goto out_invalidate_lock;
+
+ /*
++ * Write tail of the last page before removed range and data that
++ * will be shifted since they will get removed from the page cache
++ * below. We are also protected from pages becoming dirty by
++ * i_rwsem and invalidate_lock.
+ * Need to round down offset to be aligned with page size boundary
+ * for page size > block size.
+ */
+- ioffset = round_down(offset, PAGE_SIZE);
+- /*
+- * Write tail of the last page before removed range since it will get
+- * removed from the page cache below.
+- */
+- ret = filemap_write_and_wait_range(mapping, ioffset, offset);
+- if (ret)
+- goto out_mmap;
+- /*
+- * Write data that will be shifted to preserve them when discarding
+- * page cache below. We are also protected from pages becoming dirty
+- * by i_rwsem and invalidate_lock.
+- */
+- ret = filemap_write_and_wait_range(mapping, offset + len,
+- LLONG_MAX);
++ start = round_down(offset, PAGE_SIZE);
++ ret = filemap_write_and_wait_range(mapping, start, offset);
++ if (!ret)
++ ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
+ if (ret)
+- goto out_mmap;
+- truncate_pagecache(inode, ioffset);
++ goto out_invalidate_lock;
++
++ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- goto out_mmap;
++ goto out_invalidate_lock;
+ }
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
++ start_lblk = offset >> inode->i_blkbits;
++ end_lblk = (offset + len) >> inode->i_blkbits;
++
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+- ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
++ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
+
+- ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
++ ret = ext4_ext_remove_space(inode, start_lblk, end_lblk - 1);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
++ goto out_handle;
+ }
+ ext4_discard_preallocations(inode);
+
+- ret = ext4_ext_shift_extents(inode, handle, punch_stop,
+- punch_stop - punch_start, SHIFT_LEFT);
++ ret = ext4_ext_shift_extents(inode, handle, end_lblk,
++ end_lblk - start_lblk, SHIFT_LEFT);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
++ goto out_handle;
+ }
+
+ new_size = inode->i_size - len;
+@@ -5398,16 +5388,19 @@ static int ext4_collapse_range(struct fi
+ EXT4_I(inode)->i_disksize = new_size;
+
+ up_write(&EXT4_I(inode)->i_data_sem);
+- if (IS_SYNC(inode))
+- ext4_handle_sync(handle);
+ ret = ext4_mark_inode_dirty(handle, inode);
++ if (ret)
++ goto out_handle;
++
+ ext4_update_inode_fsync_trans(handle, inode, 1);
++ if (IS_SYNC(inode))
++ ext4_handle_sync(handle);
+
+-out_stop:
++out_handle:
+ ext4_journal_stop(handle);
+-out_mmap:
++out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out_mutex:
++out:
+ inode_unlock(inode);
+ return ret;
+ }
--- /dev/null
+From stable+bounces-164538-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:12 -0400
+Subject: ext4: refactor ext4_insert_range()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-5-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 49425504376c335c68f7be54ae7c32312afd9475 ]
+
+Simplify ext4_insert_range() and align its code style with that of
+ext4_collapse_range(). Refactor it by: a) renaming variables, b)
+removing redundant input parameter checks and moving the remaining
+checks under i_rwsem in preparation for future refactoring, and c)
+renaming the three stale error tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-8-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 101 +++++++++++++++++++++++++-----------------------------
+ 1 file changed, 48 insertions(+), 53 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5421,45 +5421,37 @@ static int ext4_insert_range(struct file
+ handle_t *handle;
+ struct ext4_ext_path *path;
+ struct ext4_extent *extent;
+- ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
++ ext4_lblk_t start_lblk, len_lblk, ee_start_lblk = 0;
+ unsigned int credits, ee_len;
+- int ret = 0, depth, split_flag = 0;
+- loff_t ioffset;
+-
+- /*
+- * We need to test this early because xfstests assumes that an
+- * insert range of (0, 1) will return EOPNOTSUPP if the file
+- * system does not support insert range.
+- */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- return -EOPNOTSUPP;
+-
+- /* Insert range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
+- return -EINVAL;
++ int ret, depth, split_flag = 0;
++ loff_t start;
+
+ trace_ext4_insert_range(inode, offset, len);
+
+- offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+- len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
+-
+ inode_lock(inode);
++
+ /* Currently just for extent based files */
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ ret = -EOPNOTSUPP;
+- goto out_mutex;
++ goto out;
+ }
+
+- /* Check whether the maximum file size would be exceeded */
+- if (len > inode->i_sb->s_maxbytes - inode->i_size) {
+- ret = -EFBIG;
+- goto out_mutex;
++ /* Insert range works only on fs cluster size aligned regions. */
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
++ ret = -EINVAL;
++ goto out;
+ }
+
+ /* Offset must be less than i_size */
+ if (offset >= inode->i_size) {
+ ret = -EINVAL;
+- goto out_mutex;
++ goto out;
++ }
++
++ /* Check whether the maximum file size would be exceeded */
++ if (len > inode->i_sb->s_maxbytes - inode->i_size) {
++ ret = -EFBIG;
++ goto out;
+ }
+
+ /* Wait for existing dio to complete */
+@@ -5467,7 +5459,7 @@ static int ext4_insert_range(struct file
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
++ goto out;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5477,25 +5469,24 @@ static int ext4_insert_range(struct file
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+- goto out_mmap;
++ goto out_invalidate_lock;
+
+ /*
+- * Need to round down to align start offset to page size boundary
+- * for page size > block size.
++ * Write out all dirty pages. Need to round down to align start offset
++ * to page size boundary for page size > block size.
+ */
+- ioffset = round_down(offset, PAGE_SIZE);
+- /* Write out all dirty pages */
+- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+- LLONG_MAX);
++ start = round_down(offset, PAGE_SIZE);
++ ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
+ if (ret)
+- goto out_mmap;
+- truncate_pagecache(inode, ioffset);
++ goto out_invalidate_lock;
++
++ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- goto out_mmap;
++ goto out_invalidate_lock;
+ }
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
+@@ -5504,16 +5495,19 @@ static int ext4_insert_range(struct file
+ EXT4_I(inode)->i_disksize += len;
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (ret)
+- goto out_stop;
++ goto out_handle;
++
++ start_lblk = offset >> inode->i_blkbits;
++ len_lblk = len >> inode->i_blkbits;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+
+- path = ext4_find_extent(inode, offset_lblk, NULL, 0);
++ path = ext4_find_extent(inode, start_lblk, NULL, 0);
+ if (IS_ERR(path)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
+- goto out_stop;
++ goto out_handle;
+ }
+
+ depth = ext_depth(inode);
+@@ -5523,16 +5517,16 @@ static int ext4_insert_range(struct file
+ ee_len = ext4_ext_get_actual_len(extent);
+
+ /*
+- * If offset_lblk is not the starting block of extent, split
+- * the extent @offset_lblk
++ * If start_lblk is not the starting block of extent, split
++ * the extent @start_lblk
+ */
+- if ((offset_lblk > ee_start_lblk) &&
+- (offset_lblk < (ee_start_lblk + ee_len))) {
++ if ((start_lblk > ee_start_lblk) &&
++ (start_lblk < (ee_start_lblk + ee_len))) {
+ if (ext4_ext_is_unwritten(extent))
+ split_flag = EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ path = ext4_split_extent_at(handle, inode, path,
+- offset_lblk, split_flag,
++ start_lblk, split_flag,
+ EXT4_EX_NOCACHE |
+ EXT4_GET_BLOCKS_PRE_IO |
+ EXT4_GET_BLOCKS_METADATA_NOFAIL);
+@@ -5541,31 +5535,32 @@ static int ext4_insert_range(struct file
+ if (IS_ERR(path)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
+- goto out_stop;
++ goto out_handle;
+ }
+ }
+
+ ext4_free_ext_path(path);
+- ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
++ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
+
+ /*
+- * if offset_lblk lies in a hole which is at start of file, use
++ * if start_lblk lies in a hole which is at start of file, use
+ * ee_start_lblk to shift extents
+ */
+ ret = ext4_ext_shift_extents(inode, handle,
+- max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);
+-
++ max(ee_start_lblk, start_lblk), len_lblk, SHIFT_RIGHT);
+ up_write(&EXT4_I(inode)->i_data_sem);
++ if (ret)
++ goto out_handle;
++
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+- if (ret >= 0)
+- ext4_update_inode_fsync_trans(handle, inode, 1);
+
+-out_stop:
++out_handle:
+ ext4_journal_stop(handle);
+-out_mmap:
++out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out_mutex:
++out:
+ inode_unlock(inode);
+ return ret;
+ }
--- /dev/null
+From stable+bounces-164535-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:09 -0400
+Subject: ext4: refactor ext4_punch_hole()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-2-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 982bf37da09d078570650b691d9084f43805a5de ]
+
+The current implementation of ext4_punch_hole() contains complex
+position calculations and stale error tags. To improve the code's
+clarity and maintainability, it is essential to clean up the code and
+improve its readability, this can be achieved by: a) simplifying and
+renaming variables; b) eliminating unnecessary position calculations;
+c) writing back all data in data=journal mode, and drop page cache from
+the original offset to the end, rather than using aligned blocks,
+d) renaming the stale error tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-5-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4.h | 2
+ fs/ext4/inode.c | 119 ++++++++++++++++++++++++--------------------------------
+ 2 files changed, 55 insertions(+), 66 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -368,6 +368,8 @@ struct ext4_io_submit {
+ #define EXT4_MAX_BLOCKS(size, offset, blkbits) \
+ ((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
+ blkbits))
++#define EXT4_B_TO_LBLK(inode, offset) \
++ (round_up((offset), i_blocksize(inode)) >> (inode)->i_blkbits)
+
+ /* Translate a block number to a cluster number */
+ #define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits)
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3991,13 +3991,13 @@ int ext4_punch_hole(struct file *file, l
+ {
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+- ext4_lblk_t first_block, stop_block;
++ ext4_lblk_t start_lblk, end_lblk;
+ struct address_space *mapping = inode->i_mapping;
+- loff_t first_block_offset, last_block_offset, max_length;
+- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++ loff_t max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
++ loff_t end = offset + length;
+ handle_t *handle;
+ unsigned int credits;
+- int ret = 0, ret2 = 0;
++ int ret = 0;
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+
+@@ -4005,36 +4005,27 @@ int ext4_punch_hole(struct file *file, l
+
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+- goto out_mutex;
++ goto out;
+
+ /*
+- * If the hole extends beyond i_size, set the hole
+- * to end after the page that contains i_size
++ * If the hole extends beyond i_size, set the hole to end after
++ * the page that contains i_size, and also make sure that the hole
++ * within one block before last range.
+ */
+- if (offset + length > inode->i_size) {
+- length = inode->i_size +
+- PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
+- offset;
+- }
++ if (end > inode->i_size)
++ end = round_up(inode->i_size, PAGE_SIZE);
++ if (end > max_end)
++ end = max_end;
++ length = end - offset;
+
+ /*
+- * For punch hole the length + offset needs to be within one block
+- * before last range. Adjust the length if it goes beyond that limit.
++ * Attach jinode to inode for jbd2 if we do any zeroing of partial
++ * block.
+ */
+- max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+- if (offset + length > max_length)
+- length = max_length - offset;
+-
+- if (offset & (sb->s_blocksize - 1) ||
+- (offset + length) & (sb->s_blocksize - 1)) {
+- /*
+- * Attach jinode to inode for jbd2 if we do any zeroing of
+- * partial block
+- */
++ if (!IS_ALIGNED(offset | end, sb->s_blocksize)) {
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+- goto out_mutex;
+-
++ goto out;
+ }
+
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+@@ -4042,7 +4033,7 @@ int ext4_punch_hole(struct file *file, l
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
++ goto out;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -4052,22 +4043,16 @@ int ext4_punch_hole(struct file *file, l
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+- goto out_dio;
++ goto out_invalidate_lock;
+
+- first_block_offset = round_up(offset, sb->s_blocksize);
+- last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
++ ret = ext4_update_disksize_before_punch(inode, offset, length);
++ if (ret)
++ goto out_invalidate_lock;
+
+ /* Now release the pages and zero block aligned part of pages*/
+- if (last_block_offset > first_block_offset) {
+- ret = ext4_update_disksize_before_punch(inode, offset, length);
+- if (ret)
+- goto out_dio;
+-
+- ret = ext4_truncate_page_cache_block_range(inode,
+- first_block_offset, last_block_offset + 1);
+- if (ret)
+- goto out_dio;
+- }
++ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
++ if (ret)
++ goto out_invalidate_lock;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ credits = ext4_writepage_trans_blocks(inode);
+@@ -4077,52 +4062,54 @@ int ext4_punch_hole(struct file *file, l
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(sb, ret);
+- goto out_dio;
++ goto out_invalidate_lock;
+ }
+
+- ret = ext4_zero_partial_blocks(handle, inode, offset,
+- length);
++ ret = ext4_zero_partial_blocks(handle, inode, offset, length);
+ if (ret)
+- goto out_stop;
+-
+- first_block = (offset + sb->s_blocksize - 1) >>
+- EXT4_BLOCK_SIZE_BITS(sb);
+- stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
++ goto out_handle;
+
+ /* If there are blocks to remove, do it */
+- if (stop_block > first_block) {
+- ext4_lblk_t hole_len = stop_block - first_block;
++ start_lblk = EXT4_B_TO_LBLK(inode, offset);
++ end_lblk = end >> inode->i_blkbits;
++
++ if (end_lblk > start_lblk) {
++ ext4_lblk_t hole_len = end_lblk - start_lblk;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+
+- ext4_es_remove_extent(inode, first_block, hole_len);
++ ext4_es_remove_extent(inode, start_lblk, hole_len);
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- ret = ext4_ext_remove_space(inode, first_block,
+- stop_block - 1);
++ ret = ext4_ext_remove_space(inode, start_lblk,
++ end_lblk - 1);
+ else
+- ret = ext4_ind_remove_space(handle, inode, first_block,
+- stop_block);
++ ret = ext4_ind_remove_space(handle, inode, start_lblk,
++ end_lblk);
++ if (ret) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto out_handle;
++ }
+
+- ext4_es_insert_extent(inode, first_block, hole_len, ~0,
++ ext4_es_insert_extent(inode, start_lblk, hole_len, ~0,
+ EXTENT_STATUS_HOLE, 0);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ }
+- ext4_fc_track_range(handle, inode, first_block, stop_block);
++ ext4_fc_track_range(handle, inode, start_lblk, end_lblk);
++
++ ret = ext4_mark_inode_dirty(handle, inode);
++ if (unlikely(ret))
++ goto out_handle;
++
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+-
+- ret2 = ext4_mark_inode_dirty(handle, inode);
+- if (unlikely(ret2))
+- ret = ret2;
+- if (ret >= 0)
+- ext4_update_inode_fsync_trans(handle, inode, 1);
+-out_stop:
++out_handle:
+ ext4_journal_stop(handle);
+-out_dio:
++out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out_mutex:
++out:
+ inode_unlock(inode);
+ return ret;
+ }
--- /dev/null
+From stable+bounces-164536-greg=kroah.com@vger.kernel.org Thu Jul 24 04:57:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:57:10 -0400
+Subject: ext4: refactor ext4_zero_range()
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Jan Kara <jack@suse.cz>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724025718.1277650-3-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 53471e0bedad5891b860d02233819dc0e28189e2 ]
+
+The current implementation of ext4_zero_range() contains complex
+position calculations and stale error tags. To improve the code's
+clarity and maintainability, it is essential to clean up the code and
+improve its readability, this can be achieved by: a) simplifying and
+renaming variables, making the style the same as ext4_punch_hole(); b)
+eliminating unnecessary position calculations, writing back all data in
+data=journal mode, and drop page cache from the original offset to the
+end, rather than using aligned blocks; c) renaming the stale out_mutex
+tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-6-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 144 +++++++++++++++++++++---------------------------------
+ 1 file changed, 58 insertions(+), 86 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4571,40 +4571,15 @@ static long ext4_zero_range(struct file
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
+ handle_t *handle = NULL;
+- unsigned int max_blocks;
+ loff_t new_size = 0;
+- int ret = 0;
+- int flags;
+- int credits;
+- int partial_begin, partial_end;
+- loff_t start, end;
+- ext4_lblk_t lblk;
++ loff_t end = offset + len;
++ ext4_lblk_t start_lblk, end_lblk;
++ unsigned int blocksize = i_blocksize(inode);
+ unsigned int blkbits = inode->i_blkbits;
++ int ret, flags, credits;
+
+ trace_ext4_zero_range(inode, offset, len, mode);
+
+- /*
+- * Round up offset. This is not fallocate, we need to zero out
+- * blocks, so convert interior block aligned part of the range to
+- * unwritten and possibly manually zero out unaligned parts of the
+- * range. Here, start and partial_begin are inclusive, end and
+- * partial_end are exclusive.
+- */
+- start = round_up(offset, 1 << blkbits);
+- end = round_down((offset + len), 1 << blkbits);
+-
+- if (start < offset || end > offset + len)
+- return -EINVAL;
+- partial_begin = offset & ((1 << blkbits) - 1);
+- partial_end = (offset + len) & ((1 << blkbits) - 1);
+-
+- lblk = start >> blkbits;
+- max_blocks = (end >> blkbits);
+- if (max_blocks < lblk)
+- max_blocks = 0;
+- else
+- max_blocks -= lblk;
+-
+ inode_lock(inode);
+
+ /*
+@@ -4612,77 +4587,70 @@ static long ext4_zero_range(struct file
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+- goto out_mutex;
++ goto out;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+- (offset + len > inode->i_size ||
+- offset + len > EXT4_I(inode)->i_disksize)) {
+- new_size = offset + len;
++ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
++ new_size = end;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+- goto out_mutex;
++ goto out;
+ }
+
+- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+-
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+ inode_dio_wait(inode);
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
++ goto out;
+
++ /*
++ * Prevent page faults from reinstantiating pages we have released
++ * from page cache.
++ */
++ filemap_invalidate_lock(mapping);
++
++ ret = ext4_break_layouts(inode);
++ if (ret)
++ goto out_invalidate_lock;
++
++ flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+ /* Preallocate the range including the unaligned edges */
+- if (partial_begin || partial_end) {
+- ret = ext4_alloc_file_blocks(file,
+- round_down(offset, 1 << blkbits) >> blkbits,
+- (round_up((offset + len), 1 << blkbits) -
+- round_down(offset, 1 << blkbits)) >> blkbits,
+- new_size, flags);
+- if (ret)
+- goto out_mutex;
++ if (!IS_ALIGNED(offset | end, blocksize)) {
++ ext4_lblk_t alloc_lblk = offset >> blkbits;
++ ext4_lblk_t len_lblk = EXT4_MAX_BLOCKS(len, offset, blkbits);
+
++ ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
++ new_size, flags);
++ if (ret)
++ goto out_invalidate_lock;
+ }
+
++ ret = ext4_update_disksize_before_punch(inode, offset, len);
++ if (ret)
++ goto out_invalidate_lock;
++
++ /* Now release the pages and zero block aligned part of pages */
++ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
++ if (ret)
++ goto out_invalidate_lock;
++
+ /* Zero range excluding the unaligned edges */
+- if (max_blocks > 0) {
+- flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+- EXT4_EX_NOCACHE);
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have
+- * released from page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret) {
+- filemap_invalidate_unlock(mapping);
+- goto out_mutex;
+- }
+-
+- ret = ext4_update_disksize_before_punch(inode, offset, len);
+- if (ret) {
+- filemap_invalidate_unlock(mapping);
+- goto out_mutex;
+- }
+-
+- /* Now release the pages and zero block aligned part of pages */
+- ret = ext4_truncate_page_cache_block_range(inode, start, end);
+- if (ret) {
+- filemap_invalidate_unlock(mapping);
+- goto out_mutex;
+- }
+-
+- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+- flags);
+- filemap_invalidate_unlock(mapping);
++ start_lblk = EXT4_B_TO_LBLK(inode, offset);
++ end_lblk = end >> blkbits;
++ if (end_lblk > start_lblk) {
++ ext4_lblk_t zero_blks = end_lblk - start_lblk;
++
++ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE);
++ ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
++ new_size, flags);
+ if (ret)
+- goto out_mutex;
++ goto out_invalidate_lock;
+ }
+- if (!partial_begin && !partial_end)
+- goto out_mutex;
++ /* Finish zeroing out if it doesn't contain partial block */
++ if (IS_ALIGNED(offset | end, blocksize))
++ goto out_invalidate_lock;
+
+ /*
+ * In worst case we have to writeout two nonadjacent unwritten
+@@ -4695,25 +4663,29 @@ static long ext4_zero_range(struct file
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(inode->i_sb, ret);
+- goto out_mutex;
++ goto out_invalidate_lock;
+ }
+
++ /* Zero out partial block at the edges of the range */
++ ret = ext4_zero_partial_blocks(handle, inode, offset, len);
++ if (ret)
++ goto out_handle;
++
+ if (new_size)
+ ext4_update_inode_size(inode, new_size);
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret))
+ goto out_handle;
+- /* Zero out partial block at the edges of the range */
+- ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+- if (ret >= 0)
+- ext4_update_inode_fsync_trans(handle, inode, 1);
+
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (file->f_flags & O_SYNC)
+ ext4_handle_sync(handle);
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_mutex:
++out_invalidate_lock:
++ filemap_invalidate_unlock(mapping);
++out:
+ inode_unlock(inode);
+ return ret;
+ }
--- /dev/null
+From stable+bounces-164627-greg=kroah.com@vger.kernel.org Thu Jul 24 17:19:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 11:19:22 -0400
+Subject: iio: hid-sensor-prox: Fix incorrect OFFSET calculation
+To: stable@vger.kernel.org
+Cc: Zhang Lixu <lixu.zhang@intel.com>, Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724151922.1365648-1-sashal@kernel.org>
+
+From: Zhang Lixu <lixu.zhang@intel.com>
+
+[ Upstream commit 79dabbd505210e41c88060806c92c052496dd61c ]
+
+The OFFSET calculation in the prox_read_raw() was incorrectly using the
+unit exponent, which is intended for SCALE calculations.
+
+Remove the incorrect OFFSET calculation and set it to a fixed value of 0.
+
+Cc: stable@vger.kernel.org
+Fixes: 39a3a0138f61 ("iio: hid-sensors: Added Proximity Sensor Driver")
+Signed-off-by: Zhang Lixu <lixu.zhang@intel.com>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Link: https://patch.msgid.link/20250331055022.1149736-4-lixu.zhang@intel.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[ adapted prox_attr array access to single structure member access ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/light/hid-sensor-prox.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/iio/light/hid-sensor-prox.c
++++ b/drivers/iio/light/hid-sensor-prox.c
+@@ -102,8 +102,7 @@ static int prox_read_raw(struct iio_dev
+ ret_type = prox_state->scale_precision;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+- *val = hid_sensor_convert_exponent(
+- prox_state->prox_attr.unit_expo);
++ *val = 0;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
--- /dev/null
+From stable+bounces-164622-greg=kroah.com@vger.kernel.org Thu Jul 24 17:13:00 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 11:12:50 -0400
+Subject: iio: hid-sensor-prox: Restore lost scale assignments
+To: stable@vger.kernel.org
+Cc: Zhang Lixu <lixu.zhang@intel.com>, Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724151250.1363220-1-sashal@kernel.org>
+
+From: Zhang Lixu <lixu.zhang@intel.com>
+
+[ Upstream commit 83ded7cfaccccd2f4041769c313b58b4c9e265ad ]
+
+The variables `scale_pre_decml`, `scale_post_decml`, and `scale_precision`
+were assigned in commit d68c592e02f6 ("iio: hid-sensor-prox: Fix scale not
+correct issue"), but due to a merge conflict in
+commit 9c15db92a8e5 ("Merge tag 'iio-for-5.13a' of
+https://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio into staging-next"),
+these assignments were lost.
+
+Add back lost assignments and replace `st->prox_attr` with
+`st->prox_attr[0]` because commit 596ef5cf654b ("iio: hid-sensor-prox: Add
+support for more channels") changed `prox_attr` to an array.
+
+Cc: stable@vger.kernel.org # 5.13+
+Fixes: 9c15db92a8e5 ("Merge tag 'iio-for-5.13a' of https://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio into staging-next")
+Signed-off-by: Zhang Lixu <lixu.zhang@intel.com>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Link: https://patch.msgid.link/20250331055022.1149736-2-lixu.zhang@intel.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[ changed st->prox_attr[0] array access to st->prox_attr single struct member ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/light/hid-sensor-prox.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/iio/light/hid-sensor-prox.c
++++ b/drivers/iio/light/hid-sensor-prox.c
+@@ -227,6 +227,11 @@ static int prox_parse_report(struct plat
+ dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr.index,
+ st->prox_attr.report_id);
+
++ st->scale_precision = hid_sensor_format_scale(hsdev->usage,
++ &st->prox_attr,
++ &st->scale_pre_decml,
++ &st->scale_post_decml);
++
+ return ret;
+ }
+
--- /dev/null
+From 8c3f9a70d2d4dd6c640afe294b05c6a0a45434d9 Mon Sep 17 00:00:00 2001
+From: Dmitry Antipov <dmantipov@yandex.ru>
+Date: Thu, 7 Nov 2024 08:42:28 +0300
+Subject: jfs: reject on-disk inodes of an unsupported type
+
+From: Dmitry Antipov <dmantipov@yandex.ru>
+
+commit 8c3f9a70d2d4dd6c640afe294b05c6a0a45434d9 upstream.
+
+Syzbot has reported the following BUG:
+
+kernel BUG at fs/inode.c:668!
+Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN PTI
+CPU: 3 UID: 0 PID: 139 Comm: jfsCommit Not tainted 6.12.0-rc4-syzkaller-00085-g4e46774408d9 #0
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-3.fc41 04/01/2014
+RIP: 0010:clear_inode+0x168/0x190
+Code: 4c 89 f7 e8 ba fe e5 ff e9 61 ff ff ff 44 89 f1 80 e1 07 80 c1 03 38 c1 7c c1 4c 89 f7 e8 90 ff e5 ff eb b7
+ 0b e8 01 5d 7f ff 90 0f 0b e8 f9 5c 7f ff 90 0f 0b e8 f1 5c 7f
+RSP: 0018:ffffc900027dfae8 EFLAGS: 00010093
+RAX: ffffffff82157a87 RBX: 0000000000000001 RCX: ffff888104d4b980
+RDX: 0000000000000000 RSI: 0000000000000001 RDI: 0000000000000000
+RBP: ffffc900027dfc90 R08: ffffffff82157977 R09: fffff520004fbf38
+R10: dffffc0000000000 R11: fffff520004fbf38 R12: dffffc0000000000
+R13: ffff88811315bc00 R14: ffff88811315bda8 R15: ffff88811315bb80
+FS: 0000000000000000(0000) GS:ffff888135f00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00005565222e0578 CR3: 0000000026ef0000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ ? __die_body+0x5f/0xb0
+ ? die+0x9e/0xc0
+ ? do_trap+0x15a/0x3a0
+ ? clear_inode+0x168/0x190
+ ? do_error_trap+0x1dc/0x2c0
+ ? clear_inode+0x168/0x190
+ ? __pfx_do_error_trap+0x10/0x10
+ ? report_bug+0x3cd/0x500
+ ? handle_invalid_op+0x34/0x40
+ ? clear_inode+0x168/0x190
+ ? exc_invalid_op+0x38/0x50
+ ? asm_exc_invalid_op+0x1a/0x20
+ ? clear_inode+0x57/0x190
+ ? clear_inode+0x167/0x190
+ ? clear_inode+0x168/0x190
+ ? clear_inode+0x167/0x190
+ jfs_evict_inode+0xb5/0x440
+ ? __pfx_jfs_evict_inode+0x10/0x10
+ evict+0x4ea/0x9b0
+ ? __pfx_evict+0x10/0x10
+ ? iput+0x713/0xa50
+ txUpdateMap+0x931/0xb10
+ ? __pfx_txUpdateMap+0x10/0x10
+ jfs_lazycommit+0x49a/0xb80
+ ? _raw_spin_unlock_irqrestore+0x8f/0x140
+ ? lockdep_hardirqs_on+0x99/0x150
+ ? __pfx_jfs_lazycommit+0x10/0x10
+ ? __pfx_default_wake_function+0x10/0x10
+ ? __kthread_parkme+0x169/0x1d0
+ ? __pfx_jfs_lazycommit+0x10/0x10
+ kthread+0x2f2/0x390
+ ? __pfx_jfs_lazycommit+0x10/0x10
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x4d/0x80
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+This happens when 'clear_inode()' makes an attempt to finalize an underlying
+JFS inode of unknown type. According to JFS layout description from
+https://jfs.sourceforge.net/project/pub/jfslayout.pdf, inode types from 5 to
+15 are reserved for future extensions and should not be encountered on a valid
+filesystem. So add an extra check for valid inode type in 'copy_from_dinode()'.
+
+Reported-by: syzbot+ac2116e48989e84a2893@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=ac2116e48989e84a2893
+Fixes: 79ac5a46c5c1 ("jfs_lookup(): don't bother with . or ..")
+Signed-off-by: Dmitry Antipov <dmantipov@yandex.ru>
+Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
+Signed-off-by: Aditya Dutt <duttaditya18@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jfs/jfs_imap.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -3029,14 +3029,23 @@ static void duplicateIXtree(struct super
+ *
+ * RETURN VALUES:
+ * 0 - success
+- * -ENOMEM - insufficient memory
++ * -EINVAL - unexpected inode type
+ */
+ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
+ {
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
++ int fileset = le32_to_cpu(dip->di_fileset);
+
+- jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
++ switch (fileset) {
++ case AGGR_RESERVED_I: case AGGREGATE_I: case BMAP_I:
++ case LOG_I: case BADBLOCK_I: case FILESYSTEM_I:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ jfs_ip->fileset = fileset;
+ jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
+ jfs_set_inode_flags(ip);
+
--- /dev/null
+From stable+bounces-164473-greg=kroah.com@vger.kernel.org Wed Jul 23 17:19:04 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 11:14:14 -0400
+Subject: KVM: x86: Add X86EMUL_F_MSR and X86EMUL_F_DT_LOAD to aid canonical checks
+To: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723151416.1092631-3-sashal@kernel.org>
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ Upstream commit c534b37b7584e2abc5d487b4e017f61a61959ca9 ]
+
+Add emulation flags for MSR accesses and Descriptor Tables loads, and pass
+the new flags as appropriate to emul_is_noncanonical_address(). The flags
+will be used to perform the correct canonical check, as the type of access
+affects whether or not CR4.LA57 is consulted when determining the canonical
+bit.
+
+No functional change is intended.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Link: https://lore.kernel.org/r/20240906221824.491834-3-mlevitsk@redhat.com
+[sean: split to separate patch, massage changelog]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: fa787ac07b3c ("KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/emulate.c | 15 +++++++++------
+ arch/x86/kvm/kvm_emulate.h | 5 ++++-
+ arch/x86/kvm/x86.c | 2 +-
+ 3 files changed, 14 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -651,9 +651,10 @@ static inline u8 ctxt_virt_addr_bits(str
+ }
+
+ static inline bool emul_is_noncanonical_address(u64 la,
+- struct x86_emulate_ctxt *ctxt)
++ struct x86_emulate_ctxt *ctxt,
++ unsigned int flags)
+ {
+- return !ctxt->ops->is_canonical_addr(ctxt, la);
++ return !ctxt->ops->is_canonical_addr(ctxt, la, flags);
+ }
+
+ /*
+@@ -1733,7 +1734,8 @@ static int __load_segment_descriptor(str
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
+- ((u64)base3 << 32), ctxt))
++ ((u64)base3 << 32), ctxt,
++ X86EMUL_F_DT_LOAD))
+ return emulate_gp(ctxt, err_code);
+ }
+
+@@ -2516,8 +2518,8 @@ static int em_sysexit(struct x86_emulate
+ ss_sel = cs_sel + 8;
+ cs.d = 0;
+ cs.l = 1;
+- if (emul_is_noncanonical_address(rcx, ctxt) ||
+- emul_is_noncanonical_address(rdx, ctxt))
++ if (emul_is_noncanonical_address(rcx, ctxt, 0) ||
++ emul_is_noncanonical_address(rdx, ctxt, 0))
+ return emulate_gp(ctxt, 0);
+ break;
+ }
+@@ -3494,7 +3496,8 @@ static int em_lgdt_lidt(struct x86_emula
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ if (ctxt->mode == X86EMUL_MODE_PROT64 &&
+- emul_is_noncanonical_address(desc_ptr.address, ctxt))
++ emul_is_noncanonical_address(desc_ptr.address, ctxt,
++ X86EMUL_F_DT_LOAD))
+ return emulate_gp(ctxt, 0);
+ if (lgdt)
+ ctxt->ops->set_gdt(ctxt, &desc_ptr);
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -94,6 +94,8 @@ struct x86_instruction_info {
+ #define X86EMUL_F_FETCH BIT(1)
+ #define X86EMUL_F_IMPLICIT BIT(2)
+ #define X86EMUL_F_INVLPG BIT(3)
++#define X86EMUL_F_MSR BIT(4)
++#define X86EMUL_F_DT_LOAD BIT(5)
+
+ struct x86_emulate_ops {
+ void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
+@@ -236,7 +238,8 @@ struct x86_emulate_ops {
+ gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
+ unsigned int flags);
+
+- bool (*is_canonical_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr);
++ bool (*is_canonical_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
++ unsigned int flags);
+ };
+
+ /* Type, address-of, and value of an instruction's operand. */
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8609,7 +8609,7 @@ static gva_t emulator_get_untagged_addr(
+ }
+
+ static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
+- gva_t addr)
++ gva_t addr, unsigned int flags)
+ {
+ return !is_noncanonical_address(addr, emul_to_vcpu(ctxt));
+ }
--- /dev/null
+From stable+bounces-164471-greg=kroah.com@vger.kernel.org Wed Jul 23 17:19:43 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 11:14:12 -0400
+Subject: KVM: x86: drop x86.h include from cpuid.h
+To: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723151416.1092631-1-sashal@kernel.org>
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ Upstream commit e52ad1ddd0a3b07777141ec9406d5dc2c9a0de17 ]
+
+Drop x86.h include from cpuid.h to allow the x86.h to include the cpuid.h
+instead.
+
+Also fix various places where x86.h was implicitly included via cpuid.h
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Link: https://lore.kernel.org/r/20240906221824.491834-2-mlevitsk@redhat.com
+[sean: fixup a missed include in mtrr.c]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: fa787ac07b3c ("KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.h | 1 -
+ arch/x86/kvm/mmu.h | 1 +
+ arch/x86/kvm/mtrr.c | 1 +
+ arch/x86/kvm/vmx/hyperv.c | 1 +
+ arch/x86/kvm/vmx/nested.c | 2 +-
+ arch/x86/kvm/vmx/sgx.c | 3 +--
+ 6 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -2,7 +2,6 @@
+ #ifndef ARCH_X86_KVM_CPUID_H
+ #define ARCH_X86_KVM_CPUID_H
+
+-#include "x86.h"
+ #include "reverse_cpuid.h"
+ #include <asm/cpu.h>
+ #include <asm/processor.h>
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/kvm_host.h>
+ #include "kvm_cache_regs.h"
++#include "x86.h"
+ #include "cpuid.h"
+
+ extern bool __read_mostly enable_mmio_caching;
+--- a/arch/x86/kvm/mtrr.c
++++ b/arch/x86/kvm/mtrr.c
+@@ -19,6 +19,7 @@
+ #include <asm/mtrr.h>
+
+ #include "cpuid.h"
++#include "x86.h"
+
+ static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr)
+ {
+--- a/arch/x86/kvm/vmx/hyperv.c
++++ b/arch/x86/kvm/vmx/hyperv.c
+@@ -4,6 +4,7 @@
+ #include <linux/errno.h>
+ #include <linux/smp.h>
+
++#include "x86.h"
+ #include "../cpuid.h"
+ #include "hyperv.h"
+ #include "nested.h"
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -7,6 +7,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/mmu_context.h>
+
++#include "x86.h"
+ #include "cpuid.h"
+ #include "hyperv.h"
+ #include "mmu.h"
+@@ -16,7 +17,6 @@
+ #include "sgx.h"
+ #include "trace.h"
+ #include "vmx.h"
+-#include "x86.h"
+ #include "smm.h"
+
+ static bool __read_mostly enable_shadow_vmcs = 1;
+--- a/arch/x86/kvm/vmx/sgx.c
++++ b/arch/x86/kvm/vmx/sgx.c
+@@ -4,12 +4,11 @@
+
+ #include <asm/sgx.h>
+
+-#include "cpuid.h"
++#include "x86.h"
+ #include "kvm_cache_regs.h"
+ #include "nested.h"
+ #include "sgx.h"
+ #include "vmx.h"
+-#include "x86.h"
+
+ bool __read_mostly enable_sgx = 1;
+ module_param_named(sgx, enable_sgx, bool, 0444);
--- /dev/null
+From stable+bounces-164475-greg=kroah.com@vger.kernel.org Wed Jul 23 17:19:05 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 11:14:16 -0400
+Subject: KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush
+To: stable@vger.kernel.org
+Cc: Manuel Andreas <manuel.andreas@tum.de>, Vitaly Kuznetsov <vkuznets@redhat.com>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723151416.1092631-5-sashal@kernel.org>
+
+From: Manuel Andreas <manuel.andreas@tum.de>
+
+[ Upstream commit fa787ac07b3ceb56dd88a62d1866038498e96230 ]
+
+In KVM guests with Hyper-V hypercalls enabled, the hypercalls
+HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST and HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX
+allow a guest to request invalidation of portions of a virtual TLB.
+For this, the hypercall parameter includes a list of GVAs that are supposed
+to be invalidated.
+
+However, when non-canonical GVAs are passed, there is currently no
+filtering in place and they are eventually passed to checked invocations of
+INVVPID on Intel / INVLPGA on AMD. While AMD's INVLPGA silently ignores
+non-canonical addresses (effectively a no-op), Intel's INVVPID explicitly
+signals VM-Fail and ultimately triggers the WARN_ONCE in invvpid_error():
+
+ invvpid failed: ext=0x0 vpid=1 gva=0xaaaaaaaaaaaaa000
+ WARNING: CPU: 6 PID: 326 at arch/x86/kvm/vmx/vmx.c:482
+ invvpid_error+0x91/0xa0 [kvm_intel]
+ Modules linked in: kvm_intel kvm 9pnet_virtio irqbypass fuse
+ CPU: 6 UID: 0 PID: 326 Comm: kvm-vm Not tainted 6.15.0 #14 PREEMPT(voluntary)
+ RIP: 0010:invvpid_error+0x91/0xa0 [kvm_intel]
+ Call Trace:
+ vmx_flush_tlb_gva+0x320/0x490 [kvm_intel]
+ kvm_hv_vcpu_flush_tlb+0x24f/0x4f0 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x3013/0x5810 [kvm]
+
+Hyper-V documents that invalid GVAs (those that are beyond a partition's
+GVA space) are to be ignored. While not completely clear whether this
+ruling also applies to non-canonical GVAs, it is likely fine to make that
+assumption, and manual testing on Azure confirms "real" Hyper-V interprets
+the specification in the same way.
+
+Skip non-canonical GVAs when processing the list of address to avoid
+tripping the INVVPID failure. Alternatively, KVM could filter out "bad"
+GVAs before inserting into the FIFO, but practically speaking the only
+downside of pushing validation to the final processing is that doing so
+is suboptimal for the guest, and no well-behaved guest will request TLB
+flushes for non-canonical addresses.
+
+Fixes: 260970862c88 ("KVM: x86: hyper-v: Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls gently")
+Cc: stable@vger.kernel.org
+Signed-off-by: Manuel Andreas <manuel.andreas@tum.de>
+Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Link: https://lore.kernel.org/r/c090efb3-ef82-499f-a5e0-360fc8420fb7@tum.de
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/hyperv.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1980,6 +1980,9 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcp
+ if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
+ goto out_flush_all;
+
++ if (is_noncanonical_invlpg_address(entries[i], vcpu))
++ continue;
++
+ /*
+ * Lower 12 bits of 'address' encode the number of additional
+ * pages to flush.
--- /dev/null
+From stable+bounces-164474-greg=kroah.com@vger.kernel.org Wed Jul 23 17:19:42 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 11:14:15 -0400
+Subject: KVM: x86: model canonical checks more precisely
+To: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723151416.1092631-4-sashal@kernel.org>
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ Upstream commit 9245fd6b8531497d129a7a6e3eef258042862f85 ]
+
+As a result of a recent investigation, it was determined that x86 CPUs
+which support 5-level paging, don't always respect CR4.LA57 when doing
+canonical checks.
+
+In particular:
+
+1. MSRs which contain a linear address, allow full 57-bitcanonical address
+regardless of CR4.LA57 state. For example: MSR_KERNEL_GS_BASE.
+
+2. All hidden segment bases and GDT/IDT bases also behave like MSRs.
+This means that full 57-bit canonical address can be loaded to them
+regardless of CR4.LA57, both using MSRS (e.g GS_BASE) and instructions
+(e.g LGDT).
+
+3. TLB invalidation instructions also allow the user to use full 57-bit
+address regardless of the CR4.LA57.
+
+Finally, it must be noted that the CPU doesn't prevent the user from
+disabling 5-level paging, even when the full 57-bit canonical address is
+present in one of the registers mentioned above (e.g GDT base).
+
+In fact, this can happen without any userspace help, when the CPU enters
+SMM mode - some MSRs, for example MSR_KERNEL_GS_BASE are left to contain
+a non-canonical address in regard to the new mode.
+
+Since most of the affected MSRs and all segment bases can be read and
+written freely by the guest without any KVM intervention, this patch makes
+the emulator closely follow hardware behavior, which means that the
+emulator doesn't take in the account the guest CPUID support for 5-level
+paging, and only takes in the account the host CPU support.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Link: https://lore.kernel.org/r/20240906221824.491834-4-mlevitsk@redhat.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: fa787ac07b3c ("KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c | 2 -
+ arch/x86/kvm/vmx/nested.c | 22 +++++++++----------
+ arch/x86/kvm/vmx/pmu_intel.c | 2 -
+ arch/x86/kvm/vmx/sgx.c | 2 -
+ arch/x86/kvm/vmx/vmx.c | 4 +--
+ arch/x86/kvm/x86.c | 8 +++----
+ arch/x86/kvm/x86.h | 48 +++++++++++++++++++++++++++++++++++++++++--
+ 7 files changed, 66 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6234,7 +6234,7 @@ void kvm_mmu_invalidate_addr(struct kvm_
+ /* It's actually a GPA for vcpu->arch.guest_mmu. */
+ if (mmu != &vcpu->arch.guest_mmu) {
+ /* INVLPG on a non-canonical address is a NOP according to the SDM. */
+- if (is_noncanonical_address(addr, vcpu))
++ if (is_noncanonical_invlpg_address(addr, vcpu))
+ return;
+
+ kvm_x86_call(flush_tlb_gva)(vcpu, addr);
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3020,8 +3020,8 @@ static int nested_vmx_check_host_state(s
+ CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3)))
+ return -EINVAL;
+
+- if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
+- CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
++ if (CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
++ CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
+ return -EINVAL;
+
+ if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
+@@ -3055,12 +3055,12 @@ static int nested_vmx_check_host_state(s
+ CC(vmcs12->host_ss_selector == 0 && !ia32e))
+ return -EINVAL;
+
+- if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
+- CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
+- CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
+- CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
+- CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
+- CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
++ if (CC(is_noncanonical_base_address(vmcs12->host_fs_base, vcpu)) ||
++ CC(is_noncanonical_base_address(vmcs12->host_gs_base, vcpu)) ||
++ CC(is_noncanonical_base_address(vmcs12->host_gdtr_base, vcpu)) ||
++ CC(is_noncanonical_base_address(vmcs12->host_idtr_base, vcpu)) ||
++ CC(is_noncanonical_base_address(vmcs12->host_tr_base, vcpu)) ||
++ CC(is_noncanonical_address(vmcs12->host_rip, vcpu, 0)))
+ return -EINVAL;
+
+ /*
+@@ -3178,7 +3178,7 @@ static int nested_vmx_check_guest_state(
+ }
+
+ if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+- (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
++ (CC(is_noncanonical_msr_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
+ CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
+ return -EINVAL;
+
+@@ -5172,7 +5172,7 @@ int get_vmx_mem_address(struct kvm_vcpu
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+- exn = is_noncanonical_address(*ret, vcpu);
++ exn = is_noncanonical_address(*ret, vcpu, 0);
+ } else {
+ /*
+ * When not in long mode, the virtual/linear address is
+@@ -5983,7 +5983,7 @@ static int handle_invvpid(struct kvm_vcp
+ * invalidation.
+ */
+ if (!operand.vpid ||
+- is_noncanonical_address(operand.gla, vcpu))
++ is_noncanonical_invlpg_address(operand.gla, vcpu))
+ return nested_vmx_fail(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ vpid_sync_vcpu_addr(vpid02, operand.gla);
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -365,7 +365,7 @@ static int intel_pmu_set_msr(struct kvm_
+ }
+ break;
+ case MSR_IA32_DS_AREA:
+- if (is_noncanonical_address(data, vcpu))
++ if (is_noncanonical_msr_address(data, vcpu))
+ return 1;
+
+ pmu->ds_area = data;
+--- a/arch/x86/kvm/vmx/sgx.c
++++ b/arch/x86/kvm/vmx/sgx.c
+@@ -37,7 +37,7 @@ static int sgx_get_encls_gva(struct kvm_
+ fault = true;
+ } else if (likely(is_64_bit_mode(vcpu))) {
+ *gva = vmx_get_untagged_addr(vcpu, *gva, 0);
+- fault = is_noncanonical_address(*gva, vcpu);
++ fault = is_noncanonical_address(*gva, vcpu, 0);
+ } else {
+ *gva &= 0xffffffff;
+ fault = (s.unusable) ||
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2284,7 +2284,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, s
+ (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
+ return 1;
+- if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
++ if (is_noncanonical_msr_address(data & PAGE_MASK, vcpu) ||
+ (data & MSR_IA32_BNDCFGS_RSVD))
+ return 1;
+
+@@ -2449,7 +2449,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, s
+ index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
+ if (index >= 2 * vmx->pt_desc.num_address_ranges)
+ return 1;
+- if (is_noncanonical_address(data, vcpu))
++ if (is_noncanonical_msr_address(data, vcpu))
+ return 1;
+ if (index % 2)
+ vmx->pt_desc.guest.addr_b[index / 2] = data;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1845,7 +1845,7 @@ static int __kvm_set_msr(struct kvm_vcpu
+ case MSR_KERNEL_GS_BASE:
+ case MSR_CSTAR:
+ case MSR_LSTAR:
+- if (is_noncanonical_address(data, vcpu))
++ if (is_noncanonical_msr_address(data, vcpu))
+ return 1;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+@@ -1862,7 +1862,7 @@ static int __kvm_set_msr(struct kvm_vcpu
+ * value, and that something deterministic happens if the guest
+ * invokes 64-bit SYSENTER.
+ */
+- data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
++ data = __canonical_address(data, max_host_virt_addr_bits());
+ break;
+ case MSR_TSC_AUX:
+ if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+@@ -8611,7 +8611,7 @@ static gva_t emulator_get_untagged_addr(
+ static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, unsigned int flags)
+ {
+- return !is_noncanonical_address(addr, emul_to_vcpu(ctxt));
++ return !is_noncanonical_address(addr, emul_to_vcpu(ctxt), flags);
+ }
+
+ static const struct x86_emulate_ops emulate_ops = {
+@@ -13763,7 +13763,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *
+ * invalidation.
+ */
+ if ((!pcid_enabled && (operand.pcid != 0)) ||
+- is_noncanonical_address(operand.gla, vcpu)) {
++ is_noncanonical_invlpg_address(operand.gla, vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -8,6 +8,7 @@
+ #include <asm/pvclock.h>
+ #include "kvm_cache_regs.h"
+ #include "kvm_emulate.h"
++#include "cpuid.h"
+
+ struct kvm_caps {
+ /* control of guest tsc rate supported? */
+@@ -233,9 +234,52 @@ static inline u8 vcpu_virt_addr_bits(str
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
+ }
+
+-static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
++static inline u8 max_host_virt_addr_bits(void)
+ {
+- return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
++ return kvm_cpu_cap_has(X86_FEATURE_LA57) ? 57 : 48;
++}
++
++/*
++ * x86 MSRs which contain linear addresses, x86 hidden segment bases, and
++ * IDT/GDT bases have static canonicality checks, the size of which depends
++ * only on the CPU's support for 5-level paging, rather than on the state of
++ * CR4.LA57. This applies to both WRMSR and to other instructions that set
++ * their values, e.g. SGDT.
++ *
++ * KVM passes through most of these MSRS and also doesn't intercept the
++ * instructions that set the hidden segment bases.
++ *
++ * Because of this, to be consistent with hardware, even if the guest doesn't
++ * have LA57 enabled in its CPUID, perform canonicality checks based on *host*
++ * support for 5 level paging.
++ *
++ * Finally, instructions which are related to MMU invalidation of a given
++ * linear address, also have a similar static canonical check on address.
++ * This allows for example to invalidate 5-level addresses of a guest from a
++ * host which uses 4-level paging.
++ */
++static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu,
++ unsigned int flags)
++{
++ if (flags & (X86EMUL_F_INVLPG | X86EMUL_F_MSR | X86EMUL_F_DT_LOAD))
++ return !__is_canonical_address(la, max_host_virt_addr_bits());
++ else
++ return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
++}
++
++static inline bool is_noncanonical_msr_address(u64 la, struct kvm_vcpu *vcpu)
++{
++ return is_noncanonical_address(la, vcpu, X86EMUL_F_MSR);
++}
++
++static inline bool is_noncanonical_base_address(u64 la, struct kvm_vcpu *vcpu)
++{
++ return is_noncanonical_address(la, vcpu, X86EMUL_F_DT_LOAD);
++}
++
++static inline bool is_noncanonical_invlpg_address(u64 la, struct kvm_vcpu *vcpu)
++{
++ return is_noncanonical_address(la, vcpu, X86EMUL_F_INVLPG);
+ }
+
+ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
--- /dev/null
+From stable+bounces-164472-greg=kroah.com@vger.kernel.org Wed Jul 23 17:19:00 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 11:14:13 -0400
+Subject: KVM: x86: Route non-canonical checks in emulator through emulate_ops
+To: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250723151416.1092631-2-sashal@kernel.org>
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ Upstream commit 16ccadefa295af434ca296e566f078223ecd79ca ]
+
+Add emulate_ops.is_canonical_addr() to perform (non-)canonical checks in
+the emulator, which will allow extending is_noncanonical_address() to
+support different flavors of canonical checks, e.g. for descriptor table
+bases vs. MSRs, without needing duplicate logic in the emulator.
+
+No functional change is intended.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Link: https://lore.kernel.org/r/20240906221824.491834-3-mlevitsk@redhat.com
+[sean: separate from additional of flags, massage changelog]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: fa787ac07b3c ("KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/emulate.c | 2 +-
+ arch/x86/kvm/kvm_emulate.h | 2 ++
+ arch/x86/kvm/x86.c | 7 +++++++
+ 3 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -653,7 +653,7 @@ static inline u8 ctxt_virt_addr_bits(str
+ static inline bool emul_is_noncanonical_address(u64 la,
+ struct x86_emulate_ctxt *ctxt)
+ {
+- return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
++ return !ctxt->ops->is_canonical_addr(ctxt, la);
+ }
+
+ /*
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -235,6 +235,8 @@ struct x86_emulate_ops {
+
+ gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
+ unsigned int flags);
++
++ bool (*is_canonical_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr);
+ };
+
+ /* Type, address-of, and value of an instruction's operand. */
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8608,6 +8608,12 @@ static gva_t emulator_get_untagged_addr(
+ addr, flags);
+ }
+
++static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
++ gva_t addr)
++{
++ return !is_noncanonical_address(addr, emul_to_vcpu(ctxt));
++}
++
+ static const struct x86_emulate_ops emulate_ops = {
+ .vm_bugged = emulator_vm_bugged,
+ .read_gpr = emulator_read_gpr,
+@@ -8654,6 +8660,7 @@ static const struct x86_emulate_ops emul
+ .triple_fault = emulator_triple_fault,
+ .set_xcr = emulator_set_xcr,
+ .get_untagged_addr = emulator_get_untagged_addr,
++ .is_canonical_addr = emulator_is_canonical_addr,
+ };
+
+ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
--- /dev/null
+From stable+bounces-164531-greg=kroah.com@vger.kernel.org Thu Jul 24 04:34:09 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 22:33:58 -0400
+Subject: mtd: rawnand: qcom: Fix last codeword read in qcom_param_page_type_exec()
+To: stable@vger.kernel.org
+Cc: Md Sadre Alam <quic_mdalam@quicinc.com>, Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>, Lakshmi Sowjanya D <quic_laksd@quicinc.com>, Miquel Raynal <miquel.raynal@bootlin.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724023358.1268700-1-sashal@kernel.org>
+
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+
+[ Upstream commit 47bddabbf69da50999ec68be92b58356c687e1d6 ]
+
+For QPIC V2 onwards there is a separate register,
+"QPIC_NAND_READ_LOCATION_LAST_CW_n", to read the last code word.
+
+qcom_param_page_type_exec() is used to read only one code word.
+If it configures the number of code words to 1 in the QPIC_NAND_DEV0_CFG0
+register, then the QPIC controller thinks it is reading the last code word.
+Since we have a separate register to read the last code word,
+we have to configure the "QPIC_NAND_READ_LOCATION_LAST_CW_n" register
+to fetch data from the QPIC buffer to system memory.
+
+Without this change, page reads were failing with a timeout error:
+
+/ # hexdump -C /dev/mtd1
+[ 129.206113] qcom-nandc 1cc8000.nand-controller: failure to read page/oob
+hexdump: /dev/mtd1: Connection timed out
+
+This issue is only seen on SDX targets, since SDX targets use QPICv2;
+the same works on IPQ targets, since IPQ uses QPICv1.
+
+Cc: stable@vger.kernel.org
+Fixes: 89550beb098e ("mtd: rawnand: qcom: Implement exec_op()")
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Tested-by: Lakshmi Sowjanya D <quic_laksd@quicinc.com>
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2858,7 +2858,12 @@ static int qcom_param_page_type_exec(str
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+- int ret;
++ int ret, reg_base;
++
++ reg_base = NAND_READ_LOCATION_0;
++
++ if (nandc->props->qpic_v2)
++ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+@@ -2910,7 +2915,10 @@ static int qcom_param_page_type_exec(str
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+- nandc_set_read_loc(chip, 0, 0, 0, len, 1);
++ if (nandc->props->qpic_v2)
++ nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
++ else
++ nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
+
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
--- /dev/null
+From stable+bounces-164620-greg=kroah.com@vger.kernel.org Thu Jul 24 16:47:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 10:47:04 -0400
+Subject: Revert "wifi: mt76: mt7925: Update mt7925_mcu_uni_[tx,rx]_ba for MLO"
+To: stable@vger.kernel.org
+Cc: Sean Wang <sean.wang@mediatek.com>, Ming Yen Hsieh <mingyen.hsieh@mediatek.com>, Caleb Jorden <cjorden@gmail.com>, Felix Fietkau <nbd@nbd.name>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724144704.1352141-1-sashal@kernel.org>
+
+From: Sean Wang <sean.wang@mediatek.com>
+
+[ Upstream commit 766ea2cf5a398c7eed519b12c6c6cf1631143ea2 ]
+
+For MLO, mac80211 will send the BA action for each link to
+the driver, so the driver does not need to handle it itself.
+Therefore, revert this patch.
+
+Fixes: eb2a9a12c609 ("wifi: mt76: mt7925: Update mt7925_mcu_uni_[tx,rx]_ba for MLO")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>
+Tested-by: Caleb Jorden <cjorden@gmail.com>
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+Link: https://patch.msgid.link/20250305000851.493671-1-sean.wang@kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+[ struct mt76_vif_link -> struct mt76_vif ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/main.c | 10 ++--
+ drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 52 ++++-----------------
+ drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h | 2
+ 3 files changed, 15 insertions(+), 49 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -1296,22 +1296,22 @@ mt7925_ampdu_action(struct ieee80211_hw
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
+ params->buf_size);
+- mt7925_mcu_uni_rx_ba(dev, vif, params, true);
++ mt7925_mcu_uni_rx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
+- mt7925_mcu_uni_rx_ba(dev, vif, params, false);
++ mt7925_mcu_uni_rx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+- mt7925_mcu_uni_tx_ba(dev, vif, params, true);
++ mt7925_mcu_uni_tx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+- mt7925_mcu_uni_tx_ba(dev, vif, params, false);
++ mt7925_mcu_uni_tx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
+@@ -1320,7 +1320,7 @@ mt7925_ampdu_action(struct ieee80211_hw
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+- mt7925_mcu_uni_tx_ba(dev, vif, params, false);
++ mt7925_mcu_uni_tx_ba(dev, params, false);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -529,10 +529,10 @@ void mt7925_mcu_rx_event(struct mt792x_d
+
+ static int
+ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+- struct mt76_wcid *wcid,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+ {
++ struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
+ struct sta_rec_ba_uni *ba;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+@@ -560,60 +560,28 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev,
+
+ /** starec & wtbl **/
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+- struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+ {
+ struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+- struct mt792x_link_sta *mlink;
+- struct mt792x_bss_conf *mconf;
+- unsigned long usable_links = ieee80211_vif_usable_links(vif);
+- struct mt76_wcid *wcid;
+- u8 link_id, ret;
+-
+- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+- mconf = mt792x_vif_to_link(mvif, link_id);
+- mlink = mt792x_sta_to_link(msta, link_id);
+- wcid = &mlink->wcid;
+-
+- if (enable && !params->amsdu)
+- mlink->wcid.amsdu = false;
+-
+- ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+- enable, true);
+- if (ret < 0)
+- break;
+- }
++ struct mt792x_vif *mvif = msta->vif;
+
+- return ret;
++ if (enable && !params->amsdu)
++ msta->deflink.wcid.amsdu = false;
++
++ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
++ enable, true);
+ }
+
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+- struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+ {
+ struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+- struct mt792x_link_sta *mlink;
+- struct mt792x_bss_conf *mconf;
+- unsigned long usable_links = ieee80211_vif_usable_links(vif);
+- struct mt76_wcid *wcid;
+- u8 link_id, ret;
+-
+- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+- mconf = mt792x_vif_to_link(mvif, link_id);
+- mlink = mt792x_sta_to_link(msta, link_id);
+- wcid = &mlink->wcid;
+-
+- ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+- enable, false);
+- if (ret < 0)
+- break;
+- }
++ struct mt792x_vif *mvif = msta->vif;
+
+- return ret;
++ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
++ enable, false);
+ }
+
+ static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+@@ -245,11 +245,9 @@ int mt7925_mcu_set_beacon_filter(struct
+ struct ieee80211_vif *vif,
+ bool enable);
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+- struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+- struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ void mt7925_scan_work(struct work_struct *work);
usb-typec-tcpm-allow-to-use-sink-in-accessory-mode.patch
usb-typec-tcpm-allow-switching-to-mode-accessory-to-mux-properly.patch
usb-typec-tcpm-apply-vbus-before-data-bringup-in-tcpm_src_attach.patch
+spi-cadence-quadspi-fix-cleanup-of-rx_chan-on-failure-paths.patch
+x86-bugs-fix-use-of-possibly-uninit-value-in-amd_check_tsa_microcode.patch
+jfs-reject-on-disk-inodes-of-an-unsupported-type.patch
+comedi-comedi_test-fix-possible-deletion-of-uninitialized-timers.patch
+crypto-powerpc-poly1305-add-depends-on-broken-for-now.patch
+mtd-rawnand-qcom-fix-last-codeword-read-in-qcom_param_page_type_exec.patch
+arm64-dts-qcom-x1e78100-t14s-mark-l12b-and-l15b-always-on.patch
+erofs-simplify-z_erofs_load_compact_lcluster.patch
+erofs-refine-z_erofs_get_extent_compressedlen.patch
+erofs-use-z_erofs_lcluster_type_max-to-simplify-switches.patch
+erofs-simplify-tail-inline-pcluster-handling.patch
+erofs-clean-up-header-parsing-for-ztailpacking-and-fragments.patch
+erofs-fix-large-fragment-handling.patch
+ext4-don-t-explicit-update-times-in-ext4_fallocate.patch
+ext4-refactor-ext4_punch_hole.patch
+ext4-refactor-ext4_zero_range.patch
+ext4-refactor-ext4_collapse_range.patch
+ext4-refactor-ext4_insert_range.patch
+ext4-factor-out-ext4_do_fallocate.patch
+ext4-move-out-inode_lock-into-ext4_fallocate.patch
+ext4-move-out-common-parts-into-ext4_fallocate.patch
+ext4-fix-incorrect-punch-max_end.patch
+ext4-correct-the-error-handle-in-ext4_fallocate.patch
+ext4-fix-out-of-bounds-punch-offset.patch
+kvm-x86-drop-x86.h-include-from-cpuid.h.patch
+kvm-x86-route-non-canonical-checks-in-emulator-through-emulate_ops.patch
+kvm-x86-add-x86emul_f_msr-and-x86emul_f_dt_load-to-aid-canonical-checks.patch
+kvm-x86-model-canonical-checks-more-precisely.patch
+kvm-x86-hyper-v-skip-non-canonical-addresses-during-pv-tlb-flush.patch
+x86-hyperv-fix-apic-id-and-vp-index-confusion-in-hv_snp_boot_ap.patch
+arm64-dts-qcom-x1-crd-fix-vreg_l2j_1p2-voltage.patch
+revert-wifi-mt76-mt7925-update-mt7925_mcu_uni__ba-for-mlo.patch
+wifi-mt76-mt7925-adjust-rm-bss-flow-to-prevent-next-connection-failure.patch
+iio-hid-sensor-prox-restore-lost-scale-assignments.patch
+iio-hid-sensor-prox-fix-incorrect-offset-calculation.patch
+arm-9448-1-use-an-absolute-path-to-unified.h-in-kbuild_aflags.patch
+drivers-hv-make-the-sysfs-node-size-for-the-ring-buffer-dynamic.patch
--- /dev/null
+From 04a8ff1bc3514808481ddebd454342ad902a3f60 Mon Sep 17 00:00:00 2001
+From: Khairul Anuar Romli <khairul.anuar.romli@altera.com>
+Date: Mon, 30 Jun 2025 17:11:56 +0800
+Subject: spi: cadence-quadspi: fix cleanup of rx_chan on failure paths
+
+From: Khairul Anuar Romli <khairul.anuar.romli@altera.com>
+
+commit 04a8ff1bc3514808481ddebd454342ad902a3f60 upstream.
+
+Remove incorrect checks on cqspi->rx_chan that cause driver breakage
+during failure cleanup. Ensure proper resource freeing on the success
+path when operating in cqspi->use_direct_mode, preventing leaks and
+improving stability.
+
+Signed-off-by: Khairul Anuar Romli <khairul.anuar.romli@altera.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/89765a2b94f047ded4f14babaefb7ef92ba07cb2.1751274389.git.khairul.anuar.romli@altera.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1931,11 +1931,6 @@ static int cqspi_probe(struct platform_d
+
+ pm_runtime_enable(dev);
+
+- if (cqspi->rx_chan) {
+- dma_release_channel(cqspi->rx_chan);
+- goto probe_setup_failed;
+- }
+-
+ pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
--- /dev/null
+From stable+bounces-164621-greg=kroah.com@vger.kernel.org Thu Jul 24 16:50:02 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 10:49:50 -0400
+Subject: wifi: mt76: mt7925: adjust rm BSS flow to prevent next connection failure
+To: stable@vger.kernel.org
+Cc: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>, Sean Wang <sean.wang@mediatek.com>, Caleb Jorden <cjorden@gmail.com>, Felix Fietkau <nbd@nbd.name>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724144950.1354067-1-sashal@kernel.org>
+
+From: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>
+
+[ Upstream commit 0ebb60da8416c1d8e84c7e511a5687ce76a9467a ]
+
+Removing the BSS without removing the STAREC first will cause a
+firmware abnormality and the next connection to fail.
+
+Fixes: 816161051a03 ("wifi: mt76: mt7925: Cleanup MLO settings post-disconnection")
+Cc: stable@vger.kernel.org
+Co-developed-by: Sean Wang <sean.wang@mediatek.com>
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+Tested-by: Caleb Jorden <cjorden@gmail.com>
+Signed-off-by: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>
+Link: https://patch.msgid.link/20250305000851.493671-4-sean.wang@kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+[ struct mt76_vif_link -> struct mt792x_vif ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/main.c | 66 +++++++++++------------
+ drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 56 +++++++++++++++++++
+ drivers/net/wireless/mediatek/mt76/mt7925/mcu.h | 2
+ 3 files changed, 91 insertions(+), 33 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -1155,7 +1155,12 @@ static void mt7925_mac_link_sta_remove(s
+ struct mt792x_bss_conf *mconf;
+
+ mconf = mt792x_link_conf_to_mconf(link_conf);
+- mt792x_mac_link_bss_remove(dev, mconf, mlink);
++
++ if (ieee80211_vif_is_mld(vif))
++ mt792x_mac_link_bss_remove(dev, mconf, mlink);
++ else
++ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
++ link_sta, false);
+ }
+
+ spin_lock_bh(&mdev->sta_poll_lock);
+@@ -1175,6 +1180,31 @@ mt7925_mac_sta_remove_links(struct mt792
+ struct mt76_wcid *wcid;
+ unsigned int link_id;
+
++ /* clean up bss before starec */
++ for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++ struct ieee80211_link_sta *link_sta;
++ struct ieee80211_bss_conf *link_conf;
++ struct mt792x_bss_conf *mconf;
++ struct mt792x_link_sta *mlink;
++
++ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
++ if (!link_sta)
++ continue;
++
++ mlink = mt792x_sta_to_link(msta, link_id);
++ if (!mlink)
++ continue;
++
++ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
++ if (!link_conf)
++ continue;
++
++ mconf = mt792x_link_conf_to_mconf(link_conf);
++
++ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
++ link_sta, false);
++ }
++
+ for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
+@@ -1213,44 +1243,14 @@ void mt7925_mac_sta_remove(struct mt76_d
+ {
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+- struct {
+- struct {
+- u8 omac_idx;
+- u8 band_idx;
+- __le16 pad;
+- } __packed hdr;
+- struct req_tlv {
+- __le16 tag;
+- __le16 len;
+- u8 active;
+- u8 link_idx; /* hw link idx */
+- u8 omac_addr[ETH_ALEN];
+- } __packed tlv;
+- } dev_req = {
+- .hdr = {
+- .omac_idx = 0,
+- .band_idx = 0,
+- },
+- .tlv = {
+- .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+- .len = cpu_to_le16(sizeof(struct req_tlv)),
+- .active = true,
+- },
+- };
+ unsigned long rem;
+
+ rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
+
+ mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+
+- if (ieee80211_vif_is_mld(vif)) {
+- mt7925_mcu_set_dbdc(&dev->mphy, false);
+-
+- /* recovery omac address for the legacy interface */
+- memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+- mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
+- &dev_req, sizeof(dev_req), true);
+- }
++ if (ieee80211_vif_is_mld(vif))
++ mt7925_mcu_del_dev(mdev, vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -2662,6 +2662,62 @@ int mt7925_mcu_set_timing(struct mt792x_
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+ }
+
++void mt7925_mcu_del_dev(struct mt76_dev *mdev,
++ struct ieee80211_vif *vif)
++{
++ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
++ struct {
++ struct {
++ u8 omac_idx;
++ u8 band_idx;
++ __le16 pad;
++ } __packed hdr;
++ struct req_tlv {
++ __le16 tag;
++ __le16 len;
++ u8 active;
++ u8 link_idx; /* hw link idx */
++ u8 omac_addr[ETH_ALEN];
++ } __packed tlv;
++ } dev_req = {
++ .tlv = {
++ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
++ .len = cpu_to_le16(sizeof(struct req_tlv)),
++ .active = true,
++ },
++ };
++ struct {
++ struct {
++ u8 bss_idx;
++ u8 pad[3];
++ } __packed hdr;
++ struct mt76_connac_bss_basic_tlv basic;
++ } basic_req = {
++ .basic = {
++ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
++ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
++ .active = true,
++ .conn_state = 1,
++ },
++ };
++
++ dev_req.hdr.omac_idx = mvif->bss_conf.mt76.omac_idx;
++ dev_req.hdr.band_idx = mvif->bss_conf.mt76.band_idx;
++
++ basic_req.hdr.bss_idx = mvif->bss_conf.mt76.idx;
++ basic_req.basic.omac_idx = mvif->bss_conf.mt76.omac_idx;
++ basic_req.basic.band_idx = mvif->bss_conf.mt76.band_idx;
++ basic_req.basic.link_idx = mvif->bss_conf.mt76.link_idx;
++
++ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
++ &basic_req, sizeof(basic_req), true);
++
++ /* recovery omac address for the legacy interface */
++ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
++ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
++ &dev_req, sizeof(dev_req), true);
++}
++
+ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
+ struct ieee80211_chanctx_conf *ctx,
+ struct ieee80211_bss_conf *link_conf,
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+@@ -627,6 +627,8 @@ int mt7925_mcu_sched_scan_req(struct mt7
+ int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable);
++void mt7925_mcu_del_dev(struct mt76_dev *mdev,
++ struct ieee80211_vif *vif);
+ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
+ struct ieee80211_chanctx_conf *ctx,
+ struct ieee80211_bss_conf *link_conf,
--- /dev/null
+From mzhivich@akamai.com Tue Jul 29 16:34:40 2025
+From: Michael Zhivich <mzhivich@akamai.com>
+Date: Wed, 23 Jul 2025 09:40:19 -0400
+Subject: x86/bugs: Fix use of possibly uninit value in amd_check_tsa_microcode()
+To: <stable@vger.kernel.org>, <bp@alien8.de>
+Cc: <tglx@linutronix.de>, <mingo@redhat.com>, <dave.hansen@linux.intel.com>, <x86@kernel.org>, <linux-kernel@vger.kernel.org>, Michael Zhivich <mzhivich@akamai.com>
+Message-ID: <20250723134019.2370983-1-mzhivich@akamai.com>
+
+From: Michael Zhivich <mzhivich@akamai.com>
+
+For kernels compiled with CONFIG_INIT_STACK_NONE=y, the value of the
+__reserved field in the zen_patch_rev union on the stack may be garbage.
+If so, it will prevent a correct microcode check when consulting
+p.ucode_rev, resulting in incorrect mitigation selection.
+
+This is a stable-only fix.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Michael Zhivich <mzhivich@akamai.com>
+Fixes: 7a0395f6607a5 ("x86/bugs: Add a Transient Scheduler Attacks mitigation")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -378,6 +378,8 @@ static bool amd_check_tsa_microcode(void
+ p.model = c->x86_model;
+ p.ext_model = c->x86_model >> 4;
+ p.stepping = c->x86_stepping;
++ /* reserved bits are expected to be 0 in test below */
++ p.__reserved = 0;
+
+ if (cpu_has(c, X86_FEATURE_ZEN3) ||
+ cpu_has(c, X86_FEATURE_ZEN4)) {
--- /dev/null
+From stable+bounces-164547-greg=kroah.com@vger.kernel.org Thu Jul 24 05:20:41 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 23:20:27 -0400
+Subject: x86/hyperv: Fix APIC ID and VP index confusion in hv_snp_boot_ap()
+To: stable@vger.kernel.org
+Cc: Roman Kisel <romank@linux.microsoft.com>, Michael Kelley <mhklinux@outlook.com>, Wei Liu <wei.liu@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250724032027.1283644-1-sashal@kernel.org>
+
+From: Roman Kisel <romank@linux.microsoft.com>
+
+[ Upstream commit 86c48271e0d60c82665e9fd61277002391efcef7 ]
+
+To start an application processor in an SNP-isolated guest, a hypercall
+is used that takes a virtual processor index. The hv_snp_boot_ap()
+function uses that START_VP hypercall, but passes to it, as the VP index,
+what it receives via the wakeup_secondary_cpu_64 callback: the APIC ID.
+
+As those two aren't generally interchangeable, that may lead to hung
+APs if the VP index and the APIC ID don't match up.
+
+Update the parameter names to avoid confusion as to what the parameter
+is. Use the APIC ID to the VP index conversion to provide the correct
+input to the hypercall.
+
+Cc: stable@vger.kernel.org
+Fixes: 44676bb9d566 ("x86/hyperv: Add smp support for SEV-SNP guest")
+Signed-off-by: Roman Kisel <romank@linux.microsoft.com>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Link: https://lore.kernel.org/r/20250507182227.7421-2-romank@linux.microsoft.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Message-ID: <20250507182227.7421-2-romank@linux.microsoft.com>
+[ changed HVCALL_GET_VP_INDEX_FROM_APIC_ID to HVCALL_GET_VP_ID_FROM_APIC_ID ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/hyperv/hv_init.c | 33 ++++++++++++++++++++++++++++++
+ arch/x86/hyperv/hv_vtl.c | 44 +++++-----------------------------------
+ arch/x86/hyperv/ivm.c | 22 ++++++++++++++++++--
+ arch/x86/include/asm/mshyperv.h | 6 +++--
+ 4 files changed, 63 insertions(+), 42 deletions(-)
+
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -730,3 +730,36 @@ bool hv_is_hyperv_initialized(void)
+ return hypercall_msr.enable;
+ }
+ EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
++
++int hv_apicid_to_vp_index(u32 apic_id)
++{
++ u64 control;
++ u64 status;
++ unsigned long irq_flags;
++ struct hv_get_vp_from_apic_id_in *input;
++ u32 *output, ret;
++
++ local_irq_save(irq_flags);
++
++ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
++ memset(input, 0, sizeof(*input));
++ input->partition_id = HV_PARTITION_ID_SELF;
++ input->apic_ids[0] = apic_id;
++
++ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
++
++ control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
++ status = hv_do_hypercall(control, input, output);
++ ret = output[0];
++
++ local_irq_restore(irq_flags);
++
++ if (!hv_result_success(status)) {
++ pr_err("failed to get vp index from apic id %d, status %#llx\n",
++ apic_id, status);
++ return -EINVAL;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(hv_apicid_to_vp_index);
+--- a/arch/x86/hyperv/hv_vtl.c
++++ b/arch/x86/hyperv/hv_vtl.c
+@@ -175,41 +175,9 @@ free_lock:
+ return ret;
+ }
+
+-static int hv_vtl_apicid_to_vp_id(u32 apic_id)
+-{
+- u64 control;
+- u64 status;
+- unsigned long irq_flags;
+- struct hv_get_vp_from_apic_id_in *input;
+- u32 *output, ret;
+-
+- local_irq_save(irq_flags);
+-
+- input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+- memset(input, 0, sizeof(*input));
+- input->partition_id = HV_PARTITION_ID_SELF;
+- input->apic_ids[0] = apic_id;
+-
+- output = (u32 *)input;
+-
+- control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
+- status = hv_do_hypercall(control, input, output);
+- ret = output[0];
+-
+- local_irq_restore(irq_flags);
+-
+- if (!hv_result_success(status)) {
+- pr_err("failed to get vp id from apic id %d, status %#llx\n",
+- apic_id, status);
+- return -EINVAL;
+- }
+-
+- return ret;
+-}
+-
+ static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
+ {
+- int vp_id, cpu;
++ int vp_index, cpu;
+
+ /* Find the logical CPU for the APIC ID */
+ for_each_present_cpu(cpu) {
+@@ -220,18 +188,18 @@ static int hv_vtl_wakeup_secondary_cpu(u
+ return -EINVAL;
+
+ pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
+- vp_id = hv_vtl_apicid_to_vp_id(apicid);
++ vp_index = hv_apicid_to_vp_index(apicid);
+
+- if (vp_id < 0) {
++ if (vp_index < 0) {
+ pr_err("Couldn't find CPU with APIC ID %d\n", apicid);
+ return -EINVAL;
+ }
+- if (vp_id > ms_hyperv.max_vp_index) {
+- pr_err("Invalid CPU id %d for APIC ID %d\n", vp_id, apicid);
++ if (vp_index > ms_hyperv.max_vp_index) {
++ pr_err("Invalid CPU id %d for APIC ID %d\n", vp_index, apicid);
+ return -EINVAL;
+ }
+
+- return hv_vtl_bringup_vcpu(vp_id, cpu, start_eip);
++ return hv_vtl_bringup_vcpu(vp_index, cpu, start_eip);
+ }
+
+ int __init hv_vtl_early_init(void)
+--- a/arch/x86/hyperv/ivm.c
++++ b/arch/x86/hyperv/ivm.c
+@@ -10,6 +10,7 @@
+ #include <linux/hyperv.h>
+ #include <linux/types.h>
+ #include <linux/slab.h>
++#include <linux/cpu.h>
+ #include <asm/svm.h>
+ #include <asm/sev.h>
+ #include <asm/io.h>
+@@ -289,7 +290,7 @@ static void snp_cleanup_vmsa(struct sev_
+ free_page((unsigned long)vmsa);
+ }
+
+-int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
++int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip)
+ {
+ struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+ __get_free_page(GFP_KERNEL | __GFP_ZERO);
+@@ -298,10 +299,27 @@ int hv_snp_boot_ap(u32 cpu, unsigned lon
+ u64 ret, retry = 5;
+ struct hv_enable_vp_vtl *start_vp_input;
+ unsigned long flags;
++ int cpu, vp_index;
+
+ if (!vmsa)
+ return -ENOMEM;
+
++ /* Find the Hyper-V VP index which might be not the same as APIC ID */
++ vp_index = hv_apicid_to_vp_index(apic_id);
++ if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index)
++ return -EINVAL;
++
++ /*
++ * Find the Linux CPU number for addressing the per-CPU data, and it
++ * might not be the same as APIC ID.
++ */
++ for_each_present_cpu(cpu) {
++ if (arch_match_cpu_phys_id(cpu, apic_id))
++ break;
++ }
++ if (cpu >= nr_cpu_ids)
++ return -EINVAL;
++
+ native_store_gdt(&gdtr);
+
+ vmsa->gdtr.base = gdtr.address;
+@@ -349,7 +367,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned lon
+ start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
+ memset(start_vp_input, 0, sizeof(*start_vp_input));
+ start_vp_input->partition_id = -1;
+- start_vp_input->vp_index = cpu;
++ start_vp_input->vp_index = vp_index;
+ start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
+ *(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -275,11 +275,11 @@ int hv_unmap_ioapic_interrupt(int ioapic
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ bool hv_ghcb_negotiate_protocol(void);
+ void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
+-int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
++int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip);
+ #else
+ static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
+ static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
+-static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
++static inline int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip) { return 0; }
+ #endif
+
+ #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
+@@ -313,6 +313,7 @@ static __always_inline u64 hv_raw_get_ms
+ {
+ return __rdmsr(reg);
+ }
++int hv_apicid_to_vp_index(u32 apic_id);
+
+ #else /* CONFIG_HYPERV */
+ static inline void hyperv_init(void) {}
+@@ -334,6 +335,7 @@ static inline void hv_set_msr(unsigned i
+ static inline u64 hv_get_msr(unsigned int reg) { return 0; }
+ static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
+ static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
++static inline int hv_apicid_to_vp_index(u32 apic_id) { return -EINVAL; }
+ #endif /* CONFIG_HYPERV */
+
+