--- /dev/null
+From stable+bounces-211638-greg=kroah.com@vger.kernel.org Mon Jan 26 16:43:46 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 10:43:34 -0500
+Subject: arm64: dts: qcom: talos: Correct UFS clocks ordering
+To: stable@vger.kernel.org
+Cc: Pradeep P V K <pradeep.pragallapati@oss.qualcomm.com>, Bjorn Andersson <andersson@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260126154334.3313728-1-sashal@kernel.org>
+
+From: Pradeep P V K <pradeep.pragallapati@oss.qualcomm.com>
+
+[ Upstream commit 8bb3754909cde5df4f8c1012bde220b97d8ee3bc ]
+
+The current UFS clocks do not align with their respective names,
+causing the ref_clk to be set to an incorrect frequency, as shown
+below, which results in command timeouts.
+
+ufshcd-qcom 1d84000.ufshc: invalid ref_clk setting = 300000000
+
+This commit fixes the issue by properly reordering the UFS clocks to
+match their names.
+
+Fixes: ea172f61f4fd ("arm64: dts: qcom: qcs615: Fix up UFS clocks")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pradeep P V K <pradeep.pragallapati@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20251126131146.16146-1-pradeep.pragallapati@oss.qualcomm.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sm6150.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sm6150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi
+@@ -1260,10 +1260,10 @@
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+- <&gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+- <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>;
++ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
++ <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+ clock-names = "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
--- /dev/null
+From 1fb0d830dab89d0dc99bb84a7087b0ceca63d2d8 Mon Sep 17 00:00:00 2001
+From: Ravindra <ravindra@intel.com>
+Date: Wed, 15 Oct 2025 15:09:02 +0530
+Subject: Bluetooth: btintel_pcie: Support for S4 (Hibernate)
+
+From: Ravindra <ravindra@intel.com>
+
+commit 1fb0d830dab89d0dc99bb84a7087b0ceca63d2d8 upstream.
+
+During S4 (hibernate), the Bluetooth device loses power. Upon resume,
+the driver performs the following actions:
+
+1. Unregisters hdev
+2. Calls function level reset
+3. Registers hdev
+
+Test case:
+- run command sudo rtcwake -m disk -s 60
+
+Signed-off-by: Ravindra <ravindra@intel.com>
+Signed-off-by: Kiran K <kiran.k@intel.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Cc: Mariappan Ramasamy <mail@nappairam.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bluetooth/btintel_pcie.c | 41 +++++++++++++++++++++++++++++++++++++++
+ drivers/bluetooth/btintel_pcie.h | 2 +
+ 2 files changed, 43 insertions(+)
+
+--- a/drivers/bluetooth/btintel_pcie.c
++++ b/drivers/bluetooth/btintel_pcie.c
+@@ -825,6 +825,11 @@ static inline bool btintel_pcie_in_d0(st
+ return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
+ }
+
++static inline bool btintel_pcie_in_device_halt(struct btintel_pcie_data *data)
++{
++ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED;
++}
++
+ static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
+ u32 dxstate)
+ {
+@@ -2532,6 +2537,8 @@ static int btintel_pcie_suspend_late(str
+ dxstate = (mesg.event == PM_EVENT_SUSPEND ?
+ BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
+
++ data->pm_sx_event = mesg.event;
++
+ data->gp0_received = false;
+
+ start = ktime_get();
+@@ -2581,6 +2588,20 @@ static int btintel_pcie_resume(struct de
+
+ start = ktime_get();
+
++ /* When the system enters S4 (hibernate) mode, bluetooth device loses
++ * power, which results in the erasure of its loaded firmware.
++ * Consequently, function level reset (flr) is required on system
++ * resume to bring the controller back into an operational state by
++ * initiating a new firmware download.
++ */
++
++ if (data->pm_sx_event == PM_EVENT_FREEZE ||
++ data->pm_sx_event == PM_EVENT_HIBERNATE) {
++ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
++ btintel_pcie_reset(data->hdev);
++ return 0;
++ }
++
+ /* Refer: 6.4.11.7 -> Platform power management */
+ btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+@@ -2589,6 +2610,26 @@ static int btintel_pcie_resume(struct de
+ bt_dev_err(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D0 entry",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
++
++ /* Trigger function level reset if the controller is in error
++ * state during resume() to bring back the controller to
++ * operational mode
++ */
++
++ data->boot_stage_cache = btintel_pcie_rd_reg32(data,
++ BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
++ if (btintel_pcie_in_error(data) ||
++ btintel_pcie_in_device_halt(data)) {
++ bt_dev_err(data->hdev, "Controller in error state for D0 entry");
++ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS,
++ &data->flags)) {
++ data->dmp_hdr.trigger_reason =
++ BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
++ queue_work(data->workqueue, &data->rx_work);
++ }
++ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
++ btintel_pcie_reset(data->hdev);
++ }
+ return -EBUSY;
+ }
+
+--- a/drivers/bluetooth/btintel_pcie.h
++++ b/drivers/bluetooth/btintel_pcie.h
+@@ -464,6 +464,7 @@ struct btintel_pcie_dump_header {
+ * @txq: TX Queue struct
+ * @rxq: RX Queue struct
+ * @alive_intr_ctxt: Alive interrupt context
++ * @pm_sx_event: PM event on which system got suspended
+ */
+ struct btintel_pcie_data {
+ struct pci_dev *pdev;
+@@ -513,6 +514,7 @@ struct btintel_pcie_data {
+ u32 alive_intr_ctxt;
+ struct btintel_pcie_dbgc dbgc;
+ struct btintel_pcie_dump_header dmp_hdr;
++ u8 pm_sx_event;
+ };
+
+ static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
--- /dev/null
+From stable+bounces-211656-greg=kroah.com@vger.kernel.org Mon Jan 26 17:53:11 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 11:53:03 -0500
+Subject: iio: core: add separate lockdep class for info_exist_lock
+To: stable@vger.kernel.org
+Cc: Rasmus Villemoes <ravi@prevas.dk>, Peter Rosin <peda@axentia.se>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260126165303.3408060-2-sashal@kernel.org>
+
+From: Rasmus Villemoes <ravi@prevas.dk>
+
+[ Upstream commit 9910159f06590c17df4fbddedaabb4c0201cc4cb ]
+
+When one iio device is a consumer of another, it is possible that
+the ->info_exist_lock of both ends up being taken when reading the
+value of the consumer device.
+
+Since they currently belong to the same lockdep class (being
+initialized in a single location with mutex_init()), that results in a
+lockdep warning
+
+ CPU0
+ ----
+ lock(&iio_dev_opaque->info_exist_lock);
+ lock(&iio_dev_opaque->info_exist_lock);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+ 4 locks held by sensors/414:
+ #0: c31fd6dc (&p->lock){+.+.}-{3:3}, at: seq_read_iter+0x44/0x4e4
+ #1: c4f5a1c4 (&of->mutex){+.+.}-{3:3}, at: kernfs_seq_start+0x1c/0xac
+ #2: c2827548 (kn->active#34){.+.+}-{0:0}, at: kernfs_seq_start+0x30/0xac
+ #3: c1dd2b68 (&iio_dev_opaque->info_exist_lock){+.+.}-{3:3}, at: iio_read_channel_processed_scale+0x24/0xd8
+
+ stack backtrace:
+ CPU: 0 UID: 0 PID: 414 Comm: sensors Not tainted 6.17.11 #5 NONE
+ Hardware name: Generic AM33XX (Flattened Device Tree)
+ Call trace:
+ unwind_backtrace from show_stack+0x10/0x14
+ show_stack from dump_stack_lvl+0x44/0x60
+ dump_stack_lvl from print_deadlock_bug+0x2b8/0x334
+ print_deadlock_bug from __lock_acquire+0x13a4/0x2ab0
+ __lock_acquire from lock_acquire+0xd0/0x2c0
+ lock_acquire from __mutex_lock+0xa0/0xe8c
+ __mutex_lock from mutex_lock_nested+0x1c/0x24
+ mutex_lock_nested from iio_read_channel_raw+0x20/0x6c
+ iio_read_channel_raw from rescale_read_raw+0x128/0x1c4
+ rescale_read_raw from iio_channel_read+0xe4/0xf4
+ iio_channel_read from iio_read_channel_processed_scale+0x6c/0xd8
+ iio_read_channel_processed_scale from iio_hwmon_read_val+0x68/0xbc
+ iio_hwmon_read_val from dev_attr_show+0x18/0x48
+ dev_attr_show from sysfs_kf_seq_show+0x80/0x110
+ sysfs_kf_seq_show from seq_read_iter+0xdc/0x4e4
+ seq_read_iter from vfs_read+0x238/0x2e4
+ vfs_read from ksys_read+0x6c/0xec
+ ksys_read from ret_fast_syscall+0x0/0x1c
+
+Just as the mlock_key already has its own lockdep class, add a
+lock_class_key for the info_exist mutex.
+
+Note that this has in theory been a problem since before IIO first
+left staging, but it only occurs when a chain of consumers is in use
+and that is not often done.
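+
+For illustration only (this is not part of the upstream change, and the
+foo_* names below are made up), the general pattern of giving each
+instance its own lockdep class with lockdep_register_key() and
+mutex_init_with_key() looks roughly like this:
+
+    #include <linux/lockdep.h>
+    #include <linux/mutex.h>
+    #include <linux/slab.h>
+
+    struct foo_dev {
+            struct mutex lock;
+            struct lock_class_key lock_key; /* per-instance class */
+    };
+
+    static struct foo_dev *foo_alloc(void)
+    {
+            struct foo_dev *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+
+            if (!fd)
+                    return NULL;
+            /* A distinct class per instance, so one instance taking the
+             * lock of another no longer looks like recursive locking.
+             */
+            lockdep_register_key(&fd->lock_key);
+            mutex_init_with_key(&fd->lock, &fd->lock_key);
+            return fd;
+    }
+
+    static void foo_free(struct foo_dev *fd)
+    {
+            mutex_destroy(&fd->lock);
+            lockdep_unregister_key(&fd->lock_key);
+            kfree(fd);
+    }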
+
+Fixes: ac917a81117c ("staging:iio:core set the iio_dev.info pointer to null on unregister under lock.")
+Signed-off-by: Rasmus Villemoes <ravi@prevas.dk>
+Reviewed-by: Peter Rosin <peda@axentia.se>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/industrialio-core.c | 4 +++-
+ include/linux/iio/iio-opaque.h | 2 ++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1657,6 +1657,7 @@ static void iio_dev_release(struct devic
+ mutex_destroy(&iio_dev_opaque->info_exist_lock);
+ mutex_destroy(&iio_dev_opaque->mlock);
+
++ lockdep_unregister_key(&iio_dev_opaque->info_exist_key);
+ lockdep_unregister_key(&iio_dev_opaque->mlock_key);
+
+ ida_free(&iio_ida, iio_dev_opaque->id);
+@@ -1717,9 +1718,10 @@ struct iio_dev *iio_device_alloc(struct
+ INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
+
+ lockdep_register_key(&iio_dev_opaque->mlock_key);
++ lockdep_register_key(&iio_dev_opaque->info_exist_key);
+
+ mutex_init_with_key(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
+- mutex_init(&iio_dev_opaque->info_exist_lock);
++ mutex_init_with_key(&iio_dev_opaque->info_exist_lock, &iio_dev_opaque->info_exist_key);
+
+ indio_dev->dev.parent = parent;
+ indio_dev->dev.type = &iio_device_type;
+--- a/include/linux/iio/iio-opaque.h
++++ b/include/linux/iio/iio-opaque.h
+@@ -14,6 +14,7 @@
+ * @mlock: lock used to prevent simultaneous device state changes
+ * @mlock_key: lockdep class for iio_dev lock
+ * @info_exist_lock: lock to prevent use during removal
++ * @info_exist_key: lockdep class for info_exist lock
+ * @trig_readonly: mark the current trigger immutable
+ * @event_interface: event chrdevs associated with interrupt lines
+ * @attached_buffers: array of buffers statically attached by the driver
+@@ -47,6 +48,7 @@ struct iio_dev_opaque {
+ struct mutex mlock;
+ struct lock_class_key mlock_key;
+ struct mutex info_exist_lock;
++ struct lock_class_key info_exist_key;
+ bool trig_readonly;
+ struct iio_event_interface *event_interface;
+ struct iio_buffer **attached_buffers;
--- /dev/null
+From stable+bounces-211655-greg=kroah.com@vger.kernel.org Mon Jan 26 17:53:10 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 11:53:02 -0500
+Subject: iio: core: Replace lockdep_set_class() + mutex_init() by combined call
+To: stable@vger.kernel.org
+Cc: "Andy Shevchenko" <andriy.shevchenko@linux.intel.com>, "Nuno Sá" <nuno.sa@analog.com>, "Jonathan Cameron" <Jonathan.Cameron@huawei.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260126165303.3408060-1-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit c76ba4b2644424b8dbacee80bb40991eac29d39e ]
+
+Replace lockdep_set_class() + mutex_init() by combined call
+mutex_init_with_key().
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: 9910159f0659 ("iio: core: add separate lockdep class for info_exist_lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/industrialio-core.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1717,9 +1717,8 @@ struct iio_dev *iio_device_alloc(struct
+ INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
+
+ lockdep_register_key(&iio_dev_opaque->mlock_key);
+- lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
+
+- mutex_init(&iio_dev_opaque->mlock);
++ mutex_init_with_key(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
+ mutex_init(&iio_dev_opaque->info_exist_lock);
+
+ indio_dev->dev.parent = parent;
--- /dev/null
+From stable+bounces-211865-greg=kroah.com@vger.kernel.org Tue Jan 27 18:01:36 2026
+From: Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
+Date: Tue, 27 Jan 2026 17:48:15 +0100
+Subject: irqchip/renesas-rzv2h: Prevent TINT spurious interrupt during resume
+To: stable@vger.kernel.org
+Cc: Biju Das <biju.das.jz@bp.renesas.com>, Thomas Gleixner <tglx@kernel.org>, Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
+Message-ID: <20260127164815.526921-1-tommaso.merciai.xr@bp.renesas.com>
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit cd4a3ced4d1cdb14ffe905657b98a91e9d239dfb ]
+
+A glitch in the edge detection circuit can cause a spurious interrupt. The
+hardware manual recommends clearing the status flag after setting the
+ICU_TSSRk register as a countermeasure.
+
+Currently, a spurious interrupt is generated on the resume path of s2idle
+for the PMIC RTC TINT interrupt due to a glitch related to unnecessary
+enabling/disabling of the TINT enable bit.
+
+Fix this issue by not writing the TSSR (TINT Source) and TITSR (TINT
+Detection Method Selection) registers when the new values are the same
+as those already set in these registers.
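+
+For illustration only (the register offset, mask and helper below are
+made up rather than the driver's actual symbols), the read-compare-skip
+pattern applied by this fix looks roughly like:
+
+    #include <linux/bitfield.h>
+    #include <linux/bits.h>
+    #include <linux/io.h>
+
+    #define CFG_REG             0x10
+    #define CFG_SEL_MASK        GENMASK(7, 0)
+
+    static bool cfg_update(void __iomem *base, u32 sel)
+    {
+            u32 cur = readl_relaxed(base + CFG_REG);
+            u32 val = (cur & ~CFG_SEL_MASK) | FIELD_PREP(CFG_SEL_MASK, sel);
+
+            /* Unchanged configuration: skip the write entirely, so the
+             * enable bit is never toggled and no glitch can be latched
+             * by the edge detection circuit.
+             */
+            if (val == cur)
+                    return false;
+
+            writel_relaxed(val, base + CFG_REG);
+            return true;
+    }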
+
+Fixes: 0d7605e75ac2 ("irqchip: Add RZ/V2H(P) Interrupt Control Unit (ICU) driver")
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260113125315.359967-2-biju.das.jz@bp.renesas.com
+[tm: Added field_get() to avoid build error]
+Signed-off-by: Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-renesas-rzv2h.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-renesas-rzv2h.c
++++ b/drivers/irqchip/irq-renesas-rzv2h.c
+@@ -89,6 +89,8 @@
+ #define ICU_RZG3E_TSSEL_MAX_VAL 0x8c
+ #define ICU_RZV2H_TSSEL_MAX_VAL 0x55
+
++#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
++
+ /**
+ * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
+ * @tssel_lut: TINT lookup table
+@@ -328,6 +330,7 @@ static int rzv2h_tint_set_type(struct ir
+ u32 titsr, titsr_k, titsel_n, tien;
+ struct rzv2h_icu_priv *priv;
+ u32 tssr, tssr_k, tssel_n;
++ u32 titsr_cur, tssr_cur;
+ unsigned int hwirq;
+ u32 tint, sense;
+ int tint_nr;
+@@ -376,12 +379,18 @@ static int rzv2h_tint_set_type(struct ir
+ guard(raw_spinlock)(&priv->lock);
+
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
++ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
++
++ tssr_cur = field_get(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width), tssr);
++ titsr_cur = field_get(ICU_TITSR_TITSEL_MASK(titsel_n), titsr);
++ if (tssr_cur == tint && titsr_cur == sense)
++ return 0;
++
+ tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width) | tien);
+ tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n, priv->info->field_width);
+
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
+
+- titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
+ titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
+ titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
+
--- /dev/null
+From stable+bounces-211674-greg=kroah.com@vger.kernel.org Mon Jan 26 20:12:28 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 14:12:20 -0500
+Subject: mm: fix some typos in mm module
+To: stable@vger.kernel.org
+Cc: "jianyun.gao" <jianyungao89@gmail.com>, SeongJae Park <sj@kernel.org>, Wei Yang <richard.weiyang@gmail.com>, Dev Jain <dev.jain@arm.com>, "Liam R. Howlett" <Liam.Howlett@oracle.com>, Chris Li <chrisl@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260126191221.3643780-1-sashal@kernel.org>
+
+From: "jianyun.gao" <jianyungao89@gmail.com>
+
+[ Upstream commit b6c46600bfb28b4be4e9cff7bad4f2cf357e0fb7 ]
+
+Below are some typos in the code comments:
+
+ intevals ==> intervals
+ addesses ==> addresses
+ unavaliable ==> unavailable
+ facor ==> factor
+ droping ==> dropping
+ exlusive ==> exclusive
+ decription ==> description
+ confict ==> conflict
+ desriptions ==> descriptions
+ otherwize ==> otherwise
+ vlaue ==> value
+ cheching ==> checking
+ exisitng ==> existing
+ modifed ==> modified
+ differenciate ==> differentiate
+ refernece ==> reference
+ permissons ==> permissions
+ indepdenent ==> independent
+ spliting ==> splitting
+
+Just fix them.
+
+Link: https://lkml.kernel.org/r/20250929002608.1633825-1-jianyungao89@gmail.com
+Signed-off-by: jianyun.gao <jianyungao89@gmail.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Dev Jain <dev.jain@arm.com>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Acked-by: Chris Li <chrisl@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 3937027caecb ("mm/hugetlb: fix two comments related to huge_pmd_unshare()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c | 2 +-
+ mm/gup.c | 2 +-
+ mm/hugetlb.c | 6 +++---
+ mm/hugetlb_vmemmap.c | 6 +++---
+ mm/kmsan/core.c | 2 +-
+ mm/ksm.c | 2 +-
+ mm/memory-tiers.c | 2 +-
+ mm/memory.c | 4 ++--
+ mm/secretmem.c | 2 +-
+ mm/slab_common.c | 2 +-
+ mm/slub.c | 2 +-
+ mm/swapfile.c | 2 +-
+ mm/userfaultfd.c | 2 +-
+ mm/vma.c | 4 ++--
+ 14 files changed, 20 insertions(+), 20 deletions(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1267,7 +1267,7 @@ enum damon_sysfs_cmd {
+ DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
+ /*
+ * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
+- * intevals.
++ * intervals.
+ */
+ DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
+ /*
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2710,7 +2710,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
+ *
+ * *) ptes can be read atomically by the architecture.
+ *
+- * *) valid user addesses are below TASK_MAX_SIZE
++ * *) valid user addresses are below TASK_MAX_SIZE
+ *
+ * The last two assumptions can be relaxed by the addition of helper functions.
+ *
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2934,7 +2934,7 @@ typedef enum {
+ * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
+ * that currently vma_needs_reservation() has an unwanted side
+ * effect to either use end() or commit() to complete the
+- * transaction. Hence it needs to differenciate from NEEDED.
++ * transaction. Hence it needs to differentiate from NEEDED.
+ */
+ MAP_CHG_ENFORCED = 2,
+ } map_chg_state;
+@@ -6007,7 +6007,7 @@ void __unmap_hugepage_range(struct mmu_g
+ /*
+ * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+ * could defer the flush until now, since by holding i_mmap_rwsem we
+- * guaranteed that the last refernece would not be dropped. But we must
++ * guaranteed that the last reference would not be dropped. But we must
+ * do the flushing before we return, as otherwise i_mmap_rwsem will be
+ * dropped and the last reference to the shared PMDs page might be
+ * dropped as well.
+@@ -7193,7 +7193,7 @@ long hugetlb_change_protection(struct vm
+ } else if (unlikely(is_pte_marker(pte))) {
+ /*
+ * Do nothing on a poison marker; page is
+- * corrupted, permissons do not apply. Here
++ * corrupted, permissions do not apply. Here
+ * pte_marker_uffd_wp()==true implies !poison
+ * because they're mutual exclusive.
+ */
+--- a/mm/hugetlb_vmemmap.c
++++ b/mm/hugetlb_vmemmap.c
+@@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd,
+ if (likely(pmd_leaf(*pmd))) {
+ /*
+ * Higher order allocations from buddy allocator must be able to
+- * be treated as indepdenent small pages (as they can be freed
++ * be treated as independent small pages (as they can be freed
+ * individually).
+ */
+ if (!PageReserved(head))
+@@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_f
+ ret = hugetlb_vmemmap_split_folio(h, folio);
+
+ /*
+- * Spliting the PMD requires allocating a page, thus lets fail
++ * Splitting the PMD requires allocating a page, thus let's fail
+ * early once we encounter the first OOM. No point in retrying
+ * as it can be dynamically done on remap with the memory
+ * we get back from the vmemmap deduplication.
+@@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_f
+ /*
+ * Pages to be freed may have been accumulated. If we
+ * encounter an ENOMEM, free what we have and try again.
+- * This can occur in the case that both spliting fails
++ * This can occur in the case that both splitting fails
+ * halfway and head page allocation also failed. In this
+ * case __hugetlb_vmemmap_optimize_folio() would free memory
+ * allowing more vmemmap remaps to occur.
+--- a/mm/kmsan/core.c
++++ b/mm/kmsan/core.c
+@@ -33,7 +33,7 @@ bool kmsan_enabled __read_mostly;
+
+ /*
+ * Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
+- * unavaliable.
++ * unavailable.
+ */
+ DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
+
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -389,7 +389,7 @@ static unsigned long ewma(unsigned long
+ * exponentially weighted moving average. The new pages_to_scan value is
+ * multiplied with that change factor:
+ *
+- * new_pages_to_scan *= change facor
++ * new_pages_to_scan *= change factor
+ *
+ * The new_pages_to_scan value is limited by the cpu min and max values. It
+ * calculates the cpu percent for the last scan and calculates the new
+--- a/mm/memory-tiers.c
++++ b/mm/memory-tiers.c
+@@ -519,7 +519,7 @@ static inline void __init_node_memory_ty
+ * for each device getting added in the same NUMA node
+ * with this specific memtype, bump the map count. We
+ * Only take memtype device reference once, so that
+- * changing a node memtype can be done by droping the
++ * changing a node memtype can be done by dropping the
+ * only reference count taken here.
+ */
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4328,7 +4328,7 @@ static inline bool should_try_to_free_sw
+ * If we want to map a page that's in the swapcache writable, we
+ * have to detect via the refcount if we're really the exclusive
+ * user. Try freeing the swapcache to get rid of the swapcache
+- * reference only in case it's likely that we'll be the exlusive user.
++ * reference only in case it's likely that we'll be the exclusive user.
+ */
+ return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
+ folio_ref_count(folio) == (1 + folio_nr_pages(folio));
+@@ -5405,7 +5405,7 @@ vm_fault_t do_set_pmd(struct vm_fault *v
+
+ /**
+ * set_pte_range - Set a range of PTEs to point to pages in a folio.
+- * @vmf: Fault decription.
++ * @vmf: Fault description.
+ * @folio: The folio that contains @page.
+ * @page: The first page to create a PTE for.
+ * @nr: The number of PTEs to create.
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -227,7 +227,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned i
+ struct file *file;
+ int fd, err;
+
+- /* make sure local flags do not confict with global fcntl.h */
++ /* make sure local flags do not conflict with global fcntl.h */
+ BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);
+
+ if (!secretmem_enable || !can_set_direct_map())
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -259,7 +259,7 @@ out:
+ * @object_size: The size of objects to be created in this cache.
+ * @args: Additional arguments for the cache creation (see
+ * &struct kmem_cache_args).
+- * @flags: See the desriptions of individual flags. The common ones are listed
++ * @flags: See the descriptions of individual flags. The common ones are listed
+ * in the description below.
+ *
+ * Not to be called directly, use the kmem_cache_create() wrapper with the same
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2533,7 +2533,7 @@ bool slab_free_hook(struct kmem_cache *s
+ memset((char *)kasan_reset_tag(x) + inuse, 0,
+ s->size - inuse - rsize);
+ /*
+- * Restore orig_size, otherwize kmalloc redzone overwritten
++ * Restore orig_size, otherwise kmalloc redzone overwritten
+ * would be reported
+ */
+ set_orig_size(s, x, orig_size);
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1703,7 +1703,7 @@ static bool swap_entries_put_map_nr(stru
+
+ /*
+ * Check if it's the last ref of swap entry in the freeing path.
+- * Qualified vlaue includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
++ * Qualified value includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
+ */
+ static inline bool __maybe_unused swap_is_last_ref(unsigned char count)
+ {
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -1578,7 +1578,7 @@ static int validate_move_areas(struct us
+
+ /*
+ * For now, we keep it simple and only move between writable VMAs.
+- * Access flags are equal, therefore cheching only the source is enough.
++ * Access flags are equal, therefore checking only the source is enough.
+ */
+ if (!(src_vma->vm_flags & VM_WRITE))
+ return -EINVAL;
+--- a/mm/vma.c
++++ b/mm/vma.c
+@@ -109,7 +109,7 @@ static inline bool is_mergeable_vma(stru
+ static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
+ {
+ struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
+- struct vm_area_struct *src = vmg->middle; /* exisitng merge case. */
++ struct vm_area_struct *src = vmg->middle; /* existing merge case. */
+ struct anon_vma *tgt_anon = tgt->anon_vma;
+ struct anon_vma *src_anon = vmg->anon_vma;
+
+@@ -798,7 +798,7 @@ static bool can_merge_remove_vma(struct
+ * Returns: The merged VMA if merge succeeds, or NULL otherwise.
+ *
+ * ASSUMPTIONS:
+- * - The caller must assign the VMA to be modifed to @vmg->middle.
++ * - The caller must assign the VMA to be modified to @vmg->middle.
+ * - The caller must have set @vmg->prev to the previous VMA, if there is one.
+ * - The caller must not set @vmg->next, as we determine this.
+ * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
--- /dev/null
+From stable+bounces-211675-greg=kroah.com@vger.kernel.org Mon Jan 26 20:12:30 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 14:12:21 -0500
+Subject: mm/hugetlb: fix two comments related to huge_pmd_unshare()
+To: stable@vger.kernel.org
+Cc: "David Hildenbrand (Red Hat)" <david@kernel.org>, Rik van Riel <riel@surriel.com>, Laurence Oberman <loberman@redhat.com>, Lorenzo Stoakes <lorenzo.stoakes@oracle.com>, Oscar Salvador <osalvador@suse.de>, Harry Yoo <harry.yoo@oracle.com>, Liu Shixin <liushixin2@huawei.com>, Lance Yang <lance.yang@linux.dev>, "Uschakow, Stanislav" <suschako@amazon.de>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260126191221.3643780-2-sashal@kernel.org>
+
+From: "David Hildenbrand (Red Hat)" <david@kernel.org>
+
+[ Upstream commit 3937027caecb4f8251e82dd857ba1d749bb5a428 ]
+
+Ever since we stopped using the page count to detect shared PMD page
+tables, these comments are outdated.
+
+The only reason we have to flush the TLB early is that once we drop the
+i_mmap_rwsem, the previously shared page table could get freed (to then
+get reallocated and used for another purpose). So we really have to flush
+the TLB before that could happen.
+
+So let's simplify the comments a bit.
+
+The "If we unshared PMDs, the TLB flush was not recorded in mmu_gather."
+part introduced in commit a4a118f2eead ("hugetlbfs: flush TLBs
+correctly after huge_pmd_unshare") was confusing: sure it is recorded in
+the mmu_gather, otherwise tlb_flush_mmu_tlbonly() wouldn't do anything.
+So let's drop that comment while at it as well.
+
+We'll centralize these comments in a single helper as we rework the code
+next.
+
+Link: https://lkml.kernel.org/r/20251223214037.580860-3-david@kernel.org
+Fixes: 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count")
+Signed-off-by: David Hildenbrand (Red Hat) <david@kernel.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Cc: Liu Shixin <liushixin2@huawei.com>
+Cc: Lance Yang <lance.yang@linux.dev>
+Cc: "Uschakow, Stanislav" <suschako@amazon.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c | 24 ++++++++----------------
+ 1 file changed, 8 insertions(+), 16 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6005,17 +6005,10 @@ void __unmap_hugepage_range(struct mmu_g
+ tlb_end_vma(tlb, vma);
+
+ /*
+- * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+- * could defer the flush until now, since by holding i_mmap_rwsem we
+- * guaranteed that the last reference would not be dropped. But we must
+- * do the flushing before we return, as otherwise i_mmap_rwsem will be
+- * dropped and the last reference to the shared PMDs page might be
+- * dropped as well.
+- *
+- * In theory we could defer the freeing of the PMD pages as well, but
+- * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+- * detect sharing, so we cannot defer the release of the page either.
+- * Instead, do flush now.
++ * There is nothing protecting a previously-shared page table that we
++ * unshared through huge_pmd_unshare() from getting freed after we
++ * release i_mmap_rwsem, so flush the TLB now. If huge_pmd_unshare()
++ * succeeded, flush the range corresponding to the pud.
+ */
+ if (force_flush)
+ tlb_flush_mmu_tlbonly(tlb);
+@@ -7226,11 +7219,10 @@ long hugetlb_change_protection(struct vm
+ cond_resched();
+ }
+ /*
+- * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
+- * may have cleared our pud entry and done put_page on the page table:
+- * once we release i_mmap_rwsem, another task can do the final put_page
+- * and that page table be reused and filled with junk. If we actually
+- * did unshare a page of pmds, flush the range corresponding to the pud.
++ * There is nothing protecting a previously-shared page table that we
++ * unshared through huge_pmd_unshare() from getting freed after we
++ * release i_mmap_rwsem, so flush the TLB now. If huge_pmd_unshare()
++ * succeeded, flush the range corresponding to the pud.
+ */
+ if (shared_pmd)
+ flush_hugetlb_tlb_range(vma, range.start, range.end);
--- /dev/null
+From stable+bounces-211299-greg=kroah.com@vger.kernel.org Thu Jan 22 20:01:23 2026
+From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Date: Thu, 22 Jan 2026 19:00:22 +0000
+Subject: mm/vma: enforce VMA fork limit on unfaulted,faulted mremap merge too
+To: stable@vger.kernel.org
+Message-ID: <98709f1c3e60b83b554a33d62744a13c15b3864a.1769108022.git.lorenzo.stoakes@oracle.com>
+
+From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+
+[ Upstream commit 3b617fd3d317bf9dd7e2c233e56eafef05734c9d ]
+
+The is_mergeable_anon_vma() function uses vmg->middle as the source VMA.
+However, when merging a new VMA, this field is NULL.
+
+In all cases except mremap(), the new VMA will either be newly established
+and thus lack an anon_vma, or will be an expansion of an existing VMA, in
+which case we do not care about whether the VMA is CoW'd or not.
+
+In the case of an mremap(), we can end up in a situation where we can
+accidentally allow an unfaulted/faulted merge with a VMA that has been
+forked, violating the general rule that we do not permit this for reasons
+of anon_vma lock scalability.
+
+Now that we are aware that we are copying a VMA, and also know which VMA
+that is, we can explicitly check for this, so do so.
+
+This is pertinent since commit 879bca0a2c4f ("mm/vma: fix incorrectly
+disallowed anonymous VMA merges"), as this patch permits unfaulted/faulted
+merges that were previously disallowed running afoul of this issue.
+
+While we are here, vma_had_uncowed_parents() is a confusing name, so make
+it simple and rename it to vma_is_fork_child().
+
+Link: https://lkml.kernel.org/r/6e2b9b3024ae1220961c8b81d74296d4720eaf2b.1767638272.git.lorenzo.stoakes@oracle.com
+Fixes: 879bca0a2c4f ("mm/vma: fix incorrectly disallowed anonymous VMA merges")
+Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Reviewed-by: Jeongjun Park <aha310510@gmail.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: David Hildenbrand (Red Hat) <david@kernel.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Pedro Falcato <pfalcato@suse.de>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Yeoreum Yun <yeoreum.yun@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ with upstream commit 61f67c230a5e backported, this simply applied correctly. Built + tested ]
+Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/vma.c | 27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/mm/vma.c
++++ b/mm/vma.c
+@@ -65,18 +65,13 @@ struct mmap_state {
+ .state = VMA_MERGE_START, \
+ }
+
+-/*
+- * If, at any point, the VMA had unCoW'd mappings from parents, it will maintain
+- * more than one anon_vma_chain connecting it to more than one anon_vma. A merge
+- * would mean a wider range of folios sharing the root anon_vma lock, and thus
+- * potential lock contention, we do not wish to encourage merging such that this
+- * scales to a problem.
+- */
+-static bool vma_had_uncowed_parents(struct vm_area_struct *vma)
++/* Was this VMA ever forked from a parent, i.e. maybe contains CoW mappings? */
++static bool vma_is_fork_child(struct vm_area_struct *vma)
+ {
+ /*
+ * The list_is_singular() test is to avoid merging VMA cloned from
+- * parents. This can improve scalability caused by anon_vma lock.
++ * parents. This can improve scalability caused by the anon_vma root
++ * lock.
+ */
+ return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
+ }
+@@ -121,11 +116,19 @@ static bool is_mergeable_anon_vma(struct
+ VM_WARN_ON(src && src_anon != src->anon_vma);
+
+ /* Case 1 - we will dup_anon_vma() from src into tgt. */
+- if (!tgt_anon && src_anon)
+- return !vma_had_uncowed_parents(src);
++ if (!tgt_anon && src_anon) {
++ struct vm_area_struct *copied_from = vmg->copied_from;
++
++ if (vma_is_fork_child(src))
++ return false;
++ if (vma_is_fork_child(copied_from))
++ return false;
++
++ return true;
++ }
+ /* Case 2 - we will simply use tgt's anon_vma. */
+ if (tgt_anon && !src_anon)
+- return !vma_had_uncowed_parents(tgt);
++ return !vma_is_fork_child(tgt);
+ /* Case 3 - the anon_vma's are already shared. */
+ return src_anon == tgt_anon;
+ }
--- /dev/null
+From stable+bounces-211300-greg=kroah.com@vger.kernel.org Thu Jan 22 20:01:23 2026
+From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Date: Thu, 22 Jan 2026 19:00:21 +0000
+Subject: mm/vma: fix anon_vma UAF on mremap() faulted, unfaulted merge
+To: stable@vger.kernel.org
+Message-ID: <f1e305c89aaf15fc62c6160505eb6d19adf5d49b.1769108022.git.lorenzo.stoakes@oracle.com>
+
+From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+
+[ upstream commit 61f67c230a5e7c741c352349ea80147fbe65bfae ]
+
+Patch series "mm/vma: fix anon_vma UAF on mremap() faulted, unfaulted
+merge", v2.
+
+Commit 879bca0a2c4f ("mm/vma: fix incorrectly disallowed anonymous VMA
+merges") introduced the ability to merge previously unavailable VMA merge
+scenarios.
+
+However, it is handling merges incorrectly when it comes to mremap() of a
+faulted VMA adjacent to an unfaulted VMA. The issues arise in three
+cases:
+
+1. Previous VMA unfaulted:
+
+ copied -----|
+ v
+ |-----------|.............|
+ | unfaulted |(faulted VMA)|
+ |-----------|.............|
+ prev
+
+2. Next VMA unfaulted:
+
+ copied -----|
+ v
+ |.............|-----------|
+ |(faulted VMA)| unfaulted |
+ |.............|-----------|
+ next
+
+3. Both adjacent VMAs unfaulted:
+
+ copied -----|
+ v
+ |-----------|.............|-----------|
+ | unfaulted |(faulted VMA)| unfaulted |
+ |-----------|.............|-----------|
+ prev next
+
+This series fixes each of these cases, and introduces self tests to assert
+that the issues are corrected.
+
+I also test a further case which was already handled, to assert that my
+changes continue to correctly handle it:
+
+4. prev unfaulted, next faulted:
+
+ copied -----|
+ v
+ |-----------|.............|-----------|
+ | unfaulted |(faulted VMA)| faulted |
+ |-----------|.............|-----------|
+ prev next
+
+This bug was discovered via a syzbot report, linked to in the first patch
+in the series, I confirmed that this series fixes the bug.
+
+I also discovered that we are failing to check that the faulted VMA was
+not forked when merging a copied VMA in cases 1-3 above, an issue this
+series also addresses.
+
+I also added self tests to assert that this is resolved (and confirmed
+that the tests failed prior to this).
+
+I also cleaned up vma_expand() as part of this work, renamed
+vma_had_uncowed_parents() to vma_is_fork_child() as the previous name was
+unduly confusing, and simplified the comments around this function.
+
+This patch (of 4):
+
+Commit 879bca0a2c4f ("mm/vma: fix incorrectly disallowed anonymous VMA
+merges") introduced the ability to merge previously unavailable VMA merge
+scenarios.
+
+The key piece of logic introduced was the ability to merge a faulted VMA
+immediately next to an unfaulted VMA, which relies upon dup_anon_vma() to
+correctly handle anon_vma state.
+
+In the case of the merge of an existing VMA (that is changing properties
+of a VMA and then merging if those properties are shared by adjacent
+VMAs), dup_anon_vma() is invoked correctly.
+
+However in the case of the merge of a new VMA, a corner case peculiar to
+mremap() was missed.
+
+The issue is that vma_expand() only performs dup_anon_vma() if the target
+(the VMA that will ultimately become the merged VMA) is not the next VMA,
+i.e. the one that appears after the range in which the new VMA is to be
+established.
+
+A key insight here is that in all cases other than mremap(), a new
+VMA merge either expands an existing VMA, meaning that the target VMA will
+be that VMA, or would have anon_vma be NULL.
+
+Specifically:
+
+* __mmap_region() - no anon_vma in place, initial mapping.
+* do_brk_flags() - expanding an existing VMA.
+* vma_merge_extend() - expanding an existing VMA.
+* relocate_vma_down() - no anon_vma in place, initial mapping.
+
+In addition, we are in the unique situation of needing to duplicate
+anon_vma state from a VMA that is neither the previous nor the next VMA
+being merged with.
+
+dup_anon_vma() deals exclusively with the target=unfaulted, src=faulted
+case. This leaves four possibilities, in each case where the copied VMA
+is faulted:
+
+1. Previous VMA unfaulted:
+
+ copied -----|
+ v
+ |-----------|.............|
+ | unfaulted |(faulted VMA)|
+ |-----------|.............|
+ prev
+
+target = prev, expand prev to cover.
+
+2. Next VMA unfaulted:
+
+ copied -----|
+ v
+ |.............|-----------|
+ |(faulted VMA)| unfaulted |
+ |.............|-----------|
+ next
+
+target = next, expand next to cover.
+
+3. Both adjacent VMAs unfaulted:
+
+ copied -----|
+ v
+ |-----------|.............|-----------|
+ | unfaulted |(faulted VMA)| unfaulted |
+ |-----------|.............|-----------|
+ prev next
+
+target = prev, expand prev to cover.
+
+4. prev unfaulted, next faulted:
+
+ copied -----|
+ v
+ |-----------|.............|-----------|
+ | unfaulted |(faulted VMA)| faulted |
+ |-----------|.............|-----------|
+ prev next
+
+target = prev, expand prev to cover. Essentially equivalent to 3, but
+with the additional requirement that next's anon_vma is the same as the
+copied VMA's. This is covered by the existing logic.
+
+To account for this very explicitly, we introduce
+vma_merge_copied_range(), which sets a newly introduced vmg->copied_from
+field, then invokes vma_merge_new_range() which handles the rest of the
+logic.
+
+We then update the key vma_expand() function to clean up the logic and
+make what's going on clearer, making the 'remove next' case less special,
+before invoking dup_anon_vma() unconditionally should we be copying from a
+VMA.
+
+Note that in case 3, the if (remove_next) ... branch will be a no-op, as
+next=src in this instance and src is unfaulted.
+
+In case 4, it won't be, but since in this instance next=src and it is
+faulted, this will have required tgt=faulted, src=faulted to be
+compatible, meaning that next->anon_vma == vmg->copied_from->anon_vma, and
+thus a single dup_anon_vma() of next suffices to copy anon_vma state for
+the copied-from VMA also.
+
+If we are copying from a VMA in a successful merge, we must _always_
+propagate anon_vma state.
+
+This issue can be observed most directly by invoking mremap() to move
+around a VMA and cause this kind of merge with the MREMAP_DONTUNMAP flag
+specified.
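+
+A rough userspace sketch of the scenario described above (error checking
+omitted; whether a merge actually occurs depends on the kernel version
+and layout, so this is illustrative rather than a guaranteed reproducer):
+
+    #define _GNU_SOURCE
+    #include <string.h>
+    #include <sys/mman.h>
+
+    int main(void)
+    {
+            long pg = 4096;
+            /* Three adjacent anonymous pages; fault only the last one. */
+            char *area = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
+                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+            char *src = area + 2 * pg;
+
+            memset(src, 1, pg);          /* fault the source VMA */
+            munmap(area + pg, pg);       /* punch a hole in the middle */
+
+            /* Move the faulted VMA into the hole, adjacent to the still
+             * unfaulted VMA at 'area', keeping the old range mapped.
+             */
+            mremap(src, pg, pg,
+                   MREMAP_MAYMOVE | MREMAP_FIXED | MREMAP_DONTUNMAP,
+                   area + pg);
+            return 0;
+    }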
+
+This will result in unlink_anon_vmas() being called after failing to
+duplicate anon_vma state to the target VMA, which results in the anon_vma
+itself being freed with folios still possessing dangling pointers to the
+anon_vma and thus a use-after-free bug.
+
+This bug was discovered via a syzbot report, which this patch resolves.
+
+We further make a change to update the mergeable anon_vma check to assert
+the copied-from anon_vma did not have CoW parents, as otherwise
+dup_anon_vma() might incorrectly propagate CoW ancestors from the next VMA
+in case 4 despite the anon_vmas being identical for both VMAs.
+
+Link: https://lkml.kernel.org/r/cover.1767638272.git.lorenzo.stoakes@oracle.com
+Link: https://lkml.kernel.org/r/b7930ad2b1503a657e29fe928eb33061d7eadf5b.1767638272.git.lorenzo.stoakes@oracle.com
+Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Fixes: 879bca0a2c4f ("mm/vma: fix incorrectly disallowed anonymous VMA merges")
+Reported-by: syzbot+b165fc2e11771c66d8ba@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/694a2745.050a0220.19928e.0017.GAE@google.com/
+Reported-by: syzbot+5272541ccbbb14e2ec30@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/694e3dc6.050a0220.35954c.0066.GAE@google.com/
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Reviewed-by: Jeongjun Park <aha310510@gmail.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: David Hildenbrand (Red Hat) <david@kernel.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Yeoreum Yun <yeoreum.yun@arm.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Pedro Falcato <pfalcato@suse.de>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ updated to account for lack of sticky VMA flags + built, tested confirmed working ]
+Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/vma.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++----------------
+ mm/vma.h | 3 ++
+ 2 files changed, 56 insertions(+), 18 deletions(-)
+
+--- a/mm/vma.c
++++ b/mm/vma.c
+@@ -835,6 +835,8 @@ static __must_check struct vm_area_struc
+ VM_WARN_ON_VMG(middle &&
+ !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
+ vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
++ /* An existing merge can never be used by the mremap() logic. */
++ VM_WARN_ON_VMG(vmg->copied_from, vmg);
+
+ vmg->state = VMA_MERGE_NOMERGE;
+
+@@ -1102,6 +1104,33 @@ struct vm_area_struct *vma_merge_new_ran
+ }
+
+ /*
++ * vma_merge_copied_range - Attempt to merge a VMA that is being copied by
++ * mremap()
++ *
++ * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to
++ * @vmg->end (exclusive), which we try to merge with any adjacent VMAs if
++ * possible.
++ *
++ * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO
++ * range, i.e. the target range for the VMA.
++ *
++ * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
++ * to the VMA we expanded.
++ *
++ * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain
++ * the copied-from VMA.
++ */
++static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg)
++{
++ /* We must have a copied-from VMA. */
++ VM_WARN_ON_VMG(!vmg->middle, vmg);
++
++ vmg->copied_from = vmg->middle;
++ vmg->middle = NULL;
++ return vma_merge_new_range(vmg);
++}
++
++/*
+ * vma_expand - Expand an existing VMA
+ *
+ * @vmg: Describes a VMA expansion operation.
+@@ -1123,38 +1152,45 @@ int vma_expand(struct vma_merge_struct *
+ bool remove_next = false;
+ struct vm_area_struct *target = vmg->target;
+ struct vm_area_struct *next = vmg->next;
++ int ret = 0;
+
+ VM_WARN_ON_VMG(!target, vmg);
+
+ mmap_assert_write_locked(vmg->mm);
+-
+ vma_start_write(target);
+- if (next && (target != next) && (vmg->end == next->vm_end)) {
+- int ret;
+
++ if (next && target != next && vmg->end == next->vm_end)
+ remove_next = true;
+- /* This should already have been checked by this point. */
+- VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
+- vma_start_write(next);
+- /*
+- * In this case we don't report OOM, so vmg->give_up_on_mm is
+- * safe.
+- */
+- ret = dup_anon_vma(target, next, &anon_dup);
+- if (ret)
+- return ret;
+- }
+
++ /* We must have a target. */
++ VM_WARN_ON_VMG(!target, vmg);
++ /* This should have already been checked by this point. */
++ VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg);
+ /* Not merging but overwriting any part of next is not handled. */
+ VM_WARN_ON_VMG(next && !remove_next &&
+ next != target && vmg->end > next->vm_start, vmg);
+- /* Only handles expanding */
++ /* Only handles expanding. */
+ VM_WARN_ON_VMG(target->vm_start < vmg->start ||
+ target->vm_end > vmg->end, vmg);
+
++ /*
++ * If we are removing the next VMA or copying from a VMA
++ * (e.g. mremap()'ing), we must propagate anon_vma state.
++ *
++ * Note that, by convention, callers ignore OOM for this case, so
++ * we don't need to account for vmg->give_up_on_mm here.
++ */
+ if (remove_next)
+- vmg->__remove_next = true;
++ ret = dup_anon_vma(target, next, &anon_dup);
++ if (!ret && vmg->copied_from)
++ ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
++ if (ret)
++ return ret;
+
++ if (remove_next) {
++ vma_start_write(next);
++ vmg->__remove_next = true;
++ }
+ if (commit_merge(vmg))
+ goto nomem;
+
+@@ -1837,10 +1873,9 @@ struct vm_area_struct *copy_vma(struct v
+ if (new_vma && new_vma->vm_start < addr + len)
+ return NULL; /* should never get here */
+
+- vmg.middle = NULL; /* New VMA range. */
+ vmg.pgoff = pgoff;
+ vmg.next = vma_iter_next_rewind(&vmi, NULL);
+- new_vma = vma_merge_new_range(&vmg);
++ new_vma = vma_merge_copied_range(&vmg);
+
+ if (new_vma) {
+ /*
+--- a/mm/vma.h
++++ b/mm/vma.h
+@@ -106,6 +106,9 @@ struct vma_merge_struct {
+ struct anon_vma_name *anon_name;
+ enum vma_merge_state state;
+
++ /* If copied from (i.e. mremap()'d) the VMA from which we are copying. */
++ struct vm_area_struct *copied_from;
++
+ /* Flags which callers can use to modify merge behaviour: */
+
+ /*
gpio-cdev-correct-return-code-on-memory-allocation-failure.patch
gpio-cdev-fix-resource-leaks-on-errors-in-lineinfo_changed_notify.patch
gpio-cdev-fix-resource-leaks-on-errors-in-gpiolib_cdev_register.patch
+bluetooth-btintel_pcie-support-for-s4-hibernate.patch
+mm-fix-some-typos-in-mm-module.patch
+mm-hugetlb-fix-two-comments-related-to-huge_pmd_unshare.patch
+iio-core-replace-lockdep_set_class-mutex_init-by-combined-call.patch
+iio-core-add-separate-lockdep-class-for-info_exist_lock.patch
+arm64-dts-qcom-talos-correct-ufs-clocks-ordering.patch
+irqchip-renesas-rzv2h-prevent-tint-spurious-interrupt-during-resume.patch
+mm-vma-fix-anon_vma-uaf-on-mremap-faulted-unfaulted-merge.patch
+mm-vma-enforce-vma-fork-limit-on-unfaulted-faulted-mremap-merge-too.patch