--- /dev/null
+From ecc6e60fd744ef7a2322abc2b8da9e7ce92e5e46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Dec 2023 18:31:29 +0100
+Subject: ARM: dts: qcom: sdx55: fix pdc '#interrupt-cells'
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit cc25bd06c16aa582596a058d375b2e3133f79b93 ]
+
+The Qualcomm PDC interrupt controller binding expects two cells in
+interrupt specifiers.
+
+Fixes: 9d038b2e62de ("ARM: dts: qcom: Add SDX55 platform and MTP board support")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20231213173131.29436-2-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/qcom-sdx55.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index eb1db01031b2..7bb2f41049d9 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -522,7 +522,7 @@ pdc: interrupt-controller@b210000 {
+ compatible = "qcom,sdx55-pdc", "qcom,pdc";
+ reg = <0x0b210000 0x30000>;
+ qcom,pdc-ranges = <0 179 52>;
+- #interrupt-cells = <3>;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+--
+2.43.0
+
--- /dev/null
+From d5630b16a0cc2f780c0f8cd89f24031ca175e699 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Dec 2023 18:31:30 +0100
+Subject: ARM: dts: qcom: sdx55: fix USB DP/DM HS PHY interrupts
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit de95f139394a5ed82270f005bc441d2e7c1e51b7 ]
+
+The USB DP/DM HS PHY interrupts need to be provided by the PDC interrupt
+controller in order to be able to wake the system up from low-power
+states and to be able to detect disconnect events, which requires
+triggering on falling edges.
+
+A recent commit updated the trigger type but failed to change the
+interrupt provider as required. With the current Linux driver this
+leads to a probe failure, rather than just an error message during
+suspend and USB wakeup not working as intended.
+
+Fixes: d0ec3c4c11c3 ("ARM: dts: qcom: sdx55: fix USB wakeup interrupt types")
+Fixes: fea4b41022f3 ("ARM: dts: qcom: sdx55: Add USB3 and PHY support")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20231213173131.29436-3-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/qcom-sdx55.dtsi | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index 7bb2f41049d9..de747859fdc6 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -495,10 +495,10 @@ usb: usb@a6f8800 {
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 158 IRQ_TYPE_EDGE_BOTH>,
+- <GIC_SPI 157 IRQ_TYPE_EDGE_BOTH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 11 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+--
+2.43.0
+
--- /dev/null
+From a2288085fc51b690c487958a161afba8cdd31469 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Dec 2023 18:31:31 +0100
+Subject: ARM: dts: qcom: sdx55: fix USB SS wakeup
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 710dd03464e4ab5b3d329768388b165d61958577 ]
+
+The USB SS PHY interrupt needs to be provided by the PDC interrupt
+controller in order to be able to wake the system up from low-power
+states.
+
+Fixes: fea4b41022f3 ("ARM: dts: qcom: sdx55: Add USB3 and PHY support")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20231213173131.29436-4-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/qcom-sdx55.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index de747859fdc6..b85820448b9d 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -496,7 +496,7 @@ usb: usb@a6f8800 {
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <&intc GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 51 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 11 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 10 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+--
+2.43.0
+
--- /dev/null
+From 22c973f3e507ddeb6604ff644d6375d98bfe7f9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:21 +0100
+Subject: ARM: dts: qcom: sdx55: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit d0ec3c4c11c3b30e1f2d344973b2a7bf0f986734 ]
+
+The DP/DM wakeup interrupts are edge triggered, and which edge to
+trigger on depends on the use case and on whether a Low-Speed or
+Full/High-Speed device is connected.
+
+Fixes: fea4b41022f3 ("ARM: dts: qcom: sdx55: Add USB3 and PHY support")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20231120164331.8116-2-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/qcom-sdx55.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index a4bf1d5ee206..eb1db01031b2 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -497,8 +497,8 @@ usb: usb@a6f8800 {
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 158 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 157 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+--
+2.43.0
+
--- /dev/null
+From 0be786d8d572bef3903cddf5ce7437ac4e2d531b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Dec 2023 23:15:54 +0100
+Subject: ARM: dts: samsung: exynos4210-i9100: Unconditionally enable LDO12
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+[ Upstream commit 84228d5e29dbc7a6be51e221000e1d122125826c ]
+
+The kernel hangs for a good 12 seconds without any info being printed to
+dmesg, very early in the boot process, if this regulator is not enabled.
+
+Force-enable it to work around this issue, until we know more about the
+underlying problem.
+
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Fixes: 8620cc2f99b7 ("ARM: dts: exynos: Add devicetree file for the Galaxy S2")
+Cc: stable@vger.kernel.org # v5.8+
+Link: https://lore.kernel.org/r/20231206221556.15348-2-paul@crapouillou.net
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/exynos4210-i9100.dts | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
+index 53e023fc1cac..b4c7b642e456 100644
+--- a/arch/arm/boot/dts/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/exynos4210-i9100.dts
+@@ -521,6 +521,14 @@ vtcam_reg: LDO12 {
+ regulator-name = "VT_CAM_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++
++ /*
++ * Force-enable this regulator; otherwise the
++ * kernel hangs very early in the boot process
++ * for about 12 seconds, without apparent
++ * reason.
++ */
++ regulator-always-on;
+ };
+
+ vcclcd_reg: LDO13 {
+--
+2.43.0
+
--- /dev/null
+From bd11fed0c4e150a503f785d91fe312b69dfc054c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 01:02:28 +0900
+Subject: btrfs: zoned: factor out prepare_allocation_zoned()
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit b271fee9a41ca1474d30639fd6cc912c9901d0f8 ]
+
+Factor out prepare_allocation_zoned() for further extension. While at
+it, optimize the if-branch a bit.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 02444f2ac26e ("btrfs: zoned: optimize hint byte for zoned allocator")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 32 +++++++++++++++++++-------------
+ 1 file changed, 19 insertions(+), 13 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 7be3fcfb3f97..2832abe3a984 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4207,6 +4207,24 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+ return 0;
+ }
+
++static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
++ struct find_free_extent_ctl *ffe_ctl)
++{
++ if (ffe_ctl->for_treelog) {
++ spin_lock(&fs_info->treelog_bg_lock);
++ if (fs_info->treelog_bg)
++ ffe_ctl->hint_byte = fs_info->treelog_bg;
++ spin_unlock(&fs_info->treelog_bg_lock);
++ } else if (ffe_ctl->for_data_reloc) {
++ spin_lock(&fs_info->relocation_bg_lock);
++ if (fs_info->data_reloc_bg)
++ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
++ spin_unlock(&fs_info->relocation_bg_lock);
++ }
++
++ return 0;
++}
++
+ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info,
+@@ -4217,19 +4235,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ return prepare_allocation_clustered(fs_info, ffe_ctl,
+ space_info, ins);
+ case BTRFS_EXTENT_ALLOC_ZONED:
+- if (ffe_ctl->for_treelog) {
+- spin_lock(&fs_info->treelog_bg_lock);
+- if (fs_info->treelog_bg)
+- ffe_ctl->hint_byte = fs_info->treelog_bg;
+- spin_unlock(&fs_info->treelog_bg_lock);
+- }
+- if (ffe_ctl->for_data_reloc) {
+- spin_lock(&fs_info->relocation_bg_lock);
+- if (fs_info->data_reloc_bg)
+- ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+- spin_unlock(&fs_info->relocation_bg_lock);
+- }
+- return 0;
++ return prepare_allocation_zoned(fs_info, ffe_ctl);
+ default:
+ BUG();
+ }
+--
+2.43.0
+
--- /dev/null
+From b9c1b21b98a655ffcd78f16ddaeb3ce15b4626d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 01:02:29 +0900
+Subject: btrfs: zoned: optimize hint byte for zoned allocator
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit 02444f2ac26eae6385a65fcd66915084d15dffba ]
+
+Writing sequentially to a huge file on btrfs on an SMR HDD revealed a
+performance decline (from 220 MiB/s to 30 MiB/s after 500 minutes).
+
+The performance drops because of the increased latency of extent
+allocation, which is caused by traversing a large number of full block
+groups.
+
+So, this patch optimizes ffe_ctl->hint_byte by choosing a block group
+with sufficient available space from the active block group list, which
+does not contain full block groups.
+
+With the patch applied, the performance is sustained.
+
+Fixes: 2eda57089ea3 ("btrfs: zoned: implement sequential extent allocation")
+CC: stable@vger.kernel.org # 5.15+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 2832abe3a984..0d2cc186974d 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4220,6 +4220,24 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
+ if (fs_info->data_reloc_bg)
+ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+ spin_unlock(&fs_info->relocation_bg_lock);
++ } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
++ struct btrfs_block_group *block_group;
++
++ spin_lock(&fs_info->zone_active_bgs_lock);
++ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
++ /*
++ * No lock is OK here because avail is monotonically
++ * decreasing, and this is just a hint.
++ */
++ u64 avail = block_group->zone_capacity - block_group->alloc_offset;
++
++ if (block_group_bits(block_group, ffe_ctl->flags) &&
++ avail >= ffe_ctl->num_bytes) {
++ ffe_ctl->hint_byte = block_group->start;
++ break;
++ }
++ }
++ spin_unlock(&fs_info->zone_active_bgs_lock);
+ }
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 31d82027e890049bdaec0dc41dd2f3837370ea0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Sep 2023 13:05:02 +0530
+Subject: bus: mhi: ep: Do not allocate event ring element on stack
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+[ Upstream commit 987fdb5a43a66764808371b54e6047834170d565 ]
+
+It is possible that the host controller driver uses the DMA framework
+to write the event ring element. So avoid allocating the event ring
+element on the stack, as DMA cannot work on vmalloc memory.
+
+Cc: stable@vger.kernel.org
+Fixes: 961aeb689224 ("bus: mhi: ep: Add support for sending events to the host")
+Link: https://lore.kernel.org/r/20230901073502.69385-1-manivannan.sadhasivam@linaro.org
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/mhi/ep/main.c | 68 ++++++++++++++++++++++++++++-----------
+ 1 file changed, 50 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index edd153dda40c..34e0ba6f52d0 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -71,45 +71,77 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
+ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+ {
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
++
++ event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
+
+- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+- event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
+- event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
++ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
++ event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
++ event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+
+- return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
++ ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
++ kfree(event);
++
++ return ret;
+ }
+
+ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+ {
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
++
++ event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
+
+- event.dword[0] = MHI_SC_EV_DWORD0(state);
+- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
++ event->dword[0] = MHI_SC_EV_DWORD0(state);
++ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++ kfree(event);
++
++ return ret;
+ }
+
+ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+ {
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
++
++ event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
+
+- event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
+- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
++ event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
++ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+
+- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++ kfree(event);
++
++ return ret;
+ }
+
+ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+ {
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
++
++ event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
+
+- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+- event.dword[0] = MHI_CC_EV_DWORD0(code);
+- event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
++ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
++ event->dword[0] = MHI_CC_EV_DWORD0(code);
++ event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+
+- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++ kfree(event);
++
++ return ret;
+ }
+
+ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+--
+2.43.0
+
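As a rough sketch of the rule this fix applies (hypothetical demo_*
names; a buffer handed to a DMA-capable writer must come from the
linear map, e.g. kzalloc(), never from the stack, which may be
vmalloc'ed when CONFIG_VMAP_STACK is enabled):

	struct demo_event *ev;
	int ret;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);	/* DMA-safe allocation */
	if (!ev)
		return -ENOMEM;

	demo_fill_event(ev);			/* hypothetical helper */
	ret = demo_send_event(ev);		/* may DMA from 'ev' */
	kfree(ev);

	return ret;
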
--- /dev/null
+From 95aa2bc2969bc2028bb2519c1b63fc9ab46242c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Nov 2023 15:24:38 -0600
+Subject: dlm: use kernel_connect() and kernel_bind()
+
+From: Jordan Rife <jrife@google.com>
+
+[ Upstream commit e9cdebbe23f1aa9a1caea169862f479ab3fa2773 ]
+
+Recent changes to kernel_connect() and kernel_bind() ensure that
+callers are insulated from changes to the address parameter made by BPF
+SOCK_ADDR hooks. This patch wraps direct calls to ops->connect() and
+ops->bind() with kernel_connect() and kernel_bind() to protect callers
+in such cases.
+
+Link: https://lore.kernel.org/netdev/9944248dba1bce861375fcce9de663934d933ba9.camel@redhat.com/
+Fixes: d74bad4e74ee ("bpf: Hooks for sys_connect")
+Fixes: 4fbac77d2d09 ("bpf: Hooks for sys_bind")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jordan Rife <jrife@google.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/lowcomms.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 6ed09edabea0..72f34f96d015 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1845,8 +1845,8 @@ static int dlm_tcp_bind(struct socket *sock)
+ memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
+ make_sockaddr(&src_addr, 0, &addr_len);
+
+- result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
+- addr_len);
++ result = kernel_bind(sock, (struct sockaddr *)&src_addr,
++ addr_len);
+ if (result < 0) {
+ /* This *may* not indicate a critical error */
+ log_print("could not bind for connect: %d", result);
+@@ -1860,7 +1860,7 @@ static int dlm_tcp_connect(struct connection *con, struct socket *sock,
+ {
+ int ret;
+
+- ret = sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
++ ret = kernel_connect(sock, addr, addr_len, O_NONBLOCK);
+ switch (ret) {
+ case -EINPROGRESS:
+ fallthrough;
+@@ -1900,8 +1900,8 @@ static int dlm_tcp_listen_bind(struct socket *sock)
+
+ /* Bind to our port */
+ make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
+- return sock->ops->bind(sock, (struct sockaddr *)dlm_local_addr[0],
+- addr_len);
++ return kernel_bind(sock, (struct sockaddr *)dlm_local_addr[0],
++ addr_len);
+ }
+
+ static const struct dlm_proto_ops dlm_tcp_ops = {
+@@ -1928,12 +1928,12 @@ static int dlm_sctp_connect(struct connection *con, struct socket *sock,
+ int ret;
+
+ /*
+- * Make sock->ops->connect() function return in specified time,
++ * Make kernel_connect() function return in specified time,
+ * since O_NONBLOCK argument in connect() function does not work here,
+ * then, we should restore the default value of this attribute.
+ */
+ sock_set_sndtimeo(sock->sk, 5);
+- ret = sock->ops->connect(sock, addr, addr_len, 0);
++ ret = kernel_connect(sock, addr, addr_len, 0);
+ sock_set_sndtimeo(sock->sk, 0);
+ if (ret < 0)
+ return ret;
+--
+2.43.0
+
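For reference, the insulation mentioned above comes from
kernel_bind()/kernel_connect() copying the caller's address into a
local sockaddr_storage before invoking the protocol operation, so a BPF
SOCK_ADDR hook can only rewrite the copy. A simplified sketch of that
shape (not the exact net/socket.c code):

	int kernel_bind(struct socket *sock, struct sockaddr *addr,
			int addrlen)
	{
		struct sockaddr_storage address;

		/* BPF hooks may rewrite the copy; the caller's buffer
		 * stays untouched.
		 */
		memcpy(&address, addr, addrlen);

		return sock->ops->bind(sock, (struct sockaddr *)&address,
				       addrlen);
	}
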
--- /dev/null
+From 22213aa4f51009d868f5190a5065e4c418a37509 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Nov 2023 12:41:51 -0800
+Subject: drm/panel-edp: Fix AUO B116XAK01 name and timing
+
+From: Hsin-Yi Wang <hsinyi@chromium.org>
+
+[ Upstream commit fc6e7679296530106ee0954e8ddef1aa58b2e0b5 ]
+
+Rename AUO 0x405c B116XAK01 to B116XAK01.0 and adjust the timing of
+auo_b116xak01 (T3=200, T12=500, T7_max=50) according to the decoded
+EDID and the datasheet.
+
+Fixes: da458286a5e2 ("drm/panel: Add support for AUO B116XAK01 panel")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hsin-Yi Wang <hsinyi@chromium.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Acked-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231107204611.3082200-2-hsinyi@chromium.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-edp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index a163585a2a52..ecd6238749f7 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -975,6 +975,8 @@ static const struct panel_desc auo_b116xak01 = {
+ },
+ .delay = {
+ .hpd_absent = 200,
++ .unprepare = 500,
++ .enable = 50,
+ },
+ };
+
+@@ -1870,7 +1872,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
+- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+
+--
+2.43.0
+
--- /dev/null
+From 62a9d4fa6cd06b61210fe66a7caa23a86360a99a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Dec 2023 12:55:34 +0800
+Subject: erofs: fix lz4 inplace decompression
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 3c12466b6b7bf1e56f9b32c366a3d83d87afb4de ]
+
+Currently EROFS can map another compressed buffer for inplace
+decompression, that was used to handle the cases that some pages of
+compressed data are actually not in-place I/O.
+
+However, like most simple LZ77 algorithms, LZ4 expects the compressed
+data to be arranged at the end of the decompressed buffer, and it
+explicitly uses memmove() to handle overlapping:
+ __________________________________________________________
+|_ direction of decompression --> ____ |_ compressed data _|
+
+Although EROFS arranges compressed data like this, it typically maps two
+individual virtual buffers, so the relative order is uncertain.
+Previously, this was hardly observed since LZ4 only uses memmove() for
+short overlapped literals, and the x86/arm64 memmove implementations
+seem to cover it up completely, so they don't exhibit this issue.
+Juhyung reported that EROFS data corruption can be found on a new Intel
+x86 processor. After some analysis, it seems that recent x86 processors
+with the new FSRM feature expose this issue with "rep movsb".
+
+Let's strictly use the decompressed buffer for lz4 inplace
+decompression for now. Later, as a useful improvement, we could try
+to tie these two buffers together in the correct order.
+
+Reported-and-tested-by: Juhyung Park <qkrwngud825@gmail.com>
+Closes: https://lore.kernel.org/r/CAD14+f2AVKf8Fa2OO1aAUdDNTDsVzzR6ctU_oJSmTyd6zSYR2Q@mail.gmail.com
+Fixes: 0ffd71bcc3a0 ("staging: erofs: introduce LZ4 decompression inplace")
+Fixes: 598162d05080 ("erofs: support decompress big pcluster for lz4 backend")
+Cc: stable <stable@vger.kernel.org> # 5.4+
+Tested-by: Yifan Zhao <zhaoyifan@sjtu.edu.cn>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20231206045534.3920847-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/decompressor.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index 59294182e4cb..0cfad74374ca 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -122,11 +122,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+
+ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+- void *inpage, unsigned int *inputmargin, int *maptype,
+- bool may_inplace)
++ void *inpage, void *out, unsigned int *inputmargin,
++ int *maptype, bool may_inplace)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+- unsigned int omargin, total, i, j;
++ unsigned int omargin, total, i;
+ struct page **in;
+ void *src, *tmp;
+
+@@ -136,12 +136,13 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
+ goto docopy;
+
+- for (i = 0; i < ctx->inpages; ++i) {
+- DBG_BUGON(rq->in[i] == NULL);
+- for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
+- if (rq->out[j] == rq->in[i])
+- goto docopy;
+- }
++ for (i = 0; i < ctx->inpages; ++i)
++ if (rq->out[ctx->outpages - ctx->inpages + i] !=
++ rq->in[i])
++ goto docopy;
++ kunmap_local(inpage);
++ *maptype = 3;
++ return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
+ }
+
+ if (ctx->inpages <= 1) {
+@@ -149,7 +150,6 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ return inpage;
+ }
+ kunmap_local(inpage);
+- might_sleep();
+ src = erofs_vm_map_ram(rq->in, ctx->inpages);
+ if (!src)
+ return ERR_PTR(-ENOMEM);
+@@ -205,12 +205,12 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
+ }
+
+ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+- u8 *out)
++ u8 *dst)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+ bool support_0padding = false, may_inplace = false;
+ unsigned int inputmargin;
+- u8 *headpage, *src;
++ u8 *out, *headpage, *src;
+ int ret, maptype;
+
+ DBG_BUGON(*rq->in == NULL);
+@@ -231,11 +231,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+
+ inputmargin = rq->pageofs_in;
+- src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
++ src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
+ &maptype, may_inplace);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
++ out = dst + rq->pageofs_out;
+ /* legacy format could compress extra data in a pcluster. */
+ if (rq->partial_decoding || !support_0padding)
+ ret = LZ4_decompress_safe_partial(src + inputmargin, out,
+@@ -266,7 +267,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ vm_unmap_ram(src, ctx->inpages);
+ } else if (maptype == 2) {
+ erofs_put_pcpubuf(src);
+- } else {
++ } else if (maptype != 3) {
+ DBG_BUGON(1);
+ return -EFAULT;
+ }
+@@ -309,7 +310,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ }
+
+ dstmap_out:
+- ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
++ ret = z_erofs_lz4_decompress_mem(&ctx, dst);
+ if (!dst_maptype)
+ kunmap_local(dst);
+ else if (dst_maptype == 2)
+--
+2.43.0
+
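The in-place rule being relied on can be summarized as: the compressed
data must sit at the very tail of a buffer that is at least
decompressed_size + margin bytes long. A self-contained sketch of that
layout check, using the same margin formula erofs defines for LZ4 (a
simplified model, not erofs code):

	#include <stdbool.h>
	#include <stddef.h>

	/* same formula as erofs' LZ4_DECOMPRESS_INPLACE_MARGIN() */
	#define LZ4_INPLACE_MARGIN(csize)	(((size_t)(csize) >> 8) + 32)

	/*
	 * In-place decompression is only safe when the compressed data
	 * lies at the tail of the destination buffer with enough safety
	 * margin behind the decompressed end.
	 */
	static bool lz4_inplace_layout_ok(size_t buf_size, size_t dsize,
					  size_t csize)
	{
		return buf_size >= dsize + LZ4_INPLACE_MARGIN(csize);
	}
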
--- /dev/null
+From 3d6e6efc668f90089a63f6ba8a9be3e96c8f2dd2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jun 2023 00:12:39 +0800
+Subject: erofs: get rid of the remaining kmap_atomic()
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 123ec246ebe323d468c5ca996700ea4739d20ddf ]
+
+It's unnecessary to use kmap_atomic() where kmap_local_page() suffices.
+In addition, kmap_atomic() is now deprecated.
+
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Reviewed-by: Yue Hu <huyue2@coolpad.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20230627161240.331-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Stable-dep-of: 3c12466b6b7b ("erofs: fix lz4 inplace decompression")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/decompressor.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index 51b7ac7166d9..59294182e4cb 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -148,7 +148,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ *maptype = 0;
+ return inpage;
+ }
+- kunmap_atomic(inpage);
++ kunmap_local(inpage);
+ might_sleep();
+ src = erofs_vm_map_ram(rq->in, ctx->inpages);
+ if (!src)
+@@ -162,7 +162,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ src = erofs_get_pcpubuf(ctx->inpages);
+ if (!src) {
+ DBG_BUGON(1);
+- kunmap_atomic(inpage);
++ kunmap_local(inpage);
+ return ERR_PTR(-EFAULT);
+ }
+
+@@ -173,9 +173,9 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
+
+ if (!inpage)
+- inpage = kmap_atomic(*in);
++ inpage = kmap_local_page(*in);
+ memcpy(tmp, inpage + *inputmargin, page_copycnt);
+- kunmap_atomic(inpage);
++ kunmap_local(inpage);
+ inpage = NULL;
+ tmp += page_copycnt;
+ total -= page_copycnt;
+@@ -214,7 +214,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ int ret, maptype;
+
+ DBG_BUGON(*rq->in == NULL);
+- headpage = kmap_atomic(*rq->in);
++ headpage = kmap_local_page(*rq->in);
+
+ /* LZ4 decompression inplace is only safe if zero_padding is enabled */
+ if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
+@@ -223,7 +223,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ min_t(unsigned int, rq->inputsize,
+ EROFS_BLKSIZ - rq->pageofs_in));
+ if (ret) {
+- kunmap_atomic(headpage);
++ kunmap_local(headpage);
+ return ret;
+ }
+ may_inplace = !((rq->pageofs_in + rq->inputsize) &
+@@ -261,7 +261,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+
+ if (maptype == 0) {
+- kunmap_atomic(headpage);
++ kunmap_local(headpage);
+ } else if (maptype == 1) {
+ vm_unmap_ram(src, ctx->inpages);
+ } else if (maptype == 2) {
+@@ -289,7 +289,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ /* one optimized fast path only for non bigpcluster cases yet */
+ if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
+ DBG_BUGON(!*rq->out);
+- dst = kmap_atomic(*rq->out);
++ dst = kmap_local_page(*rq->out);
+ dst_maptype = 0;
+ goto dstmap_out;
+ }
+@@ -311,7 +311,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ dstmap_out:
+ ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
+ if (!dst_maptype)
+- kunmap_atomic(dst);
++ kunmap_local(dst);
+ else if (dst_maptype == 2)
+ vm_unmap_ram(dst, ctx.outpages);
+ return ret;
+--
+2.43.0
+
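For context, kmap_local_page() keeps the same bracketed usage as
kmap_atomic(), but the mapping is strictly thread-local and, unlike
kmap_atomic(), does not disable preemption or pagefaults; nested
mappings must be released in reverse (stack) order. A minimal sketch of
the conversion pattern, assuming page, dst, offset and len are in
scope:

	void *va = kmap_local_page(page);	/* was: kmap_atomic(page) */

	memcpy(dst, va + offset, len);		/* use the mapping */
	kunmap_local(va);			/* was: kunmap_atomic(va) */
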
--- /dev/null
+From 197fdd1525a929eee50ca8c5fa9acaaeb5ba5c72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Sep 2023 09:57:53 +0200
+Subject: fs/pipe: move check to pipe_has_watch_queue()
+
+From: Max Kellermann <max.kellermann@ionos.com>
+
+[ Upstream commit b4bd6b4bac8edd61eb8f7b836969d12c0c6af165 ]
+
+This declutters the code by reducing the number of #ifdefs and makes
+the watch_queue checks simpler. This has no runtime effect; the
+machine code is identical.
+
+Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
+Message-Id: <20230921075755.1378787-2-max.kellermann@ionos.com>
+Reviewed-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Stable-dep-of: e95aada4cb93 ("pipe: wakeup wr_wait after setting max_usage")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/pipe.c | 12 +++---------
+ include/linux/pipe_fs_i.h | 16 ++++++++++++++++
+ 2 files changed, 19 insertions(+), 9 deletions(-)
+
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 42c7ff41c2db..e8082ffe5171 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -436,12 +436,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+ goto out;
+ }
+
+-#ifdef CONFIG_WATCH_QUEUE
+- if (pipe->watch_queue) {
++ if (pipe_has_watch_queue(pipe)) {
+ ret = -EXDEV;
+ goto out;
+ }
+-#endif
+
+ /*
+ * If it wasn't empty we try to merge new data into
+@@ -1320,10 +1318,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+ unsigned int nr_slots, size;
+ long ret = 0;
+
+-#ifdef CONFIG_WATCH_QUEUE
+- if (pipe->watch_queue)
++ if (pipe_has_watch_queue(pipe))
+ return -EBUSY;
+-#endif
+
+ size = round_pipe_size(arg);
+ nr_slots = size >> PAGE_SHIFT;
+@@ -1375,10 +1371,8 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
+
+ if (file->f_op != &pipefifo_fops || !pipe)
+ return NULL;
+-#ifdef CONFIG_WATCH_QUEUE
+- if (for_splice && pipe->watch_queue)
++ if (for_splice && pipe_has_watch_queue(pipe))
+ return NULL;
+-#endif
+ return pipe;
+ }
+
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 28b3c6a67397..1f1e7ae95320 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -124,6 +124,22 @@ struct pipe_buf_operations {
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+
++/**
++ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
++ * i.e. it was created with O_NOTIFICATION_PIPE
++ * @pipe: The pipe to check
++ *
++ * Return: true if pipe is a watch queue, false otherwise.
++ */
++static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
++{
++#ifdef CONFIG_WATCH_QUEUE
++ return pipe->watch_queue != NULL;
++#else
++ return false;
++#endif
++}
++
+ /**
+ * pipe_empty - Return true if the pipe is empty
+ * @head: The pipe ring head pointer
+--
+2.43.0
+
--- /dev/null
+From 91c8e2c4789af2e0270e12c242589bee5ddc3904 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Nov 2023 17:46:08 +0800
+Subject: media: ov13b10: Enable runtime PM before registering async sub-device
+
+From: Bingbu Cao <bingbu.cao@intel.com>
+
+[ Upstream commit 7b0454cfd8edb3509619407c3b9f78a6d0dee1a5 ]
+
+The sensor device may be accessed right after its async sub-device is
+registered; for example, ipu-bridge will try to power up the sensor via
+the sensor client device's runtime PM from the async notifier callback.
+If runtime PM is not enabled by then, this will fail.
+
+So runtime PM should be ready before the async sub-device is registered
+and becomes accessible to others.
+
+Fixes: 7ee850546822 ("media: Add sensor driver support for the ov13b10 camera.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/ov13b10.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
+index 722f490f9db4..368a3c2bfe34 100644
+--- a/drivers/media/i2c/ov13b10.c
++++ b/drivers/media/i2c/ov13b10.c
+@@ -1439,24 +1439,27 @@ static int ov13b10_probe(struct i2c_client *client)
+ goto error_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+- if (ret < 0)
+- goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+-
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
++ if (ret < 0)
++ goto error_media_entity_runtime_pm;
++
+ return 0;
+
+-error_media_entity:
++error_media_entity_runtime_pm:
++ pm_runtime_disable(&client->dev);
++ if (full_power)
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&ov13b->sd.entity);
+
+ error_handler_free:
+@@ -1476,6 +1479,7 @@ static void ov13b10_remove(struct i2c_client *client)
+ ov13b10_free_controls(ov13b);
+
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ }
+
+ static const struct dev_pm_ops ov13b10_pm_ops = {
+--
+2.43.0
+
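The ordering rule applied here generalizes: a device must not become
reachable by other drivers before runtime PM is ready, and the error
path must unwind in reverse order. A condensed sketch of the resulting
probe shape, assuming dev, sd, full_power and ret are in scope:

	/* Runtime PM first: the async notifier may resume us at once. */
	if (full_power)
		pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_idle(dev);

	/* Only now publish the sub-device to the rest of the system. */
	ret = v4l2_async_register_subdev_sensor(sd);
	if (ret < 0) {
		pm_runtime_disable(dev);
		if (full_power)
			pm_runtime_set_suspended(dev);
		return ret;
	}
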
--- /dev/null
+From 024a3a8b148461ff9323e2f08e9aa1a695fa95d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 13:14:36 +0100
+Subject: media: ov13b10: Support device probe in non-zero ACPI D state
+
+From: Arec Kao <arec.kao@intel.com>
+
+[ Upstream commit 1af2f618acc1486e3b46cb54cb4891d47bb80c61 ]
+
+Tell ACPI device PM code that the driver supports the device being in
+non-zero ACPI D state when the driver's probe function is entered.
+
+Also do identification on the first access of the device, whether in
+probe or when starting streaming.
+
+Signed-off-by: Arec Kao <arec.kao@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Stable-dep-of: 7b0454cfd8ed ("media: ov13b10: Enable runtime PM before registering async sub-device")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/ov13b10.c | 74 +++++++++++++++++++++++--------------
+ 1 file changed, 47 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
+index 549e5d93e568..722f490f9db4 100644
+--- a/drivers/media/i2c/ov13b10.c
++++ b/drivers/media/i2c/ov13b10.c
+@@ -589,6 +589,9 @@ struct ov13b10 {
+
+ /* Streaming on/off */
+ bool streaming;
++
++ /* True if the device has been identified */
++ bool identified;
+ };
+
+ #define to_ov13b10(_sd) container_of(_sd, struct ov13b10, sd)
+@@ -1023,12 +1026,42 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
+ return 0;
+ }
+
++/* Verify chip ID */
++static int ov13b10_identify_module(struct ov13b10 *ov13b)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
++ int ret;
++ u32 val;
++
++ if (ov13b->identified)
++ return 0;
++
++ ret = ov13b10_read_reg(ov13b, OV13B10_REG_CHIP_ID,
++ OV13B10_REG_VALUE_24BIT, &val);
++ if (ret)
++ return ret;
++
++ if (val != OV13B10_CHIP_ID) {
++ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
++ OV13B10_CHIP_ID, val);
++ return -EIO;
++ }
++
++ ov13b->identified = true;
++
++ return 0;
++}
++
+ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
+ {
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ const struct ov13b10_reg_list *reg_list;
+ int ret, link_freq_index;
+
++ ret = ov13b10_identify_module(ov13b);
++ if (ret)
++ return ret;
++
+ /* Get out of from software reset */
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
+ OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
+@@ -1144,27 +1177,6 @@ static int __maybe_unused ov13b10_resume(struct device *dev)
+ return ret;
+ }
+
+-/* Verify chip ID */
+-static int ov13b10_identify_module(struct ov13b10 *ov13b)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+- int ret;
+- u32 val;
+-
+- ret = ov13b10_read_reg(ov13b, OV13B10_REG_CHIP_ID,
+- OV13B10_REG_VALUE_24BIT, &val);
+- if (ret)
+- return ret;
+-
+- if (val != OV13B10_CHIP_ID) {
+- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+- OV13B10_CHIP_ID, val);
+- return -EIO;
+- }
+-
+- return 0;
+-}
+-
+ static const struct v4l2_subdev_video_ops ov13b10_video_ops = {
+ .s_stream = ov13b10_set_stream,
+ };
+@@ -1379,6 +1391,7 @@ static int ov13b10_check_hwcfg(struct device *dev)
+ static int ov13b10_probe(struct i2c_client *client)
+ {
+ struct ov13b10 *ov13b;
++ bool full_power;
+ int ret;
+
+ /* Check HW config */
+@@ -1395,11 +1408,14 @@ static int ov13b10_probe(struct i2c_client *client)
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
+
+- /* Check module identity */
+- ret = ov13b10_identify_module(ov13b);
+- if (ret) {
+- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+- return ret;
++ full_power = acpi_dev_state_d0(&client->dev);
++ if (full_power) {
++ /* Check module identity */
++ ret = ov13b10_identify_module(ov13b);
++ if (ret) {
++ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
++ return ret;
++ }
+ }
+
+ /* Set default mode to max resolution */
+@@ -1431,7 +1447,10 @@ static int ov13b10_probe(struct i2c_client *client)
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+- pm_runtime_set_active(&client->dev);
++
++ /* Set the device's state to active if it's in D0 state. */
++ if (full_power)
++ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+@@ -1480,6 +1499,7 @@ static struct i2c_driver ov13b10_i2c_driver = {
+ },
+ .probe_new = ov13b10_probe,
+ .remove = ov13b10_remove,
++ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
+ };
+
+ module_i2c_driver(ov13b10_i2c_driver);
+--
+2.43.0
+
--- /dev/null
+From 2073110fe8693c6bc6a3cfc5f198fe444b5c2397 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jan 2024 14:58:16 +1100
+Subject: nfsd: fix RELEASE_LOCKOWNER
+
+From: NeilBrown <neilb@suse.de>
+
+[ Upstream commit edcf9725150e42beeca42d085149f4c88fa97afd ]
+
+The test on so_count in nfsd4_release_lockowner() is nonsense and
+harmful. Revert to using check_for_locks(), changing that to not sleep.
+
+First: harmful.
+As is documented in the kdoc comment for nfsd4_release_lockowner(), the
+test on so_count can transiently return a false positive resulting in a
+return of NFS4ERR_LOCKS_HELD when in fact no locks are held. This is
+clearly a protocol violation and with the Linux NFS client it can cause
+incorrect behaviour.
+
+If RELEASE_LOCKOWNER is sent while some other thread is still
+processing a LOCK request which failed because, at the time that request
+was received, the given owner held a conflicting lock, then the nfsd
+thread processing that LOCK request can hold a reference (conflock) to
+the lock owner that causes nfsd4_release_lockowner() to return an
+incorrect error.
+
+The Linux NFS client ignores that NFS4ERR_LOCKS_HELD error because it
+never sends NFS4_RELEASE_LOCKOWNER without first releasing any locks, so
+it knows that the error is impossible. It assumes the lock owner was in
+fact released so it feels free to use the same lock owner identifier in
+some later locking request.
+
+When it does reuse a lock owner identifier for which a previous RELEASE
+failed, it will naturally use a lock_seqid of zero. However the server,
+which didn't release the lock owner, will expect a larger lock_seqid and
+so will respond with NFS4ERR_BAD_SEQID.
+
+So clearly it is harmful to allow a false positive, which testing
+so_count allows.
+
+The test is nonsense because ... well... it doesn't mean anything.
+
+so_count is the sum of three different counts.
+1/ the set of states listed on so_stateids
+2/ the set of active vfs locks owned by any of those states
+3/ various transient counts such as for conflicting locks.
+
+When it is tested against '2' it is clear that one of these is the
+transient reference obtained by find_lockowner_str_locked(). It is not
+clear what the other one is expected to be.
+
+In practice, the count is often 2 because there is precisely one state
+on so_stateids. If there were more, this would fail.
+
+In my testing I see two circumstances when RELEASE_LOCKOWNER is called.
+In one case, CLOSE is called before RELEASE_LOCKOWNER. That results in
+all the lock states being removed, and so the lockowner being discarded
+(it is removed when there are no more references which usually happens
+when the lock state is discarded). When nfsd4_release_lockowner() finds
+that the lock owner doesn't exist, it returns success.
+
+The other case shows an so_count of '2' and precisely one state listed
+in so_stateid. It appears that the Linux client uses a separate lock
+owner for each file resulting in one lock state per lock owner, so this
+test on '2' is safe. For another client it might not be safe.
+
+So this patch changes check_for_locks() to use the (newish)
+find_any_file_locked() so that it doesn't take a reference on the
+nfs4_file and so never calls nfsd_file_put(), and so never sleeps. With
+this change it is safe to restore the use of check_for_locks() rather
+than testing so_count against the mysterious '2'.
+
+Fixes: ce3c4ad7f4ce ("NFSD: Fix possible sleep during nfsd4_release_lockowner()")
+Signed-off-by: NeilBrown <neilb@suse.de>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Cc: stable@vger.kernel.org # v6.2+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4state.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index faecdbfa01a2..0443fe4e29e1 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7736,14 +7736,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ {
+ struct file_lock *fl;
+ int status = false;
+- struct nfsd_file *nf = find_any_file(fp);
++ struct nfsd_file *nf;
+ struct inode *inode;
+ struct file_lock_context *flctx;
+
++ spin_lock(&fp->fi_lock);
++ nf = find_any_file_locked(fp);
+ if (!nf) {
+ /* Any valid lock stateid should have some sort of access */
+ WARN_ON_ONCE(1);
+- return status;
++ goto out;
+ }
+
+ inode = locks_inode(nf->nf_file);
+@@ -7759,7 +7761,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ }
+ spin_unlock(&flctx->flc_lock);
+ }
+- nfsd_file_put(nf);
++out:
++ spin_unlock(&fp->fi_lock);
+ return status;
+ }
+
+@@ -7769,10 +7772,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ * @cstate: NFSv4 COMPOUND state
+ * @u: RELEASE_LOCKOWNER arguments
+ *
+- * The lockowner's so_count is bumped when a lock record is added
+- * or when copying a conflicting lock. The latter case is brief,
+- * but can lead to fleeting false positives when looking for
+- * locks-in-use.
++ * Check if there are any locks still held and, if not, free the lockowner
++ * and any lock state that is owned.
+ *
+ * Return values:
+ * %nfs_ok: lockowner released or not found
+@@ -7808,10 +7809,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ spin_unlock(&clp->cl_lock);
+ return nfs_ok;
+ }
+- if (atomic_read(&lo->lo_owner.so_count) != 2) {
+- spin_unlock(&clp->cl_lock);
+- nfs4_put_stateowner(&lo->lo_owner);
+- return nfserr_locks_held;
++
++ list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
++ if (check_for_locks(stp->st_stid.sc_file, lo)) {
++ spin_unlock(&clp->cl_lock);
++ nfs4_put_stateowner(&lo->lo_owner);
++ return nfserr_locks_held;
++ }
+ }
+ unhash_lockowner_locked(lo);
+ while (!list_empty(&lo->lo_owner.so_stateids)) {
+--
+2.43.0
+
--- /dev/null
+From 203b5930ec291bbb137c6a5bac4997c6b04c9641 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Dec 2023 11:11:28 +0100
+Subject: pipe: wakeup wr_wait after setting max_usage
+
+From: Lukas Schauer <lukas@schauer.dev>
+
+[ Upstream commit e95aada4cb93d42e25c30a0ef9eb2923d9711d4a ]
+
+Commit c73be61cede5 ("pipe: Add general notification queue support")
+introduced a regression that would lock up resized pipes under certain
+conditions. See the reproducer in [1].
+
+That commit moved the resizing of the pipe ring to a different
+function, which moved the wakeup for pipe->wr_wait to before the point
+where pipe->max_usage is actually raised. If a pipe was full before the
+resize occurred, the wakeup would never actually trigger pipe_write.
+
+Set @max_usage and @nr_accounted before waking writers if this isn't a
+watch queue.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=212295 [1]
+Link: https://lore.kernel.org/r/20231201-orchideen-modewelt-e009de4562c6@brauner
+Fixes: c73be61cede5 ("pipe: Add general notification queue support")
+Reviewed-by: David Howells <dhowells@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Lukas Schauer <lukas@schauer.dev>
+[Christian Brauner <brauner@kernel.org>: rewrite to account for watch queues]
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/pipe.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/pipe.c b/fs/pipe.c
+index e8082ffe5171..9873a6030df5 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -1301,6 +1301,11 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ pipe->tail = tail;
+ pipe->head = head;
+
++ if (!pipe_has_watch_queue(pipe)) {
++ pipe->max_usage = nr_slots;
++ pipe->nr_accounted = nr_slots;
++ }
++
+ spin_unlock_irq(&pipe->rd_wait.lock);
+
+ /* This might have made more room for writers */
+@@ -1352,8 +1357,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+ if (ret < 0)
+ goto out_revert_acct;
+
+- pipe->max_usage = nr_slots;
+- pipe->nr_accounted = nr_slots;
+ return pipe->max_usage * PAGE_SIZE;
+
+ out_revert_acct:
+--
+2.43.0
+
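A rough userspace illustration of the scenario (a sketch, not the
reproducer from [1]): a writer blocked on a full pipe only makes
progress after F_SETPIPE_SZ grows the ring if pipe->max_usage has
already been raised when the wr_wait wakeup fires.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		char page[4096];

		memset(page, 'x', sizeof(page));
		if (pipe(fds))
			return 1;

		/* Shrink the ring to one page and fill it up. */
		fcntl(fds[1], F_SETPIPE_SZ, 4096);
		write(fds[1], page, sizeof(page));

		if (fork() == 0) {
			/* Child blocks in pipe_write() on the full pipe. */
			write(fds[1], page, sizeof(page));
			_exit(0);
		}

		sleep(1);			/* let the child block */
		fcntl(fds[1], F_SETPIPE_SZ, 65536); /* grow: must wake writer */

		wait(NULL);			/* hangs on affected kernels */
		puts("writer woke up");
		return 0;
	}
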
--- /dev/null
+From 5ababf81ae4989d57a5f313e24847ad2d78b246b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Mar 2023 06:19:35 +0800
+Subject: PM: core: Remove unnecessary (void *) conversions
+
+From: Li zeming <zeming@nfschina.com>
+
+[ Upstream commit 73d73f5ee7fb0c42ff87091d105bee720a9565f1 ]
+
+Assignments from pointer variables of type (void *) do not require
+explicit type casts, so remove such type casts from the code in
+drivers/base/power/main.c where applicable.
+
+Signed-off-by: Li zeming <zeming@nfschina.com>
+[ rjw: Subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 7839d0078e0d ("PM: sleep: Fix possible deadlocks in core system-wide PM code")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/power/main.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index c50139207794..f85f3515c258 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -679,7 +679,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+
+ static void async_resume_noirq(void *data, async_cookie_t cookie)
+ {
+- struct device *dev = (struct device *)data;
++ struct device *dev = data;
+ int error;
+
+ error = device_resume_noirq(dev, pm_transition, true);
+@@ -816,7 +816,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
+
+ static void async_resume_early(void *data, async_cookie_t cookie)
+ {
+- struct device *dev = (struct device *)data;
++ struct device *dev = data;
+ int error;
+
+ error = device_resume_early(dev, pm_transition, true);
+@@ -980,7 +980,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+
+ static void async_resume(void *data, async_cookie_t cookie)
+ {
+- struct device *dev = (struct device *)data;
++ struct device *dev = data;
+ int error;
+
+ error = device_resume(dev, pm_transition, true);
+@@ -1269,7 +1269,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
+
+ static void async_suspend_noirq(void *data, async_cookie_t cookie)
+ {
+- struct device *dev = (struct device *)data;
++ struct device *dev = data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+@@ -1450,7 +1450,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
+
+ static void async_suspend_late(void *data, async_cookie_t cookie)
+ {
+- struct device *dev = (struct device *)data;
++ struct device *dev = data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+@@ -1727,7 +1727,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+
+ static void async_suspend(void *data, async_cookie_t cookie)
+ {
+- struct device *dev = (struct device *)data;
++ struct device *dev = data;
+ int error;
+
+ error = __device_suspend(dev, pm_transition, true);
+--
+2.43.0
+
--- /dev/null
+From 6b67f487079b027961420d52485fcc57865ee221 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Dec 2023 21:41:06 +0100
+Subject: PM: sleep: Fix possible deadlocks in core system-wide PM code
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 7839d0078e0d5e6cc2fa0b0dfbee71de74f1e557 ]
+
+It is reported that in low-memory situations the system-wide resume core
+code deadlocks, because async_schedule_dev() executes its argument
+function synchronously if it cannot allocate memory (and not only in
+that case) and that function attempts to acquire a mutex that is already
+held. Executing the argument function synchronously from within
+dpm_async_fn() may also be problematic for ordering reasons (it may
+cause a consumer device's resume callback to be invoked before a
+requisite supplier device's one, for example).
+
+Address this by changing the code in question to use
+async_schedule_dev_nocall() for scheduling the asynchronous
+execution of device suspend and resume functions and to directly
+run them synchronously if async_schedule_dev_nocall() returns false.
+
+Link: https://lore.kernel.org/linux-pm/ZYvjiqX6EsL15moe@perf/
+Reported-by: Youngmin Nam <youngmin.nam@samsung.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Cc: 5.7+ <stable@vger.kernel.org> # 5.7+: 6aa09a5bccd8 async: Split async_schedule_node_domain()
+Cc: 5.7+ <stable@vger.kernel.org> # 5.7+: 7d4b5d7a37bd async: Introduce async_schedule_dev_nocall()
+Cc: 5.7+ <stable@vger.kernel.org> # 5.7+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/power/main.c | 148 ++++++++++++++++++--------------------
+ 1 file changed, 68 insertions(+), 80 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index f85f3515c258..9c5a5f4dba5a 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
+ }
+
+ /**
+- * device_resume_noirq - Execute a "noirq resume" callback for given device.
++ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -655,7 +655,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
+ Out:
+ complete_all(&dev->power.completion);
+ TRACE_RESUME(error);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_noirq++;
++ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
++ }
+ }
+
+ static bool is_async(struct device *dev)
+@@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+ {
+ reinit_completion(&dev->power.completion);
+
+- if (is_async(dev)) {
+- get_device(dev);
+- async_schedule_dev(func, dev);
++ if (!is_async(dev))
++ return false;
++
++ get_device(dev);
++
++ if (async_schedule_dev_nocall(func, dev))
+ return true;
+- }
++
++ put_device(dev);
+
+ return false;
+ }
+@@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+ static void async_resume_noirq(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_noirq(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_noirq(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_noirq(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_noirq))
++ return;
++
++ __device_resume_noirq(dev, pm_transition, false);
++}
++
+ static void dpm_noirq_resume_devices(pm_message_t state)
+ {
+ struct device *dev;
+@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+- dpm_async_fn(dev, async_resume_noirq);
+-
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
+ get_device(dev);
+@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_noirq(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_noirq++;
+- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " noirq", error);
+- }
+- }
++ device_resume_noirq(dev);
+
+ put_device(dev);
+
+@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
+ }
+
+ /**
+- * device_resume_early - Execute an "early resume" callback for given device.
++ * __device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -811,21 +807,31 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
+
+ pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_early++;
++ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async early" : " early", error);
++ }
+ }
+
+ static void async_resume_early(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_early(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_early(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_early(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_early))
++ return;
++
++ __device_resume_early(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state)
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+- dpm_async_fn(dev, async_resume_early);
+-
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
+ get_device(dev);
+@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state)
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_early(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_early++;
+- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " early", error);
+- }
+- }
++ device_resume_early(dev);
+
+ put_device(dev);
+
+@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
+ EXPORT_SYMBOL_GPL(dpm_resume_start);
+
+ /**
+- * device_resume - Execute "resume" callbacks for given device.
++ * __device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ */
+-static int device_resume(struct device *dev, pm_message_t state, bool async)
++static void __device_resume(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+
+ TRACE_RESUME(error);
+
+- return error;
++ if (error) {
++ suspend_stats.failed_resume++;
++ dpm_save_failed_step(SUSPEND_RESUME);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async" : "", error);
++ }
+ }
+
+ static void async_resume(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+
+- error = device_resume(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
++ __device_resume(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume))
++ return;
++
++ __device_resume(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
+ pm_transition = state;
+ async_error = 0;
+
+- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+- dpm_async_fn(dev, async_resume);
+-
+ while (!list_empty(&dpm_suspended_list)) {
+ dev = to_device(dpm_suspended_list.next);
++
+ get_device(dev);
+- if (!is_async(dev)) {
+- int error;
+
+- mutex_unlock(&dpm_list_mtx);
++ mutex_unlock(&dpm_list_mtx);
++
++ device_resume(dev);
+
+- error = device_resume(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume++;
+- dpm_save_failed_step(SUSPEND_RESUME);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, "", error);
+- }
++ mutex_lock(&dpm_list_mtx);
+
+- mutex_lock(&dpm_list_mtx);
+- }
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+--
+2.43.0
+
--- /dev/null
+From 0b51c8ef0431c034ad17a313ebefc54fa61c9873 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 28 Jan 2024 16:20:40 -0500
+Subject: Revert "powerpc/64s: Increase default stack size to 32KB"
+
+This reverts commit 9ccf64e763aca088b0d25c1274af42b1a6a45135 which is
+upstream commit 18f14afe281648e31ed35c9ad2fcb724c4838ad9.
+
+Breaks the build:
+
+arch/powerpc/kvm/book3s_hv_rmhandlers.S:2689: Error: operand out of range (0x0000000000008310 is not between 0xffffffffffff8001 and 0x0000000000008000)
+make[3]: *** [scripts/Makefile.build:382: arch/powerpc/kvm/book3s_hv_rmhandlers.o] Error 1
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 2c94f9cf1ce0..6050e6e10d32 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -806,7 +806,6 @@ config THREAD_SHIFT
+ int "Thread shift" if EXPERT
+ range 13 15
+ default "15" if PPC_256K_PAGES
+- default "15" if PPC_PSERIES || PPC_POWERNV
+ default "14" if PPC64
+ default "13"
+ help
+--
+2.43.0
+
--- /dev/null
+From 9c0e6a27c6e2c9f67274d2c36c2cd2aca3b3fd18 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 20:43:18 +0206
+Subject: serial: core: Provide port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit b0af4bcb49464c221ad5f95d40f2b1b252ceedcc ]
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure, which aims
+to support threaded and atomic consoles, requires marking sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also makes it possible to prevent the regular UART code from modifying UART
+registers while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+Provide wrapper functions for spin_[un]lock*(port->lock) invocations so
+that the console mechanics can be applied later on in a single place and
+the same logic does not have to be copied all over the drivers.
+
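+A driver-side use of the wrappers is mechanical (illustrative sketch for
+an arbitrary struct uart_port *port):
+
+	unsigned long flags;
+
+	/* was: spin_lock_irqsave(&port->lock, flags); */
+	uart_port_lock_irqsave(port, &flags);
+	/* ... read/modify UART registers ... */
+	uart_port_unlock_irqrestore(port, flags);
+	/* was: spin_unlock_irqrestore(&port->lock, flags); */
+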
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-2-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 9915753037eb ("serial: sc16is7xx: fix unconditional activation of THRI interrupt")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/serial_core.h | 79 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 79 insertions(+)
+
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index d657f2a42a7b..6055dbe5c12e 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -583,6 +583,85 @@ struct uart_port {
+ void *private_data; /* generic platform data pointer */
+ };
+
++/**
++ * uart_port_lock - Lock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock(struct uart_port *up)
++{
++ spin_lock(&up->lock);
++}
++
++/**
++ * uart_port_lock_irq - Lock the UART port and disable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock_irq(struct uart_port *up)
++{
++ spin_lock_irq(&up->lock);
++}
++
++/**
++ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ */
++static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ spin_lock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_trylock - Try to lock the UART port
++ * @up: Pointer to UART port structure
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock(struct uart_port *up)
++{
++ return spin_trylock(&up->lock);
++}
++
++/**
++ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ return spin_trylock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_unlock - Unlock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock(struct uart_port *up)
++{
++ spin_unlock(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock_irq(struct uart_port *up)
++{
++ spin_unlock_irq(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
++ * @up: Pointer to UART port structure
++ * @flags: The saved interrupt flags for restore
++ */
++static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
++{
++ spin_unlock_irqrestore(&up->lock, flags);
++}
++
+ static inline int serial_port_in(struct uart_port *up, int offset)
+ {
+ return up->serial_in(up, offset);
+--
+2.43.0
+
--- /dev/null
+From 17ae556ec1a78a8b46e3b8f8823696b37d182edc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Dec 2023 12:13:53 -0500
+Subject: serial: sc16is7xx: fix unconditional activation of THRI interrupt
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+[ Upstream commit 9915753037eba7135b209fef4f2afeca841af816 ]
+
+Commit cc4c1d05eb10 ("sc16is7xx: Properly resume TX after stop") changed
+the behavior to unconditionally set the THRI interrupt in sc16is7xx_tx_proc().
+
+For example when sending a 65 bytes message, and assuming the Tx FIFO is
+initially empty, sc16is7xx_handle_tx() will write the first 64 bytes of the
+message to the FIFO and sc16is7xx_tx_proc() will then activate THRI. When
+the THRI IRQ is fired, the driver will write the remaining byte of the
+message to the FIFO, and disable THRI by calling sc16is7xx_stop_tx().
+
+When sending a 2 bytes message, sc16is7xx_handle_tx() will write the 2
+bytes of the message to the FIFO and call sc16is7xx_stop_tx(), disabling
+THRI. After sc16is7xx_handle_tx() exits, control returns to
+sc16is7xx_tx_proc() which will unconditionally set THRI. When the THRI IRQ
+is fired, the driver simply acknowledges the interrupt and does nothing
+more, since all the data has already been written to the FIFO. This results
+in 2 register writes and 4 register reads all for nothing and taking
+precious cycles from the I2C/SPI bus.
+
+Fix this by enabling the THRI interrupt only when we fill the Tx FIFO to
+its maximum capacity and there are remaining bytes to send in the message.
+
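+In code terms (sketch of the resulting tail of sc16is7xx_handle_tx(), see
+the hunk below): once the handler has written as much of the message as
+fits, a non-empty circular buffer is exactly the "FIFO filled to capacity
+with data left over" case:
+
+	if (uart_circ_empty(xmit))
+		sc16is7xx_stop_tx(port);	/* everything is in the FIFO */
+	else
+		sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT); /* more to send */
+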
+Fixes: cc4c1d05eb10 ("sc16is7xx: Properly resume TX after stop")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://lore.kernel.org/r/20231211171353.2901416-7-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 4def1b7266f6..e331b57d6d7d 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -678,6 +678,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+
+ if (uart_circ_empty(xmit))
+ sc16is7xx_stop_tx(port);
++ else
++ sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+ uart_port_unlock_irqrestore(port, flags);
+ }
+
+@@ -804,7 +806,6 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ {
+ struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+- unsigned long flags;
+
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ (port->rs485.delay_rts_before_send > 0))
+@@ -813,10 +814,6 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ mutex_lock(&one->efr_lock);
+ sc16is7xx_handle_tx(port);
+ mutex_unlock(&one->efr_lock);
+-
+- uart_port_lock_irqsave(port, &flags);
+- sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+- uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+--
+2.43.0
+
--- /dev/null
+From 87cc8277349c1794add435a6baa115067a1c8aa2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 20:44:12 +0206
+Subject: serial: sc16is7xx: Use port lock wrappers
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit b465848be8a652e2c5fefe102661fb660cff8497 ]
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure, which aims
+to support threaded and atomic consoles, requires marking sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also makes it possible to prevent the regular UART code from modifying UART
+registers while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-56-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 9915753037eb ("serial: sc16is7xx: fix unconditional activation of THRI interrupt")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 40 +++++++++++++++++-----------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index ab9159441d02..4def1b7266f6 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -641,9 +641,9 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_stop_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+@@ -672,13 +672,13 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ sc16is7xx_fifo_write(port, to_send);
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ sc16is7xx_stop_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
+@@ -709,7 +709,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+
+ one->old_mctrl = status;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
+ port->icount.rng++;
+ if (changed & TIOCM_DSR)
+@@ -720,7 +720,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ uart_handle_cts_change(port, status & TIOCM_CTS);
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+@@ -814,9 +814,9 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ sc16is7xx_handle_tx(port);
+ mutex_unlock(&one->efr_lock);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+@@ -827,14 +827,14 @@ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+ struct serial_rs485 *rs485 = &port->rs485;
+ unsigned long irqflags;
+
+- spin_lock_irqsave(&port->lock, irqflags);
++ uart_port_lock_irqsave(port, &irqflags);
+ if (rs485->flags & SER_RS485_ENABLED) {
+ efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
+
+ if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
+ }
+- spin_unlock_irqrestore(&port->lock, irqflags);
++ uart_port_unlock_irqrestore(port, irqflags);
+
+ sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
+ }
+@@ -845,10 +845,10 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
+ struct sc16is7xx_one_config config;
+ unsigned long irqflags;
+
+- spin_lock_irqsave(&one->port.lock, irqflags);
++ uart_port_lock_irqsave(&one->port, &irqflags);
+ config = one->config;
+ memset(&one->config, 0, sizeof(one->config));
+- spin_unlock_irqrestore(&one->port.lock, irqflags);
++ uart_port_unlock_irqrestore(&one->port, irqflags);
+
+ if (config.flags & SC16IS7XX_RECONF_MD) {
+ u8 mcr = 0;
+@@ -954,18 +954,18 @@ static void sc16is7xx_throttle(struct uart_port *port)
+ * value set in MCR register. Stop reading data from RX FIFO so the
+ * AutoRTS feature will de-activate RTS output.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sc16is7xx_unthrottle(struct uart_port *port)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
+@@ -1103,7 +1103,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ /* Setup baudrate generator */
+ baud = sc16is7xx_set_baud(port, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Update timeout according to new baud rate */
+ uart_update_timeout(port, termios->c_cflag, baud);
+@@ -1111,7 +1111,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ sc16is7xx_enable_ms(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
+@@ -1197,9 +1197,9 @@ static int sc16is7xx_startup(struct uart_port *port)
+ sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
+
+ /* Enable modem status polling */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_enable_ms(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+--
+2.43.0
+
drm-bridge-nxp-ptn3460-simplify-some-error-checking.patch
drm-amd-display-port-dentist-hang-and-tdr-fixes-to-otg-disable-w-a.patch
drm-amdgpu-pm-fix-the-power-source-flag-error.patch
+erofs-get-rid-of-the-remaining-kmap_atomic.patch
+erofs-fix-lz4-inplace-decompression.patch
+media-ov13b10-support-device-probe-in-non-zero-acpi-.patch
+media-ov13b10-enable-runtime-pm-before-registering-a.patch
+bus-mhi-ep-do-not-allocate-event-ring-element-on-sta.patch
+pm-core-remove-unnecessary-void-conversions.patch
+pm-sleep-fix-possible-deadlocks-in-core-system-wide-.patch
+thermal-intel-hfi-refactor-enabling-code-into-helper.patch
+thermal-intel-hfi-disable-an-hfi-instance-when-all-i.patch
+thermal-intel-hfi-add-syscore-callbacks-for-system-w.patch
+fs-pipe-move-check-to-pipe_has_watch_queue.patch
+pipe-wakeup-wr_wait-after-setting-max_usage.patch
+arm-dts-qcom-sdx55-fix-usb-wakeup-interrupt-types.patch
+arm-dts-samsung-exynos4210-i9100-unconditionally-ena.patch
+arm-dts-qcom-sdx55-fix-pdc-interrupt-cells.patch
+arm-dts-qcom-sdx55-fix-usb-dp-dm-hs-phy-interrupts.patch
+arm-dts-qcom-sdx55-fix-usb-ss-wakeup.patch
+dlm-use-kernel_connect-and-kernel_bind.patch
+serial-core-provide-port-lock-wrappers.patch
+serial-sc16is7xx-use-port-lock-wrappers.patch
+serial-sc16is7xx-fix-unconditional-activation-of-thr.patch
+btrfs-zoned-factor-out-prepare_allocation_zoned.patch
+btrfs-zoned-optimize-hint-byte-for-zoned-allocator.patch
+nfsd-fix-release_lockowner.patch
+drm-panel-edp-drm-panel-edp-fix-auo-b116xak01-name-a.patch
+revert-powerpc-64s-increase-default-stack-size-to-32.patch
--- /dev/null
+From 4cc697e489707a356d12757df556221f272a242e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jan 2024 19:07:04 -0800
+Subject: thermal: intel: hfi: Add syscore callbacks for system-wide PM
+
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+
+[ Upstream commit 97566d09fd02d2ab329774bb89a2cdf2267e86d9 ]
+
+The kernel allocates a memory buffer and provides its location to the
+hardware, which uses it to update the HFI table. This allocation occurs
+during boot and remains constant throughout runtime.
+
+When resuming from hibernation, the restore kernel allocates a second
+memory buffer and reprograms the HFI hardware with the new location as
+part of a normal boot. The location of the second memory buffer may
+differ from the one allocated by the image kernel.
+
+When the restore kernel transfers control to the image kernel, its HFI
+buffer becomes invalid, potentially leading to memory corruption if the
+hardware writes to it (the hardware continues to use the buffer from the
+restore kernel).
+
+It is also possible that the hardware "forgets" the address of the memory
+buffer when resuming from "deep" suspend. Memory corruption may also occur
+in such a scenario.
+
+To prevent the described memory corruption, disable HFI when preparing to
+suspend or hibernate. Enable it when resuming.
+
+Add syscore callbacks to handle the package of the boot CPU (packages of
+non-boot CPUs are handled via CPU offline): syscore ops always run on the
+boot CPU, and they only run during "deep" suspend and hibernation, which
+is exactly when HFI needs to be disabled.
+
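+Registration follows the standard pattern (sketch matching the hunk
+below; syscore callbacks run with interrupts disabled, hence no locking
+in the helpers):
+
+	static struct syscore_ops hfi_pm_ops = {
+		.resume  = hfi_do_enable,	/* void (*resume)(void) */
+		.suspend = hfi_do_disable,	/* int (*suspend)(void) */
+	};
+
+	register_syscore_ops(&hfi_pm_ops);
+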
+Cc: 6.1+ <stable@vger.kernel.org> # 6.1+
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+[ rjw: Comment adjustment, subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/intel/intel_hfi.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index 5352fcb72ea3..750dab3f259e 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -35,7 +35,9 @@
+ #include <linux/processor.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/suspend.h>
+ #include <linux/string.h>
++#include <linux/syscore_ops.h>
+ #include <linux/topology.h>
+ #include <linux/workqueue.h>
+
+@@ -559,6 +561,30 @@ static __init int hfi_parse_features(void)
+ return 0;
+ }
+
++static void hfi_do_enable(void)
++{
++ /* This code runs only on the boot CPU. */
++ struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
++ struct hfi_instance *hfi_instance = info->hfi_instance;
++
++ /* No locking needed. There is no concurrency with CPU online. */
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
++}
++
++static int hfi_do_disable(void)
++{
++ /* No locking needed. There is no concurrency with CPU offline. */
++ hfi_disable();
++
++ return 0;
++}
++
++static struct syscore_ops hfi_pm_ops = {
++ .resume = hfi_do_enable,
++ .suspend = hfi_do_disable,
++};
++
+ void __init intel_hfi_init(void)
+ {
+ struct hfi_instance *hfi_instance;
+@@ -590,6 +616,8 @@ void __init intel_hfi_init(void)
+ if (!hfi_updates_wq)
+ goto err_nomem;
+
++ register_syscore_ops(&hfi_pm_ops);
++
+ return;
+
+ err_nomem:
+--
+2.43.0
+
--- /dev/null
+From bc1d2ad59323b01a9ad506ff4817b5586c081bd4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jan 2024 20:14:58 -0800
+Subject: thermal: intel: hfi: Disable an HFI instance when all its CPUs go
+ offline
+
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+
+[ Upstream commit 1c53081d773c2cb4461636559b0d55b46559ceec ]
+
+In preparation to support hibernation, add functionality to disable an HFI
+instance during CPU offline. The last CPU of an instance that goes offline
+will disable such instance.
+
+The Intel Software Developer's Manual states that the operating system must
+wait for the hardware to set MSR_IA32_PACKAGE_THERM_STATUS[26] after
+disabling an HFI instance to ensure that it will no longer write to the HFI
+memory. Some processors, however, never set that bit. Wait a minimum
+of 2ms to give the hardware time to complete any pending memory writes.
+
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 97566d09fd02 ("thermal: intel: hfi: Add syscore callbacks for system-wide PM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/intel/intel_hfi.c | 35 +++++++++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index 1a014595ed49..5352fcb72ea3 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -24,6 +24,7 @@
+ #include <linux/bitops.h>
+ #include <linux/cpufeature.h>
+ #include <linux/cpumask.h>
++#include <linux/delay.h>
+ #include <linux/gfp.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -358,6 +359,32 @@ static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
+ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+ }
+
++/* Caller must hold hfi_instance_lock. */
++static void hfi_disable(void)
++{
++ u64 msr_val;
++ int i;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++
++ /*
++ * Wait for hardware to acknowledge the disabling of HFI. Some
++ * processors may not do it. Wait for ~2ms. This is a reasonable
++ * time for hardware to complete any pending actions on the HFI
++ * memory.
++ */
++ for (i = 0; i < 2000; i++) {
++ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
++ if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
++ break;
++
++ udelay(1);
++ cpu_relax();
++ }
++}
++
+ /**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU in which the HFI will be enabled
+@@ -412,6 +439,10 @@ void intel_hfi_online(unsigned int cpu)
+ /*
+ * Hardware is programmed with the physical address of the first page
+ * frame of the table. Hence, the allocated memory must be page-aligned.
++ *
++ * Some processors do not forget the initial address of the HFI table
++ * even after having been reprogrammed. Keep using the same pages. Do
++ * not free them.
+ */
+ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+ GFP_KERNEL | __GFP_ZERO);
+@@ -476,6 +507,10 @@ void intel_hfi_offline(unsigned int cpu)
+
+ mutex_lock(&hfi_instance_lock);
+ cpumask_clear_cpu(cpu, hfi_instance->cpus);
++
++ if (!cpumask_weight(hfi_instance->cpus))
++ hfi_disable();
++
+ mutex_unlock(&hfi_instance_lock);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 33364855547e80e132ca4fbf61a48daa3c078434 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jan 2024 20:14:56 -0800
+Subject: thermal: intel: hfi: Refactor enabling code into helper functions
+
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+
+[ Upstream commit 8a8b6bb93c704776c4b05cb517c3fa8baffb72f5 ]
+
+In preparation for the addition of a suspend notifier, wrap the logic to
+enable HFI and program its memory buffer into helper functions. Both the
+CPU hotplug callback and the suspend notifier will use them.
+
+This refactoring does not introduce functional changes.
+
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 97566d09fd02 ("thermal: intel: hfi: Add syscore callbacks for system-wide PM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/intel/intel_hfi.c | 43 ++++++++++++++++---------------
+ 1 file changed, 22 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index a0640f762dc5..1a014595ed49 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -338,6 +338,26 @@ static void init_hfi_instance(struct hfi_instance *hfi_instance)
+ hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+ }
+
++/* Caller must hold hfi_instance_lock. */
++static void hfi_enable(void)
++{
++ u64 msr_val;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++}
++
++static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
++{
++ phys_addr_t hw_table_pa;
++ u64 msr_val;
++
++ hw_table_pa = virt_to_phys(hfi_instance->hw_table);
++ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
++}
++
+ /**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU in which the HFI will be enabled
+@@ -355,8 +375,6 @@ void intel_hfi_online(unsigned int cpu)
+ {
+ struct hfi_instance *hfi_instance;
+ struct hfi_cpu_info *info;
+- phys_addr_t hw_table_pa;
+- u64 msr_val;
+ u16 die_id;
+
+ /* Nothing to do if hfi_instances are missing. */
+@@ -400,8 +418,6 @@ void intel_hfi_online(unsigned int cpu)
+ if (!hfi_instance->hw_table)
+ goto unlock;
+
+- hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+-
+ /*
+ * Allocate memory to keep a local copy of the table that
+ * hardware generates.
+@@ -411,16 +427,6 @@ void intel_hfi_online(unsigned int cpu)
+ if (!hfi_instance->local_table)
+ goto free_hw_table;
+
+- /*
+- * Program the address of the feedback table of this die/package. On
+- * some processors, hardware remembers the old address of the HFI table
+- * even after having been reprogrammed and re-enabled. Thus, do not free
+- * the pages allocated for the table or reprogram the hardware with a
+- * new base address. Namely, program the hardware only once.
+- */
+- msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+-
+ init_hfi_instance(hfi_instance);
+
+ INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+@@ -429,13 +435,8 @@ void intel_hfi_online(unsigned int cpu)
+
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+
+- /*
+- * Enable the hardware feedback interface and never disable it. See
+- * comment on programming the address of the table.
+- */
+- rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+- msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
+
+ unlock:
+ mutex_unlock(&hfi_instance_lock);
+--
+2.43.0
+