--- /dev/null
+From stable+bounces-196906-greg=kroah.com@vger.kernel.org Tue Nov 25 14:51:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Nov 2025 08:51:11 -0500
+Subject: drm/amd/display: Insert dccg log for easy debug
+To: stable@vger.kernel.org
+Cc: Charlene Liu <Charlene.Liu@amd.com>, "Ovidiu (Ovi) Bunea" <ovidiu.bunea@amd.com>, Yihan Zhu <yihan.zhu@amd.com>, Ivan Lipski <ivan.lipski@amd.com>, Dan Wheeler <daniel.wheeler@amd.com>, Alex Deucher <alexander.deucher@amd.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251125135112.591587-1-sashal@kernel.org>
+
+From: Charlene Liu <Charlene.Liu@amd.com>
+
+[ Upstream commit 35bcc9168f3ce6416cbf3f776758be0937f84cb3 ]
+
+[why]
+Log for sequence tracking
+
+Reviewed-by: Ovidiu (Ovi) Bunea <ovidiu.bunea@amd.com>
+Reviewed-by: Yihan Zhu <yihan.zhu@amd.com>
+Signed-off-by: Charlene Liu <Charlene.Liu@amd.com>
+Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: cfa0904a35fd ("drm/amd/display: Prevent Gating DTBCLK before It Is Properly Latched")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c | 24 ++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+@@ -39,6 +39,7 @@
+
+ #define CTX \
+ dccg_dcn->base.ctx
++#include "logger_types.h"
+ #define DC_LOGGER \
+ dccg->ctx->logger
+
+@@ -1136,7 +1137,7 @@ static void dcn35_set_dppclk_enable(stru
+ default:
+ break;
+ }
+- //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
++ DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
+
+ }
+
+@@ -1406,6 +1407,10 @@ static void dccg35_set_dtbclk_dto(
+ * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
+ * programming is handled in program_pix_clk() regardless, so it can be removed from here.
+ */
++ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO enabled: pixclk_khz=%d, ref_dtbclk_khz=%d, req_dtbclk_khz=%d, phase=%d, modulo=%d\n",
++ __func__, params->otg_inst, params->pixclk_khz,
++ params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
++
+ } else {
+ switch (params->otg_inst) {
+ case 0:
+@@ -1431,6 +1436,8 @@ static void dccg35_set_dtbclk_dto(
+
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
++
++ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO disabled\n", __func__, params->otg_inst);
+ }
+ }
+
+@@ -1475,6 +1482,8 @@ static void dccg35_set_dpstreamclk(
+ BREAK_TO_DEBUGGER();
+ return;
+ }
++ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_EN = %d, DPSTREAMCLK_SRC_SEL = %d\n",
++ __func__, dp_hpo_inst, (src == REFCLK) ? 0 : 1, otg_inst);
+ }
+
+
+@@ -1514,6 +1523,8 @@ static void dccg35_set_dpstreamclk_root_
+ BREAK_TO_DEBUGGER();
+ return;
+ }
++ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_ROOT_GATE_DISABLE = %d\n",
++ __func__, dp_hpo_inst, enable ? 1 : 0);
+ }
+
+
+@@ -1553,7 +1564,7 @@ static void dccg35_set_physymclk_root_cl
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+- //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
++ DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1);
+
+ }
+
+@@ -1626,6 +1637,8 @@ static void dccg35_set_physymclk(
+ BREAK_TO_DEBUGGER();
+ return;
+ }
++ DC_LOG_DEBUG("%s: phy_inst(%d) PHYxSYMCLK_EN = %d, PHYxSYMCLK_SRC_SEL = %d\n",
++ __func__, phy_inst, force_enable ? 1 : 0, clk_src);
+ }
+
+ static void dccg35_set_valid_pixel_rate(
+@@ -1673,6 +1686,7 @@ static void dccg35_dpp_root_clock_contro
+ }
+
+ dccg->dpp_clock_gated[dpp_inst] = !clock_on;
++ DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
+ }
+
+ static void dccg35_disable_symclk32_se(
+@@ -1731,6 +1745,7 @@ static void dccg35_disable_symclk32_se(
+ BREAK_TO_DEBUGGER();
+ return;
+ }
++
+ }
+
+ static void dccg35_init_cb(struct dccg *dccg)
+@@ -1738,7 +1753,6 @@ static void dccg35_init_cb(struct dccg *
+ (void)dccg;
+ /* Any RCG should be done when driver enter low power mode*/
+ }
+-
+ void dccg35_init(struct dccg *dccg)
+ {
+ int otg_inst;
+@@ -1753,6 +1767,8 @@ void dccg35_init(struct dccg *dccg)
+ for (otg_inst = 0; otg_inst < 2; otg_inst++) {
+ dccg31_disable_symclk32_le(dccg, otg_inst);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
++ DC_LOG_DEBUG("%s: OTG%d SYMCLK32_LE disabled and root clock gating disabled\n",
++ __func__, otg_inst);
+ }
+
+ // if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+@@ -1765,6 +1781,8 @@ void dccg35_init(struct dccg *dccg)
+ dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
+ otg_inst);
+ dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
++ DC_LOG_DEBUG("%s: OTG%d DPSTREAMCLK disabled and root clock gating disabled\n",
++ __func__, otg_inst);
+ }
+
+ /*
--- /dev/null
+From stable+bounces-196907-greg=kroah.com@vger.kernel.org Tue Nov 25 14:52:25 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Nov 2025 08:51:12 -0500
+Subject: drm/amd/display: Prevent Gating DTBCLK before It Is Properly Latched
+To: stable@vger.kernel.org
+Cc: Fangzhi Zuo <Jerry.Zuo@amd.com>, Charlene Liu <charlene.liu@amd.com>, Aurabindo Pillai <aurabindo.pillai@amd.com>, Roman Li <roman.li@amd.com>, Dan Wheeler <daniel.wheeler@amd.com>, Alex Deucher <alexander.deucher@amd.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251125135112.591587-2-sashal@kernel.org>
+
+From: Fangzhi Zuo <Jerry.Zuo@amd.com>
+
+[ Upstream commit cfa0904a35fd0231f4d05da0190f0a22ed881cce ]
+
+[why]
+1. With allow_0_dtb_clk enabled, the time required to latch DTBCLK to 600 MHz
+depends on the SMU. If DTBCLK is not latched to 600 MHz before set_mode completes,
+gating DTBCLK causes the DP2 sink to lose its clock source.
+
+2. The existing DTBCLK gating sequence ungates DTBCLK based on both pix_clk and ref_dtbclk,
+but gates DTBCLK when either pix_clk or ref_dtbclk is zero.
+pix_clk can be zero outside the set_mode sequence before DTBCLK is properly latched,
+which can lead to DTBCLK being gated by mistake.
+
+[how]
+Consider both pixel_clk and ref_dtbclk when determining when it is safe to gate DTBCLK;
+this is more accurate.
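+
+As a stand-alone user-space sketch (not the driver code; req_dtbclk_khz
+is derived from pix_clk before this check), the corrected gate decision
+fires only when both clocks are zero:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Gate DTBCLK only when both the reference and requested clocks are 0. */
+  static bool should_gate_dtbclk(int ref_dtbclk_khz, int req_dtbclk_khz)
+  {
+          return ref_dtbclk_khz == 0 && req_dtbclk_khz == 0;
+  }
+
+  int main(void)
+  {
+          /* pix_clk transiently 0 outside set_mode, ref still latched: keep on. */
+          printf("%d\n", should_gate_dtbclk(600000, 0)); /* 0: not gated */
+          printf("%d\n", should_gate_dtbclk(0, 0));      /* 1: gated */
+          return 0;
+  }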
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4701
+Fixes: 5949e7c4890c ("drm/amd/display: Enable Dynamic DTBCLK Switch")
+Reviewed-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Fangzhi Zuo <Jerry.Zuo@amd.com>
+Signed-off-by: Roman Li <roman.li@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit d04eb0c402780ca037b62a6aecf23b863545ebca)
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 4 +++-
+ drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c | 2 +-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
+ if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
+ new_clocks->ref_dtbclk_khz = 600000;
++ else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
++ new_clocks->ref_dtbclk_khz = 0;
+
+ /*
+ * if it is safe to lower, but we are already in the lower state, we don't have to do anything
+@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr
+
+ actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+- if (actual_dtbclk) {
++ if (actual_dtbclk > 590000) {
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
+--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+@@ -1411,7 +1411,7 @@ static void dccg35_set_dtbclk_dto(
+ __func__, params->otg_inst, params->pixclk_khz,
+ params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
+
+- } else {
++ } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
+ switch (params->otg_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
--- /dev/null
+From stable+bounces-196562-greg=kroah.com@vger.kernel.org Fri Nov 21 19:45:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 13:45:22 -0500
+Subject: drm/amdgpu/jpeg: Add parse_cs for JPEG5_0_1
+To: stable@vger.kernel.org
+Cc: Sathishkumar S <sathishkumar.sundararaju@amd.com>, Leo Liu <leo.liu@amd.com>, Alex Deucher <alexander.deucher@amd.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251121184522.2650460-2-sashal@kernel.org>
+
+From: Sathishkumar S <sathishkumar.sundararaju@amd.com>
+
+[ Upstream commit bbe3c115030da431c9ec843c18d5583e59482dd2 ]
+
+Enable the parse_cs callback for JPEG5_0_1.
+
+Signed-off-by: Sathishkumar S <sathishkumar.sundararaju@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 547985579932c1de13f57f8bcf62cd9361b9d3d3)
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+@@ -878,6 +878,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
+ .get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
+ .set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
--- /dev/null
+From stable+bounces-196561-greg=kroah.com@vger.kernel.org Fri Nov 21 19:46:36 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 13:45:21 -0500
+Subject: drm/amdgpu/jpeg: Move parse_cs to amdgpu_jpeg.c
+To: stable@vger.kernel.org
+Cc: Sathishkumar S <sathishkumar.sundararaju@amd.com>, Leo Liu <leo.liu@amd.com>, Alex Deucher <alexander.deucher@amd.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251121184522.2650460-1-sashal@kernel.org>
+
+From: Sathishkumar S <sathishkumar.sundararaju@amd.com>
+
+[ Upstream commit 28f75f9bcc7da7da12e5dae2ae8d8629a2b2e42e ]
+
+Rename jpeg_v2_dec_ring_parse_cs to amdgpu_jpeg_dec_parse_cs
+and move it to amdgpu_jpeg.c as it is shared among jpeg versions.
+
+Signed-off-by: Sathishkumar S <sathishkumar.sundararaju@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: bbe3c115030d ("drm/amdgpu/jpeg: Add parse_cs for JPEG5_0_1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 65 +++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 10 ++++
+ drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 58 ---------------------------
+ drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h | 6 --
+ drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4 -
+ drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2
+ drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 2
+ drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2
+ drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 2
+ drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 2
+ 10 files changed, 83 insertions(+), 70 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+@@ -539,3 +539,68 @@ void amdgpu_jpeg_print_ip_state(struct a
+ drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
+ }
+ }
++
++static inline bool amdgpu_jpeg_reg_valid(u32 reg)
++{
++ if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
++ (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
++ return false;
++ else
++ return true;
++}
++
++/**
++ * amdgpu_jpeg_dec_parse_cs - command submission parser
++ *
++ * @parser: Command submission parser context
++ * @job: the job to parse
++ * @ib: the IB to parse
++ *
++ * Parse the command stream, return -EINVAL for invalid packet,
++ * 0 otherwise
++ */
++
++int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib)
++{
++ u32 i, reg, res, cond, type;
++ struct amdgpu_device *adev = parser->adev;
++
++ for (i = 0; i < ib->length_dw ; i += 2) {
++ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
++ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
++ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
++ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
++
++ if (res) /* only support 0 at the moment */
++ return -EINVAL;
++
++ switch (type) {
++ case PACKETJ_TYPE0:
++ if (cond != PACKETJ_CONDITION_CHECK0 ||
++ !amdgpu_jpeg_reg_valid(reg)) {
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ return -EINVAL;
++ }
++ break;
++ case PACKETJ_TYPE3:
++ if (cond != PACKETJ_CONDITION_CHECK3 ||
++ !amdgpu_jpeg_reg_valid(reg)) {
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ return -EINVAL;
++ }
++ break;
++ case PACKETJ_TYPE6:
++ if (ib->ptr[i] == CP_PACKETJ_NOP)
++ continue;
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ return -EINVAL;
++ default:
++ dev_err(adev->dev, "Unknown packet type %d !\n", type);
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+@@ -25,11 +25,18 @@
+ #define __AMDGPU_JPEG_H__
+
+ #include "amdgpu_ras.h"
++#include "amdgpu_cs.h"
+
+ #define AMDGPU_MAX_JPEG_INSTANCES 4
+ #define AMDGPU_MAX_JPEG_RINGS 10
+ #define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+
++#define JPEG_REG_RANGE_START 0x4000
++#define JPEG_REG_RANGE_END 0x41c2
++#define JPEG_ATOMIC_RANGE_START 0x4120
++#define JPEG_ATOMIC_RANGE_END 0x412A
++
++
+ #define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
+ #define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
+
+@@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amd
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+ void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
+ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
++int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib);
+
+ #endif /*__AMDGPU_JPEG_H__*/
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+@@ -23,7 +23,6 @@
+
+ #include "amdgpu.h"
+ #include "amdgpu_jpeg.h"
+-#include "amdgpu_cs.h"
+ #include "amdgpu_pm.h"
+ #include "soc15.h"
+ #include "soc15d.h"
+@@ -806,7 +805,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v2_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+@@ -854,58 +853,3 @@ const struct amdgpu_ip_block_version jpe
+ .rev = 0,
+ .funcs = &jpeg_v2_0_ip_funcs,
+ };
+-
+-/**
+- * jpeg_v2_dec_ring_parse_cs - command submission parser
+- *
+- * @parser: Command submission parser context
+- * @job: the job to parse
+- * @ib: the IB to parse
+- *
+- * Parse the command stream, return -EINVAL for invalid packet,
+- * 0 otherwise
+- */
+-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+- struct amdgpu_job *job,
+- struct amdgpu_ib *ib)
+-{
+- u32 i, reg, res, cond, type;
+- struct amdgpu_device *adev = parser->adev;
+-
+- for (i = 0; i < ib->length_dw ; i += 2) {
+- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+-
+- if (res) /* only support 0 at the moment */
+- return -EINVAL;
+-
+- switch (type) {
+- case PACKETJ_TYPE0:
+- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
+- reg > JPEG_REG_RANGE_END) {
+- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+- return -EINVAL;
+- }
+- break;
+- case PACKETJ_TYPE3:
+- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
+- reg > JPEG_REG_RANGE_END) {
+- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+- return -EINVAL;
+- }
+- break;
+- case PACKETJ_TYPE6:
+- if (ib->ptr[i] == CP_PACKETJ_NOP)
+- continue;
+- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+- return -EINVAL;
+- default:
+- dev_err(adev->dev, "Unknown packet type %d !\n", type);
+- return -EINVAL;
+- }
+- }
+-
+- return 0;
+-}
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+@@ -45,9 +45,6 @@
+
+ #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+
+-#define JPEG_REG_RANGE_START 0x4000
+-#define JPEG_REG_RANGE_END 0x41c2
+-
+ void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
+ void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
+ void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+@@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(st
+ unsigned vmid, uint64_t pd_addr);
+ void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+ void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+- struct amdgpu_job *job,
+- struct amdgpu_ib *ib);
+
+ extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
+
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+@@ -696,7 +696,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+@@ -727,7 +727,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -597,7 +597,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v3_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v3_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v3_0_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+@@ -762,7 +762,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v4_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v4_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v4_0_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+@@ -1177,7 +1177,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
+ .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
+ .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+@@ -807,7 +807,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
+ .get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
+ .set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+@@ -683,7 +683,7 @@ static const struct amdgpu_ring_funcs jp
+ .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
+- .parse_cs = jpeg_v2_dec_ring_parse_cs,
++ .parse_cs = amdgpu_jpeg_dec_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
--- /dev/null
+From stable+bounces-196506-greg=kroah.com@vger.kernel.org Fri Nov 21 16:31:25 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 10:20:15 -0500
+Subject: drm/i915/dp_mst: Disable Panel Replay
+To: stable@vger.kernel.org
+Cc: "Imre Deak" <imre.deak@intel.com>, "Jouni Högander" <jouni.hogander@intel.com>, "Animesh Manna" <animesh.manna@intel.com>, "Rodrigo Vivi" <rodrigo.vivi@intel.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251121152015.2567941-2-sashal@kernel.org>
+
+From: Imre Deak <imre.deak@intel.com>
+
+[ Upstream commit f2687d3cc9f905505d7b510c50970176115066a2 ]
+
+Disable Panel Replay on MST links until it's properly implemented. For
+instance the required VSC SDP is not programmed on MST and FEC is not
+enabled if Panel Replay is enabled.
+
+Fixes: 3257e55d3ea7 ("drm/i915/panelreplay: enable/disable panel replay")
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/15174
+Cc: Jouni Högander <jouni.hogander@intel.com>
+Cc: Animesh Manna <animesh.manna@intel.com>
+Cc: stable@vger.kernel.org # v6.8+
+Reviewed-by: Jouni Högander <jouni.hogander@intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patch.msgid.link/20251107124141.911895-1-imre.deak@intel.com
+(cherry picked from commit e109f644b871df8440c886a69cdce971ed533088)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -602,6 +602,10 @@ static void _panel_replay_init_dpcd(stru
+ struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
+
++ /* TODO: Enable Panel Replay on MST once it's properly implemented. */
++ if (intel_dp->mst_detect == DRM_DP_MST)
++ return;
++
+ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+ if (ret < 0)
--- /dev/null
+From stable+bounces-196505-greg=kroah.com@vger.kernel.org Fri Nov 21 16:30:31 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 10:20:14 -0500
+Subject: drm/i915/psr: Check drm_dp_dpcd_read return value on PSR dpcd init
+To: stable@vger.kernel.org
+Cc: "Jouni Högander" <jouni.hogander@intel.com>, "Jani Nikula" <jani.nikula@intel.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251121152015.2567941-1-sashal@kernel.org>
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+[ Upstream commit 9cc10041e9fe7f32c4817e3cdd806ff1986d266c ]
+
+Currently we are ignoring the drm_dp_dpcd_read return values when reading
+the PSR and Panel Replay capability DPCD registers. Rework intel_psr_dpcd
+a bit to take care of checking the return values.
+
+v2: use drm_dp_dpcd_read_data
+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://lore.kernel.org/r/20250821045918.17757-1-jouni.hogander@intel.com
+Stable-dep-of: f2687d3cc9f9 ("drm/i915/dp_mst: Disable Panel Replay")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 32 ++++++++++++++++++++-----------
+ 1 file changed, 21 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -600,6 +600,16 @@ exit:
+ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
+ {
+ struct intel_display *display = to_intel_display(intel_dp);
++ int ret;
++
++ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
++ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
++ if (ret < 0)
++ return;
++
++ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
++ DP_PANEL_REPLAY_SUPPORT))
++ return;
+
+ if (intel_dp_is_edp(intel_dp)) {
+ if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
+@@ -631,6 +641,15 @@ static void _panel_replay_init_dpcd(stru
+ static void _psr_init_dpcd(struct intel_dp *intel_dp)
+ {
+ struct intel_display *display = to_intel_display(intel_dp);
++ int ret;
++
++ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
++ sizeof(intel_dp->psr_dpcd));
++ if (ret < 0)
++ return;
++
++ if (!intel_dp->psr_dpcd[0])
++ return;
+
+ drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
+@@ -676,18 +695,9 @@ static void _psr_init_dpcd(struct intel_
+
+ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+ {
+- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+- sizeof(intel_dp->psr_dpcd));
+-
+- drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+- &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+-
+- if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+- DP_PANEL_REPLAY_SUPPORT)
+- _panel_replay_init_dpcd(intel_dp);
++ _psr_init_dpcd(intel_dp);
+
+- if (intel_dp->psr_dpcd[0])
+- _psr_init_dpcd(intel_dp);
++ _panel_replay_init_dpcd(intel_dp);
+
+ if (intel_dp->psr.sink_psr2_support ||
+ intel_dp->psr.sink_panel_replay_su_support)
--- /dev/null
+From stable+bounces-196583-greg=kroah.com@vger.kernel.org Sat Nov 22 05:52:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 23:52:22 -0500
+Subject: kho: allocate metadata directly from the buddy allocator
+To: stable@vger.kernel.org
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>, Pratyush Yadav <pratyush@kernel.org>, "Mike Rapoport (Microsoft)" <rppt@kernel.org>, David Matlack <dmatlack@google.com>, Alexander Graf <graf@amazon.com>, Christian Brauner <brauner@kernel.org>, Jason Gunthorpe <jgg@ziepe.ca>, Jonathan Corbet <corbet@lwn.net>, Masahiro Yamada <masahiroy@kernel.org>, Miguel Ojeda <ojeda@kernel.org>, Randy Dunlap <rdunlap@infradead.org>, Samiullah Khawaja <skhawaja@google.com>, Tejun Heo <tj@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251122045222.2798582-4-sashal@kernel.org>
+
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+
+[ Upstream commit fa759cd75bce5489eed34596daa53f721849a86f ]
+
+KHO allocates metadata for its preserved memory map using the slab
+allocator via kzalloc(). This metadata is temporary and is used by the
+next kernel during early boot to find preserved memory.
+
+A problem arises when KFENCE is enabled. kzalloc() calls can be randomly
+intercepted by kfence_alloc(), which services the allocation from a
+dedicated KFENCE memory pool. This pool is allocated early in boot via
+memblock.
+
+When booting via KHO, the memblock allocator is restricted to a "scratch
+area", forcing the KFENCE pool to be allocated within it. This creates a
+conflict, as the scratch area is expected to be ephemeral and
+overwriteable by a subsequent kexec. If KHO metadata is placed in this
+KFENCE pool, it leads to memory corruption when the next kernel is loaded.
+
+To fix this, modify KHO to allocate its metadata directly from the buddy
+allocator instead of slab.
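+
+As a minimal sketch of the resulting pattern (kernel context assumed,
+surrounding logic trimmed), buddy pages bypass slab entirely, so KFENCE
+can never intercept the allocation:
+
+  /* was: void *elm = kzalloc(PAGE_SIZE, GFP_KERNEL); */
+  void *elm = (void *)get_zeroed_page(GFP_KERNEL);
+  if (!elm)
+          return ERR_PTR(-ENOMEM);
+  /* ... use elm as metadata ... */
+  free_page((unsigned long)elm);  /* instead of kfree(elm) */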
+
+Link: https://lkml.kernel.org/r/20251021000852.2924827-4-pasha.tatashin@soleen.com
+Fixes: fc33e4b44b27 ("kexec: enable KHO support for memory preservation")
+Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: David Matlack <dmatlack@google.com>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Masahiro Yamada <masahiroy@kernel.org>
+Cc: Miguel Ojeda <ojeda@kernel.org>
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Samiullah Khawaja <skhawaja@google.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/gfp.h | 3 +++
+ kernel/kexec_handover.c | 6 +++---
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -7,6 +7,7 @@
+ #include <linux/mmzone.h>
+ #include <linux/topology.h>
+ #include <linux/alloc_tag.h>
++#include <linux/cleanup.h>
+ #include <linux/sched.h>
+
+ struct vm_area_struct;
+@@ -463,4 +464,6 @@ static inline struct folio *folio_alloc_
+ /* This should be paired with folio_put() rather than free_contig_range(). */
+ #define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
+
++DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
++
+ #endif /* __LINUX_GFP_H */
+--- a/kernel/kexec_handover.c
++++ b/kernel/kexec_handover.c
+@@ -125,7 +125,7 @@ static void *xa_load_or_alloc(struct xar
+ if (res)
+ return res;
+
+- void *elm __free(kfree) = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ void *elm __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);
+
+ if (!elm)
+ return ERR_PTR(-ENOMEM);
+@@ -292,9 +292,9 @@ static_assert(sizeof(struct khoser_mem_c
+ static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
+ unsigned long order)
+ {
+- struct khoser_mem_chunk *chunk __free(kfree) = NULL;
++ struct khoser_mem_chunk *chunk __free(free_page) = NULL;
+
+- chunk = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ chunk = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!chunk)
+ return ERR_PTR(-ENOMEM);
+
--- /dev/null
+From stable+bounces-196580-greg=kroah.com@vger.kernel.org Sat Nov 22 05:52:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 23:52:19 -0500
+Subject: kho: check if kho is finalized in __kho_preserve_order()
+To: stable@vger.kernel.org
+Cc: "Mike Rapoport (Microsoft)" <rppt@kernel.org>, Pratyush Yadav <pratyush@kernel.org>, Alexander Graf <graf@amazon.com>, Baoquan He <bhe@redhat.com>, Changyuan Lyu <changyuanl@google.com>, Chris Li <chrisl@kernel.org>, Jason Gunthorpe <jgg@nvidia.com>, Pasha Tatashin <pasha.tatashin@soleen.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251122045222.2798582-1-sashal@kernel.org>
+
+From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
+
+[ Upstream commit 469661d0d3a55a7ba1e7cb847c26baf78cace086 ]
+
+Patch series "kho: add support for preserving vmalloc allocations", v5.
+
+Following the discussion about preservation of memfd with LUO [1] these
+patches add support for preserving vmalloc allocations.
+
+Any KHO uses case presumes that there's a data structure that lists
+physical addresses of preserved folios (and potentially some additional
+metadata). Allowing vmalloc preservations with KHO allows scalable
+preservation of such data structures.
+
+For instance, instead of allocating array describing preserved folios in
+the fdt, memfd preservation can use vmalloc:
+
+ preserved_folios = vmalloc_array(nr_folios, sizeof(*preserved_folios));
+ memfd_luo_preserve_folios(preserved_folios, folios, nr_folios);
+ kho_preserve_vmalloc(preserved_folios, &folios_info);
+
+This patch (of 4):
+
+Instead of checking if kho is finalized in each caller of
+__kho_preserve_order(), do it in the core function itself.
+
+Link: https://lkml.kernel.org/r/20250921054458.4043761-1-rppt@kernel.org
+Link: https://lkml.kernel.org/r/20250921054458.4043761-2-rppt@kernel.org
+Link: https://lore.kernel.org/all/20250807014442.3829950-30-pasha.tatashin@soleen.com [1]
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Changyuan Lyu <changyuanl@google.com>
+Cc: Chris Li <chrisl@kernel.org>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: fa759cd75bce ("kho: allocate metadata directly from the buddy allocator")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kexec_handover.c | 55 ++++++++++++++++++++++--------------------------
+ 1 file changed, 26 insertions(+), 29 deletions(-)
+
+--- a/kernel/kexec_handover.c
++++ b/kernel/kexec_handover.c
+@@ -91,6 +91,29 @@ struct kho_serialization {
+ struct khoser_mem_chunk *preserved_mem_map;
+ };
+
++struct kho_out {
++ struct blocking_notifier_head chain_head;
++
++ struct dentry *dir;
++
++ struct mutex lock; /* protects KHO FDT finalization */
++
++ struct kho_serialization ser;
++ bool finalized;
++};
++
++static struct kho_out kho_out = {
++ .chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
++ .lock = __MUTEX_INITIALIZER(kho_out.lock),
++ .ser = {
++ .fdt_list = LIST_HEAD_INIT(kho_out.ser.fdt_list),
++ .track = {
++ .orders = XARRAY_INIT(kho_out.ser.track.orders, 0),
++ },
++ },
++ .finalized = false,
++};
++
+ static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
+ {
+ void *elm, *res;
+@@ -149,6 +172,9 @@ static int __kho_preserve_order(struct k
+
+ might_sleep();
+
++ if (kho_out.finalized)
++ return -EBUSY;
++
+ physxa = xa_load(&track->orders, order);
+ if (!physxa) {
+ int err;
+@@ -631,29 +657,6 @@ int kho_add_subtree(struct kho_serializa
+ }
+ EXPORT_SYMBOL_GPL(kho_add_subtree);
+
+-struct kho_out {
+- struct blocking_notifier_head chain_head;
+-
+- struct dentry *dir;
+-
+- struct mutex lock; /* protects KHO FDT finalization */
+-
+- struct kho_serialization ser;
+- bool finalized;
+-};
+-
+-static struct kho_out kho_out = {
+- .chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
+- .lock = __MUTEX_INITIALIZER(kho_out.lock),
+- .ser = {
+- .fdt_list = LIST_HEAD_INIT(kho_out.ser.fdt_list),
+- .track = {
+- .orders = XARRAY_INIT(kho_out.ser.track.orders, 0),
+- },
+- },
+- .finalized = false,
+-};
+-
+ int register_kho_notifier(struct notifier_block *nb)
+ {
+ return blocking_notifier_chain_register(&kho_out.chain_head, nb);
+@@ -681,9 +684,6 @@ int kho_preserve_folio(struct folio *fol
+ const unsigned int order = folio_order(folio);
+ struct kho_mem_track *track = &kho_out.ser.track;
+
+- if (kho_out.finalized)
+- return -EBUSY;
+-
+ return __kho_preserve_order(track, pfn, order);
+ }
+ EXPORT_SYMBOL_GPL(kho_preserve_folio);
+@@ -707,9 +707,6 @@ int kho_preserve_phys(phys_addr_t phys,
+ int err = 0;
+ struct kho_mem_track *track = &kho_out.ser.track;
+
+- if (kho_out.finalized)
+- return -EBUSY;
+-
+ if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
+ return -EINVAL;
+
--- /dev/null
+From stable+bounces-196582-greg=kroah.com@vger.kernel.org Sat Nov 22 05:52:32 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 23:52:21 -0500
+Subject: kho: increase metadata bitmap size to PAGE_SIZE
+To: stable@vger.kernel.org
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>, "Mike Rapoport (Microsoft)" <rppt@kernel.org>, Pratyush Yadav <pratyush@kernel.org>, Alexander Graf <graf@amazon.com>, Christian Brauner <brauner@kernel.org>, David Matlack <dmatlack@google.com>, Jason Gunthorpe <jgg@ziepe.ca>, Jonathan Corbet <corbet@lwn.net>, Masahiro Yamada <masahiroy@kernel.org>, Miguel Ojeda <ojeda@kernel.org>, Randy Dunlap <rdunlap@infradead.org>, Samiullah Khawaja <skhawaja@google.com>, Tejun Heo <tj@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251122045222.2798582-3-sashal@kernel.org>
+
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+
+[ Upstream commit a2fff99f92dae9c0eaf0d75de3def70ec68dad92 ]
+
+KHO memory preservation metadata is preserved in 512-byte chunks, which
+requires allocating them from the slab allocator. Slabs are not safe to
+use with KHO because of KFENCE, and because partial slabs may leak into
+the next kernel. Change the size to be PAGE_SIZE.
+
+KFENCE specifically may cause memory corruption, since it randomly
+hands out slab objects that can lie within the scratch area. The reason
+is that KFENCE allocates its objects before the KHO scratch area is
+marked as a CMA region.
+
+While this change could potentially increase metadata overhead on systems
+with sparsely preserved memory, this is being mitigated by ongoing work to
+reduce sparseness during preservation via 1G guest pages. Furthermore,
+this change aligns with future work on a stateless KHO, which will also
+use page-sized bitmaps for its radix tree metadata.
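+
+The sizing arithmetic can be checked with a small stand-alone C program
+(assuming 4 KiB pages, as in the comment updated below):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const long page = 4096;
+          const long bits = page * 8;      /* PRESERVE_BITS = 32768 */
+          const long span = bits * page;   /* order-0 coverage per bitmap */
+
+          printf("span per bitmap: %ld MiB\n", span >> 20);        /* 128 */
+          printf("bitmaps for 16 GiB: %ld\n", (16L << 30) / span); /* 128 */
+          printf("bitmap memory: %ld KiB\n",
+                 (16L << 30) / span * page >> 10);                 /* 512 */
+          return 0;
+  }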
+
+Link: https://lkml.kernel.org/r/20251021000852.2924827-3-pasha.tatashin@soleen.com
+Fixes: fc33e4b44b27 ("kexec: enable KHO support for memory preservation")
+Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: David Matlack <dmatlack@google.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Masahiro Yamada <masahiroy@kernel.org>
+Cc: Miguel Ojeda <ojeda@kernel.org>
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Samiullah Khawaja <skhawaja@google.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: fa759cd75bce ("kho: allocate metadata directly from the buddy allocator")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kexec_handover.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/kernel/kexec_handover.c
++++ b/kernel/kexec_handover.c
+@@ -52,10 +52,10 @@ early_param("kho", kho_parse_enable);
+ * Keep track of memory that is to be preserved across KHO.
+ *
+ * The serializing side uses two levels of xarrays to manage chunks of per-order
+- * 512 byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order of a
+- * 1TB system would fit inside a single 512 byte bitmap. For order 0 allocations
+- * each bitmap will cover 16M of address space. Thus, for 16G of memory at most
+- * 512K of bitmap memory will be needed for order 0.
++ * PAGE_SIZE byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order
++ * of a 8TB system would fit inside a single 4096 byte bitmap. For order 0
++ * allocations each bitmap will cover 128M of address space. Thus, for 16G of
++ * memory at most 512K of bitmap memory will be needed for order 0.
+ *
+ * This approach is fully incremental, as the serialization progresses folios
+ * can continue be aggregated to the tracker. The final step, immediately prior
+@@ -63,12 +63,14 @@ early_param("kho", kho_parse_enable);
+ * successor kernel to parse.
+ */
+
+-#define PRESERVE_BITS (512 * 8)
++#define PRESERVE_BITS (PAGE_SIZE * 8)
+
+ struct kho_mem_phys_bits {
+ DECLARE_BITMAP(preserve, PRESERVE_BITS);
+ };
+
++static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);
++
+ struct kho_mem_phys {
+ /*
+ * Points to kho_mem_phys_bits, a sparse bitmap array. Each bit is sized
+@@ -116,19 +118,19 @@ static struct kho_out kho_out = {
+ .finalized = false,
+ };
+
+-static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
++static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
+ {
+ void *res = xa_load(xa, index);
+
+ if (res)
+ return res;
+
+- void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);
++ void *elm __free(kfree) = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (!elm)
+ return ERR_PTR(-ENOMEM);
+
+- if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), sz)))
++ if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
+ return ERR_PTR(-EINVAL);
+
+ res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
+@@ -201,8 +203,7 @@ static int __kho_preserve_order(struct k
+ }
+ }
+
+- bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS,
+- sizeof(*bits));
++ bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
+ if (IS_ERR(bits))
+ return PTR_ERR(bits);
+
--- /dev/null
+From stable+bounces-196581-greg=kroah.com@vger.kernel.org Sat Nov 22 05:52:32 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 23:52:20 -0500
+Subject: kho: warn and fail on metadata or preserved memory in scratch area
+To: stable@vger.kernel.org
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>, Mike Rapoport <rppt@kernel.org>, Pratyush Yadav <pratyush@kernel.org>, Alexander Graf <graf@amazon.com>, Christian Brauner <brauner@kernel.org>, David Matlack <dmatlack@google.com>, Jason Gunthorpe <jgg@ziepe.ca>, Jonathan Corbet <corbet@lwn.net>, Masahiro Yamada <masahiroy@kernel.org>, Miguel Ojeda <ojeda@kernel.org>, Randy Dunlap <rdunlap@infradead.org>, Samiullah Khawaja <skhawaja@google.com>, Tejun Heo <tj@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251122045222.2798582-2-sashal@kernel.org>
+
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+
+[ Upstream commit e38f65d317df1fd2dcafe614d9c537475ecf9992 ]
+
+Patch series "KHO: kfence + KHO memory corruption fix", v3.
+
+This series fixes a memory corruption bug in KHO that occurs when KFENCE
+is enabled.
+
+The root cause is that KHO metadata, allocated via kzalloc(), can be
+randomly serviced by kfence_alloc(). When a kernel boots via KHO, the
+early memblock allocator is restricted to a "scratch area". This forces
+the KFENCE pool to be allocated within this scratch area, creating a
+conflict. If KHO metadata is subsequently placed in this pool, it gets
+corrupted during the next kexec operation.
+
+Google is using KHO and have had obscure crashes due to this memory
+corruption, with stacks all over the place. I would prefer this fix to be
+properly backported to stable so we can also automatically consume it once
+we switch to the upstream KHO.
+
+Patch 1/3 introduces a debug-only feature (CONFIG_KEXEC_HANDOVER_DEBUG)
+that adds checks to detect and fail any operation that attempts to place
+KHO metadata or preserved memory within the scratch area. This serves as
+a validation and diagnostic tool to confirm the problem without affecting
+production builds.
+
+Patch 2/3 Increases bitmap to PAGE_SIZE, so buddy allocator can be used.
+
+Patch 3/3 Provides the fix by modifying KHO to allocate its metadata
+directly from the buddy allocator instead of slab. This bypasses the
+KFENCE interception entirely.
+
+This patch (of 3):
+
+It is invalid for KHO metadata or preserved memory regions to be located
+within the KHO scratch area, as this area is overwritten when the next
+kernel is loaded, and used early in boot by the next kernel. This can
+lead to memory corruption.
+
+Add checks to kho_preserve_* and KHO's internal metadata allocators
+(xa_load_or_alloc, new_chunk) to verify that the physical address of the
+memory does not overlap with any defined scratch region. If an overlap is
+detected, the operation will fail and a WARN_ON is triggered. To avoid
+performance overhead in production kernels, these checks are enabled only
+when CONFIG_KEXEC_HANDOVER_DEBUG is selected.
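+
+The check itself is an ordinary interval-overlap test; a stand-alone
+model (simplified from kho_scratch_overlap() in the patch below)
+behaves like this:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* True if [phys, phys + size) intersects [start, end). */
+  static bool overlaps(uint64_t phys, uint64_t size,
+                       uint64_t start, uint64_t end)
+  {
+          return phys < end && (phys + size) > start;
+  }
+
+  int main(void)
+  {
+          printf("%d\n", overlaps(0x1000, 0x1000, 0x1800, 0x3000)); /* 1 */
+          printf("%d\n", overlaps(0x1000, 0x800, 0x1800, 0x3000));  /* 0 */
+          return 0;
+  }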
+
+[rppt@kernel.org: fix KEXEC_HANDOVER_DEBUG Kconfig dependency]
+ Link: https://lkml.kernel.org/r/aQHUyyFtiNZhx8jo@kernel.org
+[pasha.tatashin@soleen.com: build fix]
+ Link: https://lkml.kernel.org/r/CA+CK2bBnorfsTymKtv4rKvqGBHs=y=MjEMMRg_tE-RME6n-zUw@mail.gmail.com
+Link: https://lkml.kernel.org/r/20251021000852.2924827-1-pasha.tatashin@soleen.com
+Link: https://lkml.kernel.org/r/20251021000852.2924827-2-pasha.tatashin@soleen.com
+Fixes: fc33e4b44b27 ("kexec: enable KHO support for memory preservation")
+Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Signed-off-by: Mike Rapoport <rppt@kernel.org>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: David Matlack <dmatlack@google.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Masahiro Yamada <masahiroy@kernel.org>
+Cc: Miguel Ojeda <ojeda@kernel.org>
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Samiullah Khawaja <skhawaja@google.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: fa759cd75bce ("kho: allocate metadata directly from the buddy allocator")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/Kconfig.kexec | 9 ++++++
+ kernel/Makefile | 1
+ kernel/kexec_handover.c | 57 ++++++++++++++++++++++++++-------------
+ kernel/kexec_handover_debug.c | 25 +++++++++++++++++
+ kernel/kexec_handover_internal.h | 20 +++++++++++++
+ 5 files changed, 93 insertions(+), 19 deletions(-)
+ create mode 100644 kernel/kexec_handover_debug.c
+ create mode 100644 kernel/kexec_handover_internal.h
+
+--- a/kernel/Kconfig.kexec
++++ b/kernel/Kconfig.kexec
+@@ -109,6 +109,15 @@ config KEXEC_HANDOVER
+ to keep data or state alive across the kexec. For this to work,
+ both source and target kernels need to have this option enabled.
+
++config KEXEC_HANDOVER_DEBUG
++ bool "Enable Kexec Handover debug checks"
++ depends on KEXEC_HANDOVER
++ help
++ This option enables extra sanity checks for the Kexec Handover
++ subsystem. Since, KHO performance is crucial in live update
++ scenarios and the extra code might be adding overhead it is
++ only optionally enabled.
++
+ config CRASH_DUMP
+ bool "kernel crash dumps"
+ default ARCH_DEFAULT_CRASH_DUMP
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -82,6 +82,7 @@ obj-$(CONFIG_KEXEC) += kexec.o
+ obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
+ obj-$(CONFIG_KEXEC_ELF) += kexec_elf.o
+ obj-$(CONFIG_KEXEC_HANDOVER) += kexec_handover.o
++obj-$(CONFIG_KEXEC_HANDOVER_DEBUG) += kexec_handover_debug.o
+ obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
+ obj-$(CONFIG_COMPAT) += compat.o
+ obj-$(CONFIG_CGROUPS) += cgroup/
+--- a/kernel/kexec_handover.c
++++ b/kernel/kexec_handover.c
+@@ -8,6 +8,7 @@
+
+ #define pr_fmt(fmt) "KHO: " fmt
+
++#include <linux/cleanup.h>
+ #include <linux/cma.h>
+ #include <linux/count_zeros.h>
+ #include <linux/debugfs.h>
+@@ -21,6 +22,7 @@
+
+ #include <asm/early_ioremap.h>
+
++#include "kexec_handover_internal.h"
+ /*
+ * KHO is tightly coupled with mm init and needs access to some of mm
+ * internal APIs.
+@@ -116,26 +118,26 @@ static struct kho_out kho_out = {
+
+ static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
+ {
+- void *elm, *res;
++ void *res = xa_load(xa, index);
+
+- elm = xa_load(xa, index);
+- if (elm)
+- return elm;
++ if (res)
++ return res;
++
++ void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);
+
+- elm = kzalloc(sz, GFP_KERNEL);
+ if (!elm)
+ return ERR_PTR(-ENOMEM);
+
++ if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), sz)))
++ return ERR_PTR(-EINVAL);
++
+ res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
+ if (xa_is_err(res))
+- res = ERR_PTR(xa_err(res));
+-
+- if (res) {
+- kfree(elm);
++ return ERR_PTR(xa_err(res));
++ else if (res)
+ return res;
+- }
+
+- return elm;
++ return no_free_ptr(elm);
+ }
+
+ static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
+@@ -289,15 +291,19 @@ static_assert(sizeof(struct khoser_mem_c
+ static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
+ unsigned long order)
+ {
+- struct khoser_mem_chunk *chunk;
++ struct khoser_mem_chunk *chunk __free(kfree) = NULL;
+
+ chunk = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!chunk)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
++
++ if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
++ return ERR_PTR(-EINVAL);
++
+ chunk->hdr.order = order;
+ if (cur_chunk)
+ KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
+- return chunk;
++ return no_free_ptr(chunk);
+ }
+
+ static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
+@@ -318,14 +324,17 @@ static int kho_mem_serialize(struct kho_
+ struct khoser_mem_chunk *chunk = NULL;
+ struct kho_mem_phys *physxa;
+ unsigned long order;
++ int err = -ENOMEM;
+
+ xa_for_each(&ser->track.orders, order, physxa) {
+ struct kho_mem_phys_bits *bits;
+ unsigned long phys;
+
+ chunk = new_chunk(chunk, order);
+- if (!chunk)
++ if (IS_ERR(chunk)) {
++ err = PTR_ERR(chunk);
+ goto err_free;
++ }
+
+ if (!first_chunk)
+ first_chunk = chunk;
+@@ -335,8 +344,10 @@ static int kho_mem_serialize(struct kho_
+
+ if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
+ chunk = new_chunk(chunk, order);
+- if (!chunk)
++ if (IS_ERR(chunk)) {
++ err = PTR_ERR(chunk);
+ goto err_free;
++ }
+ }
+
+ elm = &chunk->bitmaps[chunk->hdr.num_elms];
+@@ -353,7 +364,7 @@ static int kho_mem_serialize(struct kho_
+
+ err_free:
+ kho_mem_ser_free(first_chunk);
+- return -ENOMEM;
++ return err;
+ }
+
+ static void __init deserialize_bitmap(unsigned int order,
+@@ -406,8 +417,8 @@ static void __init kho_mem_deserialize(c
+ * area for early allocations that happen before page allocator is
+ * initialized.
+ */
+-static struct kho_scratch *kho_scratch;
+-static unsigned int kho_scratch_cnt;
++struct kho_scratch *kho_scratch;
++unsigned int kho_scratch_cnt;
+
+ /*
+ * The scratch areas are scaled by default as percent of memory allocated from
+@@ -684,6 +695,9 @@ int kho_preserve_folio(struct folio *fol
+ const unsigned int order = folio_order(folio);
+ struct kho_mem_track *track = &kho_out.ser.track;
+
++ if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
++ return -EINVAL;
++
+ return __kho_preserve_order(track, pfn, order);
+ }
+ EXPORT_SYMBOL_GPL(kho_preserve_folio);
+@@ -710,6 +724,11 @@ int kho_preserve_phys(phys_addr_t phys,
+ if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
+ return -EINVAL;
+
++ if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
++ nr_pages << PAGE_SHIFT))) {
++ return -EINVAL;
++ }
++
+ while (pfn < end_pfn) {
+ const unsigned int order =
+ min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
+--- /dev/null
++++ b/kernel/kexec_handover_debug.c
+@@ -0,0 +1,25 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * kexec_handover_debug.c - kexec handover optional debug functionality
++ * Copyright (C) 2025 Google LLC, Pasha Tatashin <pasha.tatashin@soleen.com>
++ */
++
++#define pr_fmt(fmt) "KHO: " fmt
++
++#include "kexec_handover_internal.h"
++
++bool kho_scratch_overlap(phys_addr_t phys, size_t size)
++{
++ phys_addr_t scratch_start, scratch_end;
++ unsigned int i;
++
++ for (i = 0; i < kho_scratch_cnt; i++) {
++ scratch_start = kho_scratch[i].addr;
++ scratch_end = kho_scratch[i].addr + kho_scratch[i].size;
++
++ if (phys < scratch_end && (phys + size) > scratch_start)
++ return true;
++ }
++
++ return false;
++}
+--- /dev/null
++++ b/kernel/kexec_handover_internal.h
+@@ -0,0 +1,20 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef LINUX_KEXEC_HANDOVER_INTERNAL_H
++#define LINUX_KEXEC_HANDOVER_INTERNAL_H
++
++#include <linux/kexec_handover.h>
++#include <linux/types.h>
++
++extern struct kho_scratch *kho_scratch;
++extern unsigned int kho_scratch_cnt;
++
++#ifdef CONFIG_KEXEC_HANDOVER_DEBUG
++bool kho_scratch_overlap(phys_addr_t phys, size_t size);
++#else
++static inline bool kho_scratch_overlap(phys_addr_t phys, size_t size)
++{
++ return false;
++}
++#endif /* CONFIG_KEXEC_HANDOVER_DEBUG */
++
++#endif /* LINUX_KEXEC_HANDOVER_INTERNAL_H */
--- /dev/null
+From stable+bounces-196849-greg=kroah.com@vger.kernel.org Tue Nov 25 04:58:51 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Nov 2025 22:58:42 -0500
+Subject: mptcp: fix address removal logic in mptcp_pm_nl_rm_addr
+To: stable@vger.kernel.org
+Cc: Gang Yan <yangang@kylinos.cn>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251125035842.386893-1-sashal@kernel.org>
+
+From: Gang Yan <yangang@kylinos.cn>
+
+[ Upstream commit 92e239e36d600002559074994a545fcfac9afd2d ]
+
+Fix an inverted WARN_ON_ONCE condition that prevented normal address
+removal counter updates. The current code only executes the decrement
+logic when the counter is already 0 (abnormal state), while
+normal removals (counter > 0) are ignored.
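+
+Since WARN_ON_ONCE(x) evaluates to x, the condition can be modeled in a
+stand-alone C sketch that makes the inversion visible:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static bool old_decrements(unsigned rm_id, unsigned accepted)
+  {
+          return rm_id && (accepted == 0);  /* only fired in the abnormal state */
+  }
+
+  static bool new_decrements(unsigned rm_id, unsigned accepted)
+  {
+          return rm_id && !(accepted == 0); /* fires for normal removals */
+  }
+
+  int main(void)
+  {
+          /* Normal removal: counter is 2 and should be decremented. */
+          printf("old=%d new=%d\n", old_decrements(1, 2), new_decrements(1, 2));
+          return 0;
+  }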
+
+Signed-off-by: Gang Yan <yangang@kylinos.cn>
+Fixes: 636113918508 ("mptcp: pm: remove '_nl' from mptcp_pm_nl_rm_addr_received")
+Cc: stable@vger.kernel.org
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-10-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_kernel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mptcp/pm_kernel.c
++++ b/net/mptcp/pm_kernel.c
+@@ -548,7 +548,7 @@ static void mptcp_pm_nl_add_addr_receive
+
+ void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id)
+ {
+- if (rm_id && WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
++ if (rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
+ /* Note: if the subflow has been closed before, this
+ * add_addr_accepted counter will not be decremented.
+ */
tracing-tools-fix-incorrcet-short-option-in-usage-te.patch
btrfs-set-inode-flag-btrfs_inode_copy_everything-whe.patch
drm-amdgpu-fix-gpu-page-fault-after-hibernation-on-p.patch
+smb-client-fix-incomplete-backport-in-cfids_invalidation_worker.patch
+drm-i915-psr-check-drm_dp_dpcd_read-return-value-on-psr-dpcd-init.patch
+drm-i915-dp_mst-disable-panel-replay.patch
+drm-amdgpu-jpeg-move-parse_cs-to-amdgpu_jpeg.c.patch
+drm-amdgpu-jpeg-add-parse_cs-for-jpeg5_0_1.patch
+kho-check-if-kho-is-finalized-in-__kho_preserve_order.patch
+kho-warn-and-fail-on-metadata-or-preserved-memory-in-scratch-area.patch
+kho-increase-metadata-bitmap-size-to-page_size.patch
+kho-allocate-metadata-directly-from-the-buddy-allocator.patch
+xfs-replace-strncpy-with-memcpy.patch
+xfs-fix-out-of-bounds-memory-read-error-in-symlink-repair.patch
+mptcp-fix-address-removal-logic-in-mptcp_pm_nl_rm_addr.patch
+drm-amd-display-insert-dccg-log-for-easy-debug.patch
+drm-amd-display-prevent-gating-dtbclk-before-it-is-properly-latched.patch
--- /dev/null
+From 38ef85145fd3655cd4ac16578871afdc0aa6636f Mon Sep 17 00:00:00 2001
+From: Henrique Carvalho <henrique.carvalho@suse.com>
+Date: Wed, 26 Nov 2025 10:55:53 -0300
+Subject: smb: client: fix incomplete backport in cfids_invalidation_worker()
+
+From: Henrique Carvalho <henrique.carvalho@suse.com>
+
+The previous commit bdb596ceb4b7 ("smb: client: fix potential UAF in
+smb2_close_cached_fid()") was an incomplete backport and missed one
+kref_put() call in cfids_invalidation_worker() that should have been
+converted to close_cached_dir().
+
+Fixes: bdb596ceb4b7 ("smb: client: fix potential UAF in smb2_close_cached_fid()")
+Signed-off-by: Henrique Carvalho <henrique.carvalho@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -755,7 +755,7 @@ static void cfids_invalidation_worker(st
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+ /* Drop the ref-count acquired in invalidate_all_cached_dirs */
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++ close_cached_dir(cfid);
+ }
+ }
+
--- /dev/null
+From stable+bounces-196775-greg=kroah.com@vger.kernel.org Mon Nov 24 18:45:25 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Nov 2025 12:45:03 -0500
+Subject: xfs: fix out of bounds memory read error in symlink repair
+To: stable@vger.kernel.org
+Cc: "Darrick J. Wong" <djwong@kernel.org>, Christoph Hellwig <hch@lst.de>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251124174503.4167383-2-sashal@kernel.org>
+
+From: "Darrick J. Wong" <djwong@kernel.org>
+
+[ Upstream commit 678e1cc2f482e0985a0613ab4a5bf89c497e5acc ]
+
+xfs/286 produced this report on my test fleet:
+
+ ==================================================================
+ BUG: KFENCE: out-of-bounds read in memcpy_orig+0x54/0x110
+
+ Out-of-bounds read at 0xffff88843fe9e038 (184B right of kfence-#184):
+ memcpy_orig+0x54/0x110
+ xrep_symlink_salvage_inline+0xb3/0xf0 [xfs]
+ xrep_symlink_salvage+0x100/0x110 [xfs]
+ xrep_symlink+0x2e/0x80 [xfs]
+ xrep_attempt+0x61/0x1f0 [xfs]
+ xfs_scrub_metadata+0x34f/0x5c0 [xfs]
+ xfs_ioc_scrubv_metadata+0x387/0x560 [xfs]
+ xfs_file_ioctl+0xe23/0x10e0 [xfs]
+ __x64_sys_ioctl+0x76/0xc0
+ do_syscall_64+0x4e/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+ kfence-#184: 0xffff88843fe9df80-0xffff88843fe9dfea, size=107, cache=kmalloc-128
+
+ allocated by task 3470 on cpu 1 at 263329.131592s (192823.508886s ago):
+ xfs_init_local_fork+0x79/0xe0 [xfs]
+ xfs_iformat_local+0xa4/0x170 [xfs]
+ xfs_iformat_data_fork+0x148/0x180 [xfs]
+ xfs_inode_from_disk+0x2cd/0x480 [xfs]
+ xfs_iget+0x450/0xd60 [xfs]
+ xfs_bulkstat_one_int+0x6b/0x510 [xfs]
+ xfs_bulkstat_iwalk+0x1e/0x30 [xfs]
+ xfs_iwalk_ag_recs+0xdf/0x150 [xfs]
+ xfs_iwalk_run_callbacks+0xb9/0x190 [xfs]
+ xfs_iwalk_ag+0x1dc/0x2f0 [xfs]
+ xfs_iwalk_args.constprop.0+0x6a/0x120 [xfs]
+ xfs_iwalk+0xa4/0xd0 [xfs]
+ xfs_bulkstat+0xfa/0x170 [xfs]
+ xfs_ioc_fsbulkstat.isra.0+0x13a/0x230 [xfs]
+ xfs_file_ioctl+0xbf2/0x10e0 [xfs]
+ __x64_sys_ioctl+0x76/0xc0
+ do_syscall_64+0x4e/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+ CPU: 1 UID: 0 PID: 1300113 Comm: xfs_scrub Not tainted 6.18.0-rc4-djwx #rc4 PREEMPT(lazy) 3d744dd94e92690f00a04398d2bd8631dcef1954
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.0-4.module+el8.8.0+21164+ed375313 04/01/2014
+ ==================================================================
+
+On further analysis, I realized that the second parameter to min() is
+not correct. xfs_ifork::if_bytes is the size of the xfs_ifork::if_data
+buffer. if_bytes can be smaller than the data fork size because:
+
+(a) the forkoff code tries to keep the data area as large as possible
+(b) for symbolic links, if_bytes is the ondisk file size + 1
+(c) forkoff is always a multiple of 8.
+
+Case in point: for a single-byte symlink target, forkoff will be
+8 but the buffer will only be 2 bytes long.
+
+In other words, the logic here is wrong and we walk off the end of the
+incore buffer. Fix that.
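+
+A stand-alone demonstration of the two bounds (1024 stands in for
+XFS_SYMLINK_MAXLEN; the exact value is irrelevant here):
+
+  #include <stdio.h>
+
+  #define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+  int main(void)
+  {
+          /* Single-byte symlink target: if_data holds target + NUL. */
+          int if_bytes = 1 + 1;  /* actual incore buffer size */
+          int fork_size = 8;     /* forkoff-derived size, multiple of 8 */
+          int maxlen = 1024;     /* XFS_SYMLINK_MAXLEN stand-in */
+
+          printf("old bound: %d bytes (past the %d-byte buffer)\n",
+                 MIN(maxlen, fork_size), if_bytes);
+          printf("new bound: %d bytes (within if_bytes)\n",
+                 MIN(maxlen, if_bytes));
+          return 0;
+  }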
+
+Cc: stable@vger.kernel.org # v6.10
+Fixes: 2651923d8d8db0 ("xfs: online repair of symbolic links")
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/scrub/symlink_repair.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/xfs/scrub/symlink_repair.c
++++ b/fs/xfs/scrub/symlink_repair.c
+@@ -184,7 +184,7 @@ xrep_symlink_salvage_inline(
+ sc->ip->i_disk_size == 1 && old_target[0] == '?')
+ return 0;
+
+- nr = min(XFS_SYMLINK_MAXLEN, xfs_inode_data_fork_size(ip));
++ nr = min(XFS_SYMLINK_MAXLEN, ifp->if_bytes);
+ memcpy(target_buf, ifp->if_data, nr);
+ return nr;
+ }
--- /dev/null
+From stable+bounces-196776-greg=kroah.com@vger.kernel.org Mon Nov 24 18:45:27 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Nov 2025 12:45:02 -0500
+Subject: xfs: Replace strncpy with memcpy
+To: stable@vger.kernel.org
+Cc: Marcelo Moreira <marcelomoreira1905@gmail.com>, Dave Chinner <david@fromorbit.com>, Christoph Hellwig <hch@lst.de>, Carlos Maiolino <cmaiolino@redhat.com>, "Darrick J. Wong" <djwong@kernel.org>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251124174503.4167383-1-sashal@kernel.org>
+
+From: Marcelo Moreira <marcelomoreira1905@gmail.com>
+
+[ Upstream commit 33ddc796ecbd50cd6211aa9e9eddbf4567038b49 ]
+
+This change modernizes the code by aligning it with current kernel best
+practices. It improves code clarity and consistency, as strncpy is
+deprecated, as explained in Documentation/process/deprecated.rst. It does
+not alter functionality or introduce any behavioral change.
+
+Suggested-by: Dave Chinner <david@fromorbit.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
+Signed-off-by: Marcelo Moreira <marcelomoreira1905@gmail.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Stable-dep-of: 678e1cc2f482 ("xfs: fix out of bounds memory read error in symlink repair")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/scrub/symlink_repair.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/xfs/scrub/symlink_repair.c
++++ b/fs/xfs/scrub/symlink_repair.c
+@@ -185,7 +185,7 @@ xrep_symlink_salvage_inline(
+ return 0;
+
+ nr = min(XFS_SYMLINK_MAXLEN, xfs_inode_data_fork_size(ip));
+- strncpy(target_buf, ifp->if_data, nr);
++ memcpy(target_buf, ifp->if_data, nr);
+ return nr;
+ }
+