--- /dev/null
+From 0341841ae32206a40c5d90e8aa8f4ddba028d539 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Jan 2023 10:54:12 -0500
+Subject: drm/amd/display: fix PSR-SU/DSC interoperability support
+
+From: Hamza Mahfooz <hamza.mahfooz@amd.com>
+
+[ Upstream commit 13b90cf900ab69dd5cab3cc5035bc7614037e64e ]
+
+Currently, there are issues with enabling PSR-SU + DSC. This stems from
+the fact that DSC imposes a slice height on transmitted video data and
+we are not conforming to that slice height in PSR-SU regions. So, pass
+slice_height into su_y_granularity to feed the DSC slice height into
+PSR-SU code.
+
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 0b5dfe12755f ("drm/amd/display: fix a divided-by-zero error")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 3 ++
+ .../amd/display/modules/power/power_helpers.c | 31 +++++++++++++++++++
+ .../amd/display/modules/power/power_helpers.h | 3 ++
+ 3 files changed, 37 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 26291db0a3cf6..872d06fe14364 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+ psr_config.allow_multi_disp_optimizations =
+ (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
+
++ if (!psr_su_set_y_granularity(dc, link, stream, &psr_config))
++ return false;
++
+ ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+
+ }
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 9b5d9b2c9a6a7..cf4fa87c7db60 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -916,3 +916,34 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s
+ {
+ return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
+ }
++
++bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
++ struct dc_stream_state *stream,
++ struct psr_config *config)
++{
++ uint16_t pic_height;
++ uint8_t slice_height;
++
++ if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
++ (!dc->caps.edp_dsc_support ||
++ link->panel_config.dsc.disable_dsc_edp ||
++ !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
++ !stream->timing.dsc_cfg.num_slices_v))
++ return true;
++
++ pic_height = stream->timing.v_addressable +
++ stream->timing.v_border_top + stream->timing.v_border_bottom;
++ slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
++
++ if (slice_height) {
++ if (config->su_y_granularity &&
++ (slice_height % config->su_y_granularity)) {
++ ASSERT(0);
++ return false;
++ }
++
++ config->su_y_granularity = slice_height;
++ }
++
++ return true;
++}
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+index 316452e9dbc91..bb16b37b83da7 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+@@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
+ const struct dc_stream_state *stream);
+ bool mod_power_only_edp(const struct dc_state *context,
+ const struct dc_stream_state *stream);
++bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
++ struct dc_stream_state *stream,
++ struct psr_config *config);
+ #endif /* MODULES_POWER_POWER_HELPERS_H_ */
+--
+2.39.2
+
--- /dev/null
+From 868062f91631fb0386c010406ceaa2fe29ce564d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Apr 2023 14:04:11 -0400
+Subject: drm/amd/display: limit timing for single dimm memory
+
+From: Daniel Miess <Daniel.Miess@amd.com>
+
+[ Upstream commit 1e994cc0956b8dabd1b1fef315bbd722733b8aa8 ]
+
+[Why]
+1. It could hit bandwidth limitation under single dimm
+memory when connecting 8K external monitor.
+2. IsSupportedVidPn got validation failed with
+2K240Hz eDP + 8K24Hz external monitor.
+3. It's better to filter out such combination in
+EnumVidPnCofuncModality
+4. For short term, filter out in dc bandwidth validation.
+
+[How]
+Force 2K@240Hz+8K@24Hz timing validation false in dc.
+
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Daniel Miess <Daniel.Miess@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../amd/display/dc/dcn314/dcn314_resource.c | 20 +++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 54ed3de869d3b..9ffba4c6fe550 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -1697,6 +1697,23 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
+ *panel_config = panel_config_defaults;
+ }
+
++static bool filter_modes_for_single_channel_workaround(struct dc *dc,
++ struct dc_state *context)
++{
++ // Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR
++ if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
++ int total_phy_pix_clk = 0;
++
++ for (int i = 0; i < context->stream_count; i++)
++ if (context->res_ctx.pipe_ctx[i].stream)
++ total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
++
++ if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps
++ return true;
++ }
++ return false;
++}
++
+ bool dcn314_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+@@ -1712,6 +1729,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
+
+ BW_VAL_TRACE_COUNT();
+
++ if (filter_modes_for_single_channel_workaround(dc, context))
++ goto validate_fail;
++
+ DC_FP_START();
+ // do not support self refresh only
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+--
+2.39.2
+
--- /dev/null
+From 24f39f6af9b44176d37b81e8469002a3507bd676 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Feb 2023 14:16:12 -0500
+Subject: drm/amd/display: Remove stutter only configurations
+
+From: Nasir Osman <nasir.osman@amd.com>
+
+[ Upstream commit 71c4ca2d3b079d0ba4d9b3033641fea906cebfb6 ]
+
+[why]
+Newer ASICs such as DCN314 need to allow for both self refresh and mem
+clk switching rather than just self refresh only. Otherwise, we can see
+some p-state hangs on ASICs that do support mem clk switching.
+
+[how]
+Added an allow_self_refresh_only flag for dcn30_internal_validate_bw
+and created a validate_bw method for DCN314 with the allow_self_refresh_only
+flag set to false (to support mem clk switching).
+
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Nasir Osman <nasir.osman@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 1e994cc0956b ("drm/amd/display: limit timing for single dimm memory")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/amd/display/dc/dcn30/dcn30_resource.c | 16 +++---
+ .../drm/amd/display/dc/dcn30/dcn30_resource.h | 3 +-
+ .../drm/amd/display/dc/dcn31/dcn31_resource.c | 2 +-
+ .../amd/display/dc/dcn314/dcn314_resource.c | 57 ++++++++++++++++++-
+ .../amd/display/dc/dcn314/dcn314_resource.h | 4 ++
+ .../drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 2 +-
+ 6 files changed, 73 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index c18c52a60100e..c4e206aedf731 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -1648,7 +1648,8 @@ noinline bool dcn30_internal_validate_bw(
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+- bool fast_validate)
++ bool fast_validate,
++ bool allow_self_refresh_only)
+ {
+ bool out = false;
+ bool repopulate_pipes = false;
+@@ -1675,7 +1676,7 @@ noinline bool dcn30_internal_validate_bw(
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+- if (!fast_validate) {
++ if (!fast_validate || !allow_self_refresh_only) {
+ /*
+ * DML favors voltage over p-state, but we're more interested in
+ * supporting p-state over voltage. We can't support p-state in
+@@ -1688,11 +1689,12 @@ noinline bool dcn30_internal_validate_bw(
+ if (vlevel < context->bw_ctx.dml.soc.num_states)
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ }
+- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
++ if (allow_self_refresh_only &&
++ (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
++ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
+ /*
+- * If mode is unsupported or there's still no p-state support then
+- * fall back to favoring voltage.
++ * If mode is unsupported or there's still no p-state support
++ * then fall back to favoring voltage.
+ *
+ * We don't actually support prefetch mode 2, so require that we
+ * at least support prefetch mode 1.
+@@ -2063,7 +2065,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
+ BW_VAL_TRACE_COUNT();
+
+ DC_FP_START();
+- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
++ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ DC_FP_END();
+
+ if (pipe_cnt == 0)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+index 7d063c7d6a4bf..8e6b8b7368fdb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+@@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw(
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+- bool fast_validate);
++ bool fast_validate,
++ bool allow_self_refresh_only);
+ void dcn30_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index 3ca517dcc82dc..d3918a10773a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -1795,7 +1795,7 @@ bool dcn31_validate_bandwidth(struct dc *dc,
+ BW_VAL_TRACE_COUNT();
+
+ DC_FP_START();
+- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
++ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ DC_FP_END();
+
+ // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 73f519dbdb531..54ed3de869d3b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -1697,6 +1697,61 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
+ *panel_config = panel_config_defaults;
+ }
+
++bool dcn314_validate_bandwidth(struct dc *dc,
++ struct dc_state *context,
++ bool fast_validate)
++{
++ bool out = false;
++
++ BW_VAL_TRACE_SETUP();
++
++ int vlevel = 0;
++ int pipe_cnt = 0;
++ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++ DC_LOGGER_INIT(dc->ctx->logger);
++
++ BW_VAL_TRACE_COUNT();
++
++ DC_FP_START();
++ // do not support self refresh only
++ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
++ DC_FP_END();
++
++ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
++ if (pipe_cnt == 0)
++ fast_validate = false;
++
++ if (!out)
++ goto validate_fail;
++
++ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
++
++ if (fast_validate) {
++ BW_VAL_TRACE_SKIP(fast);
++ goto validate_out;
++ }
++
++ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
++
++ BW_VAL_TRACE_END_WATERMARKS();
++
++ goto validate_out;
++
++validate_fail:
++ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
++ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
++
++ BW_VAL_TRACE_SKIP(fail);
++ out = false;
++
++validate_out:
++ kfree(pipes);
++
++ BW_VAL_TRACE_FINISH();
++
++ return out;
++}
++
+ static struct resource_funcs dcn314_res_pool_funcs = {
+ .destroy = dcn314_destroy_resource_pool,
+ .link_enc_create = dcn31_link_encoder_create,
+@@ -1704,7 +1759,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
+ .link_encs_assign = link_enc_cfg_link_encs_assign,
+ .link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ .panel_cntl_create = dcn31_panel_cntl_create,
+- .validate_bandwidth = dcn31_validate_bandwidth,
++ .validate_bandwidth = dcn314_validate_bandwidth,
+ .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
+ .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+index 0dd3153aa5c17..49ffe71018dfb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+@@ -39,6 +39,10 @@ struct dcn314_resource_pool {
+ struct resource_pool base;
+ };
+
++bool dcn314_validate_bandwidth(struct dc *dc,
++ struct dc_state *context,
++ bool fast_validate);
++
+ struct resource_pool *dcn314_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+index d4c0f9cdac8e2..4fa6363647937 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+@@ -634,7 +634,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ while (dummy_latency_index < max_latency_table_entries) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
++ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+
+ if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
+ dm_allow_self_refresh_and_mclk_switch)
+--
+2.39.2
+