--- /dev/null
+From 32953485c558cecf08f33fbfa251e80e44cef981 Mon Sep 17 00:00:00 2001
+From: Aric Cyr <aric.cyr@amd.com>
+Date: Wed, 8 Feb 2023 19:51:42 -0500
+Subject: drm/amd/display: Do not update DRR while BW optimizations pending
+
+From: Aric Cyr <aric.cyr@amd.com>
+
+commit 32953485c558cecf08f33fbfa251e80e44cef981 upstream.
+
+[why]
+While bandwidth optimizations are pending, it's possible a pstate change
+will occur.  During this time, the VSYNC handler should not also try to
+update the DRR parameters, as doing so can cause a pstate hang.
+
+[how]
+Do not adjust DRR while a bandwidth optimization is pending, i.e. while
+optimized_required or wm_optimized_required is set; see the sketch below.
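+
+Illustrative sketch only (not part of the diff below, and abridged): with
+the guard in place, the top of dc_stream_adjust_vmin_vmax() behaves
+roughly as follows.  The flag names are the ones the first hunk below
+touches; the remaining field copies and per-pipe timing programming are
+omitted and stay as in the existing code:
+
+	bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+					struct dc_stream_state *stream,
+					struct dc_crtc_timing_adjust *adjust)
+	{
+		/* A bandwidth optimization is still pending; skip this DRR
+		 * update and let the caller retry on a later VSYNC.
+		 */
+		if (dc->optimized_required || dc->wm_optimized_required)
+			return false;
+
+		/* Safe to take the new DRR range now. */
+		stream->adjust.v_total_max = adjust->v_total_max;
+		stream->adjust.v_total_mid = adjust->v_total_mid;
+		stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
+
+		/* ... remaining fields and per-pipe programming unchanged ... */
+	}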
+
+Reviewed-by: Aric Cyr <aric.cyr@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Aric Cyr <aric.cyr@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 48 ++++++++++++++++++-------------
+ 1 file changed, 29 insertions(+), 19 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -400,6 +400,13 @@ bool dc_stream_adjust_vmin_vmax(struct d
+ {
+ int i;
+
++ /*
++ * Don't adjust DRR while there's bandwidth optimizations pending to
++ * avoid conflicting with firmware updates.
++ */
++ if (dc->optimized_required || dc->wm_optimized_required)
++ return false;
++
+ stream->adjust.v_total_max = adjust->v_total_max;
+ stream->adjust.v_total_mid = adjust->v_total_mid;
+ stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
+@@ -2201,27 +2208,33 @@ void dc_post_update_surfaces_to_stream(s
+
+ post_surface_trace(dc);
+
+- if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+- else
++ /*
++ * Only relevant for DCN behavior where we can guarantee the optimization
++ * is safe to apply - retain the legacy behavior for DCE.
++ */
++
++ if (dc->ctx->dce_version < DCE_VERSION_MAX)
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
++ else {
++ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+
+- if (is_flip_pending_in_pipes(dc, context))
+- return;
++ if (is_flip_pending_in_pipes(dc, context))
++ return;
+
+- for (i = 0; i < dc->res_pool->pipe_count; i++)
+- if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+- context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+- context->res_ctx.pipe_ctx[i].pipe_idx = i;
+- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+- }
++ for (i = 0; i < dc->res_pool->pipe_count; i++)
++ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
++ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
++ context->res_ctx.pipe_ctx[i].pipe_idx = i;
++ dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
++ }
+
+- process_deferred_updates(dc);
++ process_deferred_updates(dc);
+
+- dc->hwss.optimize_bandwidth(dc, context);
++ dc->hwss.optimize_bandwidth(dc, context);
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
+- dc->hwss.update_dsc_pg(dc, context, true);
++ if (dc->debug.enable_double_buffered_dsc_pg_support)
++ dc->hwss.update_dsc_pg(dc, context, true);
++ }
+
+ dc->optimized_required = false;
+ dc->wm_optimized_required = false;
+@@ -4203,12 +4216,9 @@ void dc_commit_updates_for_stream(struct
+ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ new_pipe->plane_state->force_full_update = true;
+ }
+- } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
++ } else if (update_type == UPDATE_TYPE_FAST) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+- *
+- * Only relevant for DCN behavior where we can guarantee the optimization
+- * is safe to apply - retain the legacy behavior for DCE.
+ */
+ dc_post_update_surfaces_to_stream(dc);
+ }