--- /dev/null
+From 97ca308925a50aa80711ccfaf814fa3898374862 Mon Sep 17 00:00:00 2001
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Date: Mon, 20 Jun 2022 16:37:07 -0400
+Subject: drm/amd/display: Add minimal pipe split transition state
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+commit 97ca308925a50aa80711ccfaf814fa3898374862 upstream.
+
+[WHY?]
+When adding or removing a plane in some configurations, unsupported pipe
+programming can occur when moving to the new plane configuration. Such
+cases include pipe split on multi-display, with MPO, and/or ODM.
+
+[HOW?]
+Add a safe transition state that minimizes pipe usage before programming
+the new configuration. When adding a plane, the current state requires the
+fewest pipes, so it is applied without splitting; this must be done prior
+to updating the plane_state for a seamless transition. When removing a
+plane, the new state requires the fewest pipes, so it is applied without
+splitting.
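+
+A minimal sketch of the resulting sequence (illustrative pseudocode only;
+plane_is_being_added/plane_is_being_removed are shorthand for the
+plane_count vs. surface_count comparison, see dc_update_planes_and_stream()
+and commit_minimal_transition_state() below for the real implementation):
+
+        if (plane_is_being_added) {
+                /* the current state already uses the fewest pipes */
+                commit_minimal_transition_state(dc, dc->current_state);
+        }
+
+        update_planes_and_stream_state(dc, srf_updates, surface_count,
+                        stream, stream_update, &update_type, &context);
+
+        if (plane_is_being_removed) {
+                /* the new state uses the fewest pipes */
+                commit_minimal_transition_state(dc, context);
+                update_type = UPDATE_TYPE_FULL;
+        }
+
+        commit_planes_for_stream(dc, srf_updates, surface_count, stream,
+                        stream_update, update_type, context);
+
+commit_minimal_transition_state() temporarily forces
+dc->debug.pipe_split_policy to MPC_SPLIT_AVOID while validating and
+committing the transition context, then restores the previous policy.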
+
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 277 +++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 18 +
+ 2 files changed, 295 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2534,6 +2534,137 @@ static void copy_stream_update_to_stream
+ }
+ }
+
++void dc_reset_state(struct dc *dc, struct dc_state *context)
++{
++ dc_resource_state_destruct(context);
++
++ /* clear the structure, but don't reset the reference count */
++ memset(context, 0, offsetof(struct dc_state, refcount));
++
++ init_state(dc, context);
++}
++
++static bool update_planes_and_stream_state(struct dc *dc,
++ struct dc_surface_update *srf_updates, int surface_count,
++ struct dc_stream_state *stream,
++ struct dc_stream_update *stream_update,
++ enum surface_update_type *new_update_type,
++ struct dc_state **new_context)
++{
++ struct dc_state *context;
++ int i, j;
++ enum surface_update_type update_type;
++ const struct dc_stream_status *stream_status;
++ struct dc_context *dc_ctx = dc->ctx;
++
++ stream_status = dc_stream_get_status(stream);
++
++ if (!stream_status) {
++ if (surface_count) /* Only an error condition if surface_count is non-zero */
++ ASSERT(false);
++
++ return false; /* Cannot commit surface to stream that is not committed */
++ }
++
++ context = dc->current_state;
++
++ update_type = dc_check_update_surfaces_for_stream(
++ dc, srf_updates, surface_count, stream_update, stream_status);
++
++ /* update current stream with the new updates */
++ copy_stream_update_to_stream(dc, context, stream, stream_update);
++
++ /* do not perform surface update if surface has invalid dimensions
++ * (all zero) and no scaling_info is provided
++ */
++ if (surface_count > 0) {
++ for (i = 0; i < surface_count; i++) {
++ if ((srf_updates[i].surface->src_rect.width == 0 ||
++ srf_updates[i].surface->src_rect.height == 0 ||
++ srf_updates[i].surface->dst_rect.width == 0 ||
++ srf_updates[i].surface->dst_rect.height == 0) &&
++ (!srf_updates[i].scaling_info ||
++ srf_updates[i].scaling_info->src_rect.width == 0 ||
++ srf_updates[i].scaling_info->src_rect.height == 0 ||
++ srf_updates[i].scaling_info->dst_rect.width == 0 ||
++ srf_updates[i].scaling_info->dst_rect.height == 0)) {
++ DC_ERROR("Invalid src/dst rects in surface update!\n");
++ return false;
++ }
++ }
++ }
++
++ if (update_type >= update_surface_trace_level)
++ update_surface_trace(dc, srf_updates, surface_count);
++
++ if (update_type >= UPDATE_TYPE_FULL) {
++ struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
++
++ for (i = 0; i < surface_count; i++)
++ new_planes[i] = srf_updates[i].surface;
++
++ /* initialize scratch memory for building context */
++ context = dc_create_state(dc);
++ if (context == NULL) {
++ DC_ERROR("Failed to allocate new validate context!\n");
++ return false;
++ }
++
++ dc_resource_state_copy_construct(
++ dc->current_state, context);
++
++ /* remove old surfaces from context */
++ if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
++
++ BREAK_TO_DEBUGGER();
++ goto fail;
++ }
++
++ /* add surface to context */
++ if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
++
++ BREAK_TO_DEBUGGER();
++ goto fail;
++ }
++ }
++
++ /* save update parameters into surface */
++ for (i = 0; i < surface_count; i++) {
++ struct dc_plane_state *surface = srf_updates[i].surface;
++
++ copy_surface_update_to_plane(surface, &srf_updates[i]);
++
++ if (update_type >= UPDATE_TYPE_MED) {
++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
++
++ if (pipe_ctx->plane_state != surface)
++ continue;
++
++ resource_build_scaling_params(pipe_ctx);
++ }
++ }
++ }
++
++ if (update_type == UPDATE_TYPE_FULL) {
++ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
++ BREAK_TO_DEBUGGER();
++ goto fail;
++ }
++ }
++
++ *new_context = context;
++ *new_update_type = update_type;
++
++ return true;
++
++fail:
++ dc_release_state(context);
++
++ return false;
++
++}
++
+ static void commit_planes_do_stream_update(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+@@ -2931,6 +3062,152 @@ static void commit_planes_for_stream(str
+ }
+ }
+
++static bool commit_minimal_transition_state(struct dc *dc,
++ struct dc_state *transition_base_context)
++{
++ struct dc_state *transition_context = dc_create_state(dc);
++ enum pipe_split_policy tmp_policy;
++ enum dc_status ret = DC_ERROR_UNEXPECTED;
++ unsigned int i, j;
++
++ if (!transition_context)
++ return false;
++
++ tmp_policy = dc->debug.pipe_split_policy;
++ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
++
++ dc_resource_state_copy_construct(transition_base_context, transition_context);
++
++ //commit minimal state
++ if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
++ for (i = 0; i < transition_context->stream_count; i++) {
++ struct dc_stream_status *stream_status = &transition_context->stream_status[i];
++
++ for (j = 0; j < stream_status->plane_count; j++) {
++ struct dc_plane_state *plane_state = stream_status->plane_states[j];
++
++ /* force vsync flip when reconfiguring pipes to prevent underflow
++ * and corruption
++ */
++ plane_state->flip_immediate = false;
++ }
++ }
++
++ ret = dc_commit_state_no_check(dc, transition_context);
++ }
++
++ //always release as dc_commit_state_no_check retains in good case
++ dc_release_state(transition_context);
++
++ //restore previous pipe split policy
++ dc->debug.pipe_split_policy = tmp_policy;
++
++ if (ret != DC_OK) {
++ //this should never happen
++ BREAK_TO_DEBUGGER();
++ return false;
++ }
++
++ //force full surface update
++ for (i = 0; i < dc->current_state->stream_count; i++) {
++ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
++ dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
++ }
++ }
++
++ return true;
++}
++
++bool dc_update_planes_and_stream(struct dc *dc,
++ struct dc_surface_update *srf_updates, int surface_count,
++ struct dc_stream_state *stream,
++ struct dc_stream_update *stream_update)
++{
++ struct dc_state *context;
++ enum surface_update_type update_type;
++ int i;
++
++ /* In cases where MPO and split or ODM are used transitions can
++ * cause underflow. Apply stream configuration with minimal pipe
++ * split first to avoid unsupported transitions for active pipes.
++ */
++ bool force_minimal_pipe_splitting = false;
++ bool is_plane_addition = false;
++
++ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
++
++ if (cur_stream_status &&
++ dc->current_state->stream_count > 0 &&
++ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
++ /* determine if minimal transition is required */
++ if (cur_stream_status->plane_count > surface_count) {
++ force_minimal_pipe_splitting = true;
++ } else if (cur_stream_status->plane_count < surface_count) {
++ force_minimal_pipe_splitting = true;
++ is_plane_addition = true;
++ }
++ }
++
++ /* on plane addition, minimal state is the current one */
++ if (force_minimal_pipe_splitting && is_plane_addition &&
++ !commit_minimal_transition_state(dc, dc->current_state))
++ return false;
++
++ if (!update_planes_and_stream_state(
++ dc,
++ srf_updates,
++ surface_count,
++ stream,
++ stream_update,
++ &update_type,
++ &context))
++ return false;
++
++ /* on plane addition, minimal state is the new one */
++ if (force_minimal_pipe_splitting && !is_plane_addition) {
++ if (!commit_minimal_transition_state(dc, context)) {
++ dc_release_state(context);
++ return false;
++ }
++
++ update_type = UPDATE_TYPE_FULL;
++ }
++
++ commit_planes_for_stream(
++ dc,
++ srf_updates,
++ surface_count,
++ stream,
++ stream_update,
++ update_type,
++ context);
++
++ if (dc->current_state != context) {
++
++ /* Since memory free requires elevated IRQL, an interrupt
++ * request is generated by mem free. If this happens
++ * between freeing and reassigning the context, our vsync
++ * interrupt will call into dc and cause a memory
++ * corruption BSOD. Hence, we first reassign the context,
++ * then free the old context.
++ */
++
++ struct dc_state *old = dc->current_state;
++
++ dc->current_state = context;
++ dc_release_state(old);
++
++ // clear any forced full updates
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
++ pipe_ctx->plane_state->force_full_update = false;
++ }
++ }
++ return true;
++}
++
+ void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -288,6 +288,9 @@ bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+
+ /*
++ * Set up stream attributes; if no stream updates are provided,
++ * there will be no impact on the stream parameters.
++ *
+ * Set up surface attributes and associate to a stream
+ * The surfaces parameter is an absolute set of all surface active for the stream.
+ * If no surfaces are provided, the stream will be blanked; no memory read.
+@@ -296,8 +299,23 @@ bool dc_is_stream_scaling_unchanged(
+ * After this call:
+ * Surfaces attributes are programmed and configured to be composed into stream.
+ * This does not trigger a flip. No surface address is programmed.
++ *
+ */
++bool dc_update_planes_and_stream(struct dc *dc,
++ struct dc_surface_update *surface_updates, int surface_count,
++ struct dc_stream_state *dc_stream,
++ struct dc_stream_update *stream_update);
+
++/*
++ * Set up surface attributes and associate to a stream
++ * The surfaces parameter is an absolute set of all surface active for the stream.
++ * If no surfaces are provided, the stream will be blanked; no memory read.
++ * Any flip related attribute changes must be done through this interface.
++ *
++ * After this call:
++ * Surfaces attributes are programmed and configured to be composed into stream.
++ * This does not trigger a flip. No surface address is programmed.
++ */
+ void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
--- /dev/null
+From 81f743a08f3b214638aa389e252ae5e6c3592e7c Mon Sep 17 00:00:00 2001
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Date: Thu, 23 Feb 2023 11:36:08 -0700
+Subject: drm/amd/display: Add wrapper to call planes and stream update
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+commit 81f743a08f3b214638aa389e252ae5e6c3592e7c upstream.
+
+[Why & How]
+This commit is part of a sequence of changes that replaces the commit
+sequence used in the DC with a new one. As a result of this transition,
+we moved some specific parts of the commit sequence into amdgpu_dm. This
+commit adds a wrapper inside DM that enables our drivers to do any
+necessary preparation or changes before we offload the plane/stream
+update to DC.
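+
+For reference, a call site then looks roughly like this (sketch based on
+the hunks below; the only DM-side preparation added here is calling
+dc_post_update_surfaces_to_stream() when the update type is
+UPDATE_TYPE_FAST):
+
+        update_planes_and_stream_adapter(dm->dc, acrtc_state->update_type,
+                        planes_count, acrtc_state->stream,
+                        &bundle->stream_update, bundle->surface_updates);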
+
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 51 +++++++++++++++++-----
+ 1 file changed, 41 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -354,6 +354,35 @@ static inline bool is_dc_timing_adjust_n
+ }
+
+ /**
++ * update_planes_and_stream_adapter() - Send planes to be updated in DC
++ *
++ * DC has a generic way to update planes and stream via the
++ * dc_update_planes_and_stream() function; however, DM might need some
++ * adjustments and preparation before calling it. This function is a wrapper
++ * for dc_update_planes_and_stream() that does any required configuration
++ * before passing control to DC.
++ */
++static inline bool update_planes_and_stream_adapter(struct dc *dc,
++ int update_type,
++ int planes_count,
++ struct dc_stream_state *stream,
++ struct dc_stream_update *stream_update,
++ struct dc_surface_update *array_of_surface_update)
++{
++ /*
++ * Previous frame finished and HW is ready for optimization.
++ */
++ if (update_type == UPDATE_TYPE_FAST)
++ dc_post_update_surfaces_to_stream(dc);
++
++ return dc_update_planes_and_stream(dc,
++ array_of_surface_update,
++ planes_count,
++ stream,
++ stream_update);
++}
++
++/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+@@ -2551,11 +2580,12 @@ static void dm_gpureset_commit_state(str
+ true;
+ }
+
+- dc_update_planes_and_stream(dm->dc,
+- bundle->surface_updates,
+- dc_state->stream_status->plane_count,
+- dc_state->streams[k],
+- &bundle->stream_update);
++ update_planes_and_stream_adapter(dm->dc,
++ UPDATE_TYPE_FULL,
++ dc_state->stream_status->plane_count,
++ dc_state->streams[k],
++ &bundle->stream_update,
++ bundle->surface_updates);
+ }
+
+ cleanup:
+@@ -9240,11 +9270,12 @@ static void amdgpu_dm_commit_planes(stru
+ }
+ mutex_lock(&dm->dc_lock);
+
+- dc_update_planes_and_stream(dm->dc,
+- bundle->surface_updates,
+- planes_count,
+- acrtc_state->stream,
+- &bundle->stream_update);
++ update_planes_and_stream_adapter(dm->dc,
++ acrtc_state->update_type,
++ planes_count,
++ acrtc_state->stream,
++ &bundle->stream_update,
++ bundle->surface_updates);
+
+ /**
+ * Enable or disable the interrupts on the backend.
--- /dev/null
+From f7511289821ffccc07579406d6ab520aa11049f5 Mon Sep 17 00:00:00 2001
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Date: Thu, 6 Oct 2022 16:40:55 -0400
+Subject: drm/amd/display: Use dc_update_planes_and_stream
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+commit f7511289821ffccc07579406d6ab520aa11049f5 upstream.
+
+[Why & How]
+The old dc_commit_updates_for_stream does not handle many corner cases
+where DC features require special attention; as a result, it is starting
+to show its limitations (e.g., the SubVP feature is not supported
+by it, among other cases). To modernize and unify our internal API, this
+commit replaces the old dc_commit_updates_for_stream with
+dc_update_planes_and_stream, which has more features.
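+
+The conversion at each call site is mechanical; the new entry point no
+longer takes an explicit dc_state argument (sketch mirroring the hunks
+below):
+
+        /* before */
+        dc_commit_updates_for_stream(dm->dc, bundle->surface_updates,
+                        planes_count, acrtc_state->stream,
+                        &bundle->stream_update, dc_state);
+
+        /* after */
+        dc_update_planes_and_stream(dm->dc, bundle->surface_updates,
+                        planes_count, acrtc_state->stream,
+                        &bundle->stream_update);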
+
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 30 +++++++++++-----------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2550,10 +2550,12 @@ static void dm_gpureset_commit_state(str
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+- dc_commit_updates_for_stream(
+- dm->dc, bundle->surface_updates,
++
++ dc_update_planes_and_stream(dm->dc,
++ bundle->surface_updates,
+ dc_state->stream_status->plane_count,
+- dc_state->streams[k], &bundle->stream_update, dc_state);
++ dc_state->streams[k],
++ &bundle->stream_update);
+ }
+
+ cleanup:
+@@ -9238,12 +9240,11 @@ static void amdgpu_dm_commit_planes(stru
+ }
+ mutex_lock(&dm->dc_lock);
+
+- dc_commit_updates_for_stream(dm->dc,
+- bundle->surface_updates,
+- planes_count,
+- acrtc_state->stream,
+- &bundle->stream_update,
+- dc_state);
++ dc_update_planes_and_stream(dm->dc,
++ bundle->surface_updates,
++ planes_count,
++ acrtc_state->stream,
++ &bundle->stream_update);
+
+ /**
+ * Enable or disable the interrupts on the backend.
+@@ -9669,12 +9670,11 @@ static void amdgpu_dm_atomic_commit_tail
+
+
+ mutex_lock(&dm->dc_lock);
+- dc_commit_updates_for_stream(dm->dc,
+- dummy_updates,
+- status->plane_count,
+- dm_new_crtc_state->stream,
+- &stream_update,
+- dc_state);
++ dc_update_planes_and_stream(dm->dc,
++ dummy_updates,
++ status->plane_count,
++ dm_new_crtc_state->stream,
++ &stream_update);
+ mutex_unlock(&dm->dc_lock);
+ }
+