From: Nicholas Kazlauskas Date: Tue, 10 Mar 2026 20:33:44 +0000 (-0400) Subject: drm/amd/display: Revert inbox0 lock for cursor due to deadlock X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=3cf496080f539b69f069027cece82642b46a6cb5;p=thirdparty%2Fkernel%2Flinux.git drm/amd/display: Revert inbox0 lock for cursor due to deadlock [Why] A deadlock occurs when using inbox0 lock for cursor operations on PSR-SU and Replay that does not occur when using the inbox1 locking path. This is because of a priority inversion issue where inbox1 work cannot be serviced while holding the HW lock from driver and sending cursor notifications to DMUB. Typically the lower priority of inbox1 for the lock command would allow the PSR and Replay FSMs to complete their transition prior to giving driver the lock but this is no longer the case with inbox0 having the highest priority in servicing. [How] Revert to using the inbox1 locking path for cursor updates. This will reintroduce any synchronization bugs that were there with Replay or PSR-SU touching the cursor at the same time as the driver. 
Reviewed-by: Charlene Liu Signed-off-by: Nicholas Kazlauskas Signed-off-by: Chuanyu Tseng Signed-off-by: Alex Deucher --- diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 908f79b021020..daa7ab362239f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -33,7 +33,6 @@ #include "dc_dmub_srv.h" #include "dc_state_priv.h" #include "dc_stream_priv.h" -#include "dce/dmub_hw_lock_mgr.h" #define DC_LOGGER dc->ctx->logger #ifndef MIN @@ -259,7 +258,6 @@ void program_cursor_attributes( struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); - bool unlock_dmub = false; if (!stream) return; @@ -278,12 +276,6 @@ void program_cursor_attributes( if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) { dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); } else { - if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream && - should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) { - dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true); - unlock_dmub = true; - } - dc->hwss.cursor_lock(dc, pipe_to_program, true); if (pipe_to_program->next_odm_pipe) dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); @@ -306,9 +298,6 @@ void program_cursor_attributes( dc->hwss.cursor_lock(dc, pipe_to_program, false); if (pipe_to_program->next_odm_pipe) dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); - - if (unlock_dmub) - dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false); } } } @@ -416,7 +405,6 @@ void program_cursor_position( struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); - bool unlock_dmub = false; if (!stream) return; @@ -436,16 +424,10 @@ void program_cursor_position( if (!pipe_to_program) { pipe_to_program = 
pipe_ctx; - if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) { + if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); - } else { - if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream && - should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) { - dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true); - unlock_dmub = true; - } + else dc->hwss.cursor_lock(dc, pipe_to_program, true); - } } dc->hwss.set_cursor_position(pipe_ctx); @@ -457,14 +439,10 @@ void program_cursor_position( } if (pipe_to_program) { - if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) { + if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) dc->hwss.commit_cursor_offload_update(dc, pipe_to_program); - } else { + else dc->hwss.cursor_lock(dc, pipe_to_program, false); - - if (unlock_dmub) - dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false); - } } }