]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/amd/display: wait for updates to latch before locking
authorAusef Yousof <Ausef.Yousof@amd.com>
Fri, 28 Mar 2025 15:06:27 +0000 (11:06 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 7 Apr 2025 22:01:08 +0000 (18:01 -0400)
[why&how]
It is possible for an update to acquire otg lock and begin programming
while the previous update has not completed and its values have not
latched. The correct way to go about this is to wait until the vupdate
pulses so we can be sure that previous updates have latched and we can
continue with the current update pipe programming, otherwise during
consecutive full updates we will have corruption flash on the screen.

The corruption flash occurs specifically on configs that require odm
combine, and it's local to a specific pipe (will not flash across the
whole screen). In this ticket the flash appears across the otg slave,
but it may also appear across the master.

Reviewed-by: Leo Chen <leo.chen@amd.com>
Signed-off-by: Ausef Yousof <Ausef.Yousof@amd.com>
Signed-off-by: Roman Li <roman.li@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c

index be63cc4aca1f555ea655dbf72cc081580d5c6f2e..636999fcaebb7d129b2a59a489cf85e9e65433a0 100644 (file)
@@ -2104,6 +2104,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
                dc->hwss.enable_accelerated_mode(dc, context);
        }
 
+       if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       pipe = &context->res_ctx.pipe_ctx[i];
+                       //Only delay otg master for a given config
+                       if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+                               //dc_commit_state_no_check is always a full update
+                               dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
+                               break;
+                       }
+               }
+       }
+
        if (context->stream_count > get_seamless_boot_stream_count(context) ||
                context->stream_count == 0)
                dc->hwss.prepare_bandwidth(dc, context);
@@ -2168,6 +2180,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        if (dc->hwss.program_front_end_for_ctx) {
                dc->hwss.interdependent_update_lock(dc, context, true);
                dc->hwss.program_front_end_for_ctx(dc, context);
+
+               if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
+                       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                               pipe = &context->res_ctx.pipe_ctx[i];
+                               dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
+                       }
+               }
+
                dc->hwss.interdependent_update_lock(dc, context, false);
                dc->hwss.post_unlock_program_front_end(dc, context);
        }
@@ -4049,6 +4069,7 @@ static void commit_planes_for_stream(struct dc *dc,
                                &context->res_ctx,
                                stream);
        ASSERT(top_pipe_to_program != NULL);
+
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
@@ -4099,6 +4120,9 @@ static void commit_planes_for_stream(struct dc *dc,
                dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
        }
 
+       if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
+               dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+
        if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
                if (dc->hwss.subvp_pipe_control_lock)
                        dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
@@ -4235,6 +4259,16 @@ static void commit_planes_for_stream(struct dc *dc,
        }
        if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
                dc->hwss.program_front_end_for_ctx(dc, context);
+
+               //Pipe busy until some frame and line #
+               if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) {
+                       for (j = 0; j < dc->res_pool->pipe_count; j++) {
+                               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+                               dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx);
+                       }
+               }
+
                if (dc->debug.validate_dml_output) {
                        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                                struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
index e34a93b703a7a5ab7db186a27930ec9ba7a91c49..f9ee55998b6b2442e67d88a002efdf2764ab87bf 100644 (file)
@@ -94,6 +94,128 @@ static void print_microsec(struct dc_context *dc_ctx,
                        us_x10 % frac);
 }
 
+/*
+ * Delay until we passed busy-until-point to which we can
+ * do necessary locking/programming on consecutive full updates
+ */
+void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
+{
+       struct crtc_position position;
+       struct dc_stream_state *stream = pipe_ctx->stream;
+       unsigned int vpos, frame_count;
+       uint32_t vupdate_start, vupdate_end, vblank_start;
+       unsigned int lines_to_vupdate, us_to_vupdate;
+       unsigned int us_per_line, us_vupdate;
+
+       if (!pipe_ctx->stream ||
+               !pipe_ctx->stream_res.tg ||
+               !pipe_ctx->stream_res.stream_enc)
+               return;
+
+       //Only the head of an ODM chain waits; pipes with a prev_odm_pipe are skipped
+       if (pipe_ctx->prev_odm_pipe &&
+                               pipe_ctx->stream)
+               return;
+
+       //Nothing to do unless a prior unlock flagged this pipe as busy
+       if (!pipe_ctx->wait_is_required)
+               return;
+
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+       if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+               return;
+
+       dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+                                               &vupdate_end);
+
+       dc->hwss.get_position(&pipe_ctx, 1, &position);
+       vpos = position.vertical_count;
+
+       frame_count = tg->funcs->get_frame_count(tg);
+
+       //More than 2 frames since the flag was set: previous update has latched already
+       if (frame_count - pipe_ctx->wait_frame_count > 2)
+               return;
+
+       vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;
+
+       //Past this frame's vupdate already: wait for the next frame's pulse instead
+       if (vpos >= vupdate_start && vupdate_start >= vblank_start)
+               lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+       else
+               lines_to_vupdate = vupdate_start - vpos;
+
+       //pix_clk is in 100Hz units, so us/line = h_total * 10000 / pix_clk_100hz
+       us_per_line =
+               stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+       us_to_vupdate = lines_to_vupdate * us_per_line;
+
+       if (vupdate_end < vupdate_start)
+               vupdate_end += stream->timing.v_total;
+
+       //Distance exceeds the span outside the vupdate window — presumably vpos is
+       //already inside the window, so only the window itself needs waiting out
+       if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+               us_to_vupdate = 0;
+
+       us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+
+       if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
+               //surface updates come in at high irql
+               pipe_ctx->wait_is_required = true;
+               return;
+       }
+
+       //Sleep through the vupdate pulse so the previous update is guaranteed latched
+       fsleep(us_to_vupdate + us_vupdate);
+
+       //clear
+       pipe_ctx->next_vupdate = 0;
+       pipe_ctx->wait_frame_count = 0;
+       pipe_ctx->wait_is_required = false;
+}
+
+/*
+ * On pipe unlock and programming, indicate pipe will be busy
+ * until some frame and line (vupdate), this is required for consecutive
+ * full updates, need to wait for updates
+ * to latch to try and program the next update
+ */
+void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+       uint32_t vupdate_start, vupdate_end;
+       struct crtc_position position;
+       unsigned int vpos, cur_frame;
+
+       if (!pipe_ctx->stream ||
+               !pipe_ctx->stream_res.tg ||
+               !pipe_ctx->stream_res.stream_enc)
+               return;
+
+       dc->hwss.get_position(&pipe_ctx, 1, &position);
+       vpos = position.vertical_count;
+
+       dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+                                               &vupdate_end);
+
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+       struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+       //OTGs taking this path must report their frame-counter rollover value
+       ASSERT(optc1->max_frame_count != 0);
+
+       if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+               return;
+
+       pipe_ctx->next_vupdate = vupdate_start;
+
+       cur_frame = tg->funcs->get_frame_count(tg);
+
+       //Before vupdate: latch happens this frame; otherwise next frame,
+       //accounting for frame-counter wrap at max_frame_count
+       if (vpos < vupdate_start) {
+               pipe_ctx->wait_frame_count = cur_frame;
+       } else {
+               if (cur_frame + 1 > optc1->max_frame_count)
+                       pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
+               else
+                       pipe_ctx->wait_frame_count = cur_frame + 1;
+       }
+
+       pipe_ctx->wait_is_required = true;
+}
+
 void dcn10_lock_all_pipes(struct dc *dc,
        struct dc_state *context,
        bool lock)
index 42ffd1e1299c856f619278e67248b5edde117d68..57d30ea225f2b67c77c05b4644d9cdf33d008de1 100644 (file)
@@ -50,6 +50,13 @@ void dcn10_optimize_bandwidth(
 void dcn10_prepare_bandwidth(
                struct dc *dc,
                struct dc_state *context);
+void dcn10_wait_for_pipe_update_if_needed(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx,
+               bool is_surface_update_only);
+void dcn10_set_wait_for_update_needed_for_pipe(
+       struct dc *dc,
+       struct pipe_ctx *pipe_ctx);
 void dcn10_pipe_control_lock(
        struct dc *dc,
        struct pipe_ctx *pipe,
index 6a82a865209cb4330938992d758abe0c4dea9e59..a3ccf805bd16ae07a9bec592200d915a9c49fe64 100644 (file)
@@ -168,6 +168,8 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
        .dsc_pg_control = dcn35_dsc_pg_control,
        .dsc_pg_status = dcn32_dsc_pg_status,
        .enable_plane = dcn35_enable_plane,
+       .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+       .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
 };
 
 void dcn35_hw_sequencer_construct(struct dc *dc)
index 902a96940a015b5139be2cde0dffb4962045e5ee..a4e6b6479983ff83b096a24ccb259d28572dd58b 100644 (file)
@@ -162,6 +162,8 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
        .dsc_pg_control = dcn35_dsc_pg_control,
        .dsc_pg_status = dcn32_dsc_pg_status,
        .enable_plane = dcn35_enable_plane,
+       .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+       .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
 };
 
 void dcn351_hw_sequencer_construct(struct dc *dc)
index 22a5d4a03c988c128b39a3f440be94c3c57c03bb..09bc65c2fa23b2eb829c2474e0a39a0d1e9f5937 100644 (file)
@@ -183,6 +183,8 @@ struct hwseq_private_funcs {
                        struct dc_cm2_func_luts mcm_luts,
                        bool lut_bank_a);
        void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
+       void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
+       void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
 };
 
 struct dce_hwseq {
index 338bc240e80367847498e123ab9047c9261582e6..1ed3461440aad01d0ffa2af85ed695b4486f545f 100644 (file)
@@ -480,6 +480,10 @@ struct pipe_ctx {
        struct pixel_rate_divider pixel_rate_divider;
        /* pixels borrowed from hblank to hactive */
        uint8_t hblank_borrow;
+       /* next vupdate */
+       uint32_t next_vupdate;
+       uint32_t wait_frame_count;
+       bool wait_is_required;
 };
 
 /* Data used for dynamic link encoder assignment.
index 7f371cbb35cdef1c5ef5443af03d1e162904191e..0d5a8358a778070d909eee8b1326e1666ae133ad 100644 (file)
@@ -68,6 +68,7 @@ struct optc {
        int pstate_keepout;
        struct dc_crtc_timing orginal_patched_timing;
        enum signal_type signal;
+       uint32_t max_frame_count;
 };
 
 void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
index b86fe2b094f8164b6b7b401a65db97191d78b463..4cfc6c0fa147f0a501b5cedf3bd87868daf38e19 100644 (file)
@@ -507,6 +507,7 @@ void dcn35_timing_generator_init(struct optc *optc1)
        optc1->min_v_blank_interlace = 5;
        optc1->min_h_sync_width = 4;
        optc1->min_v_sync_width = 1;
+       optc1->max_frame_count = 0xFFFFFF;
 
        dcn35_timing_generator_set_fgcg(
                optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);