dc->hwss.enable_accelerated_mode(dc, context);
}
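+ /*
+ * A prior full update may have left pipes busy until a recorded
+ * frame/line (vupdate); wait that out before reprogramming.
+ */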
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ //Only delay the OTG master for a given config
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ //dc_commit_state_no_check is always a full update
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
+ break;
+ }
+ }
+ }
+
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
+ }
+ }
+
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
&context->res_ctx,
stream);
ASSERT(top_pipe_to_program != NULL);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

/* Check old context for SubVP */
subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
}

if (dc->hwss.wait_for_dcc_meta_propagation)
dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
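+ /*
+ * For fast (surface-only) updates this passes is_surface_update_only,
+ * so a long wait is deferred (see the high-IRQ-level check in
+ * dcn10_wait_for_pipe_update_if_needed) rather than slept out here.
+ */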
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
}
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ //Pipes remain busy until a recorded frame and line (vupdate); mark them after a full update
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx);
+ }
+ }
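+
+ /*
+ * Rough handshake across consecutive full updates (a sketch, not an
+ * exact call trace):
+ * update N: lock -> program_front_end_for_ctx()
+ * -> set_wait_for_update_needed_for_pipe() (record busy-until)
+ * -> unlock
+ * update N+1: wait_for_pipe_update_if_needed() (sleep until latched)
+ * -> lock and program as usual
+ */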
+
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
us_x10 % frac);
}
+/*
+ * Delay until the pipe's busy-until point (frame + vupdate line) has
+ * passed, so that locking/programming for a consecutive full update
+ * can proceed safely.
+ */
+void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
+{
+ struct crtc_position position;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ unsigned int vpos, frame_count;
+ uint32_t vupdate_start, vupdate_end, vblank_start;
+ unsigned int lines_to_vupdate, us_to_vupdate;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ //Secondary ODM pipes share the master's OTG; only wait on the ODM master
+ if (pipe_ctx->prev_odm_pipe)
+ return;
+
+ if (!pipe_ctx->wait_is_required)
+ return;
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ frame_count = tg->funcs->get_frame_count(tg);
+
+ //If more than two frames passed since the busy point was recorded, the update has already latched
+ if (frame_count - pipe_ctx->wait_frame_count > 2)
+ return;
+
+ vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;
+
+ //If vpos is already past this frame's vupdate, wait for the next frame's
+ if (vpos >= vupdate_start && vupdate_start >= vblank_start)
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+ else
+ lines_to_vupdate = vupdate_start - vpos;
+
+ //pix_clk is in 100Hz units: us_per_line = h_total * 10^6 / (pix_clk_100hz * 100)
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ //Already inside the vupdate window; only the window itself remains to wait out
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+
+ if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
+ //Surface-only updates arrive at a high IRQ level; defer long waits
+ pipe_ctx->wait_is_required = true;
+ return;
+ }
+
+ fsleep(us_to_vupdate + us_vupdate);
+
+ //Wait satisfied; clear the pipe's wait state
+ pipe_ctx->next_vupdate = 0;
+ pipe_ctx->wait_frame_count = 0;
+ pipe_ctx->wait_is_required = false;
+}
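+
+/*
+ * Worked example with a hypothetical 1080p timing (illustrative
+ * numbers, not from the code): h_total = 2200, v_total = 1125,
+ * pix_clk_100hz = 1485000 (148.5 MHz), vblank_start = 1084,
+ * vupdate_start = 1090, vupdate_end = 1093. At vpos = 100:
+ * lines_to_vupdate = 1090 - 100 = 990,
+ * us_per_line = 2200 * 10000 / 1485000 = 14 (integer division),
+ * us_to_vupdate = 990 * 14 = 13860, us_vupdate = 4 * 14 = 56,
+ * so fsleep() blocks for ~13.9 ms, landing just past vupdate.
+ */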
+
+/*
+ * On pipe unlock and programming, record that the pipe will be busy
+ * until a given frame and line (vupdate). Consecutive full updates
+ * must wait for the pending update to latch before programming the
+ * next one.
+ */
+void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ uint32_t vupdate_start, vupdate_end;
+ struct crtc_position position;
+ unsigned int vpos, cur_frame;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct optc *optc1;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ optc1 = DCN10TG_FROM_TG(tg);
+ ASSERT(optc1->max_frame_count != 0);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ pipe_ctx->next_vupdate = vupdate_start;
+
+ cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (vpos < vupdate_start) {
+ //This frame's vupdate is still ahead; wait within the current frame
+ pipe_ctx->wait_frame_count = cur_frame;
+ } else {
+ //vupdate already passed; wait for the next frame, handling counter wrap
+ if (cur_frame + 1 > optc1->max_frame_count)
+ pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
+ else
+ pipe_ctx->wait_frame_count = cur_frame + 1;
+ }
+
+ pipe_ctx->wait_is_required = true;
+}
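+
+/*
+ * Wrap example for the logic above: with max_frame_count = 0xFFFFFF,
+ * cur_frame = 0xFFFFFF and vpos already past vupdate_start,
+ * cur_frame + 1 = 0x1000000 exceeds max_frame_count, so
+ * wait_frame_count becomes 0x1000000 - 0xFFFFFF = 1, a value the
+ * wrapped hardware counter will actually reach.
+ */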
+
void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dcn10_wait_for_pipe_update_if_needed(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool is_surface_update_only);
+void dcn10_set_wait_for_update_needed_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn35_hw_sequencer_construct(struct dc *dc)
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn351_hw_sequencer_construct(struct dc *dc)
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
+ void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
+ void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
struct pixel_rate_divider pixel_rate_divider;
/* pixels borrowed from hblank to hactive */
uint8_t hblank_borrow;
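+ /*
+ * Wait bookkeeping for consecutive full updates:
+ * set_wait_for_update_needed_for_pipe() records next_vupdate and
+ * wait_frame_count and sets wait_is_required after front-end
+ * programming; wait_for_pipe_update_if_needed() sleeps the wait out
+ * and clears all three.
+ */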
+ /* vupdate line at which the pending update latches */
+ uint32_t next_vupdate;
+ uint32_t wait_frame_count;
+ bool wait_is_required;
};
/* Data used for dynamic link encoder assignment.
int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
+ uint32_t max_frame_count;
};
void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
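+ /* the OTG frame counter appears to be 24 bits wide, hence 0xFFFFFF */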
+ optc1->max_frame_count = 0xFFFFFF;
dcn35_timing_generator_set_fgcg(
optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);