static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
return update_type;
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
return update_type;
return update_type;
}
-static struct surface_update_descriptor det_surface_update(
+static struct surface_update_descriptor check_update_surface(
const struct dc_check_config *check_config,
struct dc_surface_update *u)
{
- struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
if (u->surface->force_full_update) {
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_STREAM);
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (u->cursor_csc_color_matrix) {
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) {
update_flags->bits.lut_3d = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable &&
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
- // TODO: Should be fast?
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->sdr_white_level_nits)
int surface_count,
struct dc_stream_update *stream_update)
{
- struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
/* When countdown finishes, promote this flip to full to trigger deferred final transition */
if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) {
if (su_flags->raw)
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- // Non-global cases
+ /* Non-global cases */
+ if (stream_update->hdr_static_metadata ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->vtem_infopacket ||
+ stream_update->avi_infopacket) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
su_flags->bits.out_tf = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
+
+ if (stream_update->periodic_interrupt) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (stream_update->dither_option) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (stream_update->cursor_position || stream_update->cursor_attributes) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ /* TODO - cleanup post blend CM */
+ if (stream_update->func_shaper || stream_update->lut3d_func) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (stream_update->pending_test_pattern) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
}
for (int i = 0 ; i < surface_count; i++) {
struct surface_update_descriptor inner_type =
- det_surface_update(check_config, &updates[i]);
+ check_update_surface(check_config, &updates[i]);
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
+/*
+ * check_update_state_and_surfaces_for_stream() - Check DC global state for full-update triggers
+ *
+ * Returns { UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL } if any condition in the
+ * current DC global state forces a full update; otherwise returns the baseline
+ * { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE }. The contents of the stream and
+ * surface updates themselves are checked separately by
+ * dc_check_update_surfaces_for_stream().
+ *
+ * This function performs checks on the DC global state, and is therefore not re-entrant. It
+ * should not be called from DM.
+ *
+ * NOTE(review): check_config and stream_update are currently unused here; presumably
+ * kept for signature symmetry with the per-stream check path — confirm intent.
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
+static struct surface_update_descriptor check_update_state_and_surfaces_for_stream(
+ const struct dc *dc,
+ const struct dc_check_config *check_config,
+ const struct dc_stream_state *stream,
+ const struct dc_surface_update *updates,
+ const int surface_count,
+ const struct dc_stream_update *stream_update)
+{
+ const struct dc_state *context = dc->current_state;
+
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE};
+
+ /* An updated surface not present in the current context implies a plane
+  * add/remove, which requires a full update under the global lock.
+  */
+ if (updates)
+ for (int i = 0; i < surface_count; i++)
+ if (!is_surface_in_context(context, updates[i].surface))
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+
+ /* A plane-count change (or an unknown stream status) also forces full. */
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ /* Idle optimizations currently allowed: force full update. */
+ if (dc->idle_optimizations_allowed)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+
+ /* Cursor limit can be cleared: force full update. */
+ if (dc_can_clear_cursor_limit(dc))
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+
+ return overall_type;
+}
+
+/*
+ * dc_check_update_state_and_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * Combines the global-state check (check_update_state_and_surfaces_for_stream) with
+ * the per-stream/per-surface update check (dc_check_update_surfaces_for_stream),
+ * keeping the highest update type and strictest lock descriptor of the two via
+ * elevate_update_type().
+ *
+ * This function performs checks on the DC global state, stream and surface update, and is
+ * therefore not re-entrant. It should not be called from DM.
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
+static struct surface_update_descriptor dc_check_update_state_and_surfaces_for_stream(
+ const struct dc *dc,
+ const struct dc_check_config *check_config,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update)
+{
+ /* check updates against the entire DC state (global) first */
+ struct surface_update_descriptor overall_update_type = check_update_state_and_surfaces_for_stream(
+ dc,
+ check_config,
+ stream,
+ updates,
+ surface_count,
+ stream_update);
+
+ /* check updates for stream and plane, then merge: highest type/lock wins */
+ struct surface_update_descriptor stream_update_type = dc_check_update_surfaces_for_stream(
+ check_config,
+ updates,
+ surface_count,
+ stream_update);
+ elevate_update_type(&overall_update_type, stream_update_type.update_type, stream_update_type.lock_descriptor);
+
+ return overall_update_type;
+}
+
static struct dc_stream_status *stream_get_status(
struct dc_state *ctx,
struct dc_stream_state *stream)
}
}
-static bool full_update_required_weak(
- const struct dc *dc,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream);
-
struct pipe_split_policy_backup {
bool dynamic_odm_policy;
bool subvp_policy;
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
- enum surface_update_type *new_update_type,
+ struct surface_update_descriptor *update_descriptor,
struct dc_state **new_context)
{
struct dc_state *context;
int i, j;
- enum surface_update_type update_type;
const struct dc_stream_status *stream_status;
struct dc_context *dc_ctx = dc->ctx;
}
context = dc->current_state;
- update_type = dc_check_update_surfaces_for_stream(
- &dc->check_config, srf_updates, surface_count, stream_update).update_type;
- if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
- update_type = UPDATE_TYPE_FULL;
+ *update_descriptor = dc_check_update_state_and_surfaces_for_stream(
+ dc,
+ &dc->check_config,
+ stream,
+ srf_updates,
+ surface_count,
+ stream_update);
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
*/
force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
- if (update_type == UPDATE_TYPE_FULL)
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.current_state, stream);
/* update current stream with the new updates */
}
}
- if (update_type == UPDATE_TYPE_FULL) {
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
if (stream_update) {
uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
- if (update_type >= update_surface_trace_level)
+ if (update_descriptor->update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
for (i = 0; i < surface_count; i++)
copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
- if (update_type >= UPDATE_TYPE_FULL) {
+ if (update_descriptor->update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
for (i = 0; i < surface_count; i++)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- if (update_type != UPDATE_TYPE_MED)
+ if (update_descriptor->update_type != UPDATE_TYPE_MED)
continue;
if (surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
}
}
- if (update_type == UPDATE_TYPE_FULL) {
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
struct pipe_split_policy_backup policy;
bool minimize = false;
update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
- *new_update_type = update_type;
- if (update_type == UPDATE_TYPE_FULL)
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.new_state, stream);
return true;
program_cursor_position(dc, stream);
/* Full fe update*/
- if (update_type == UPDATE_TYPE_FAST)
+ if (update_type <= UPDATE_TYPE_FAST)
continue;
if (stream_update->dsc_config)
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
bool should_offload_fams2_flip = false;
- bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+ bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
if (should_lock_all_pipes)
determine_pipe_unlock_order(dc, context);
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
- if (update_type == UPDATE_TYPE_FAST &&
+ if (update_type <= UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+ bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
bool subvp_curr_use = false;
uint8_t current_stream_mask = 0;
if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
- if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
+ if (update_type > UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_inbox1_lock(dc, stream->link)) {
}
dc->hwss.post_unlock_program_front_end(dc, context);
- if (update_type != UPDATE_TYPE_FAST)
+ if (update_type > UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
return;
}
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type > UPDATE_TYPE_FAST) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
- if (update_type == UPDATE_TYPE_FAST &&
+ if (update_type <= UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
continue;
/* Full fe update*/
- if (update_type == UPDATE_TYPE_FAST)
+ if (update_type <= UPDATE_TYPE_FAST)
continue;
stream_status =
continue;
/* Full fe update*/
- if (update_type == UPDATE_TYPE_FAST)
+ if (update_type <= UPDATE_TYPE_FAST)
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
}
}
- if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
+ if (dc->hwss.program_front_end_for_ctx && update_type > UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
//Pipe busy until some frame and line #
}
// Update Type FAST, Surface updates
- if (update_type == UPDATE_TYPE_FAST) {
+ if (update_type <= UPDATE_TYPE_FAST) {
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
/* If enabling subvp or transitioning from subvp->subvp, enable the
* phantom streams before we program front end for the phantom pipes.
*/
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type > UPDATE_TYPE_FAST) {
if (dc->hwss.enable_phantom_streams)
dc->hwss.enable_phantom_streams(dc, context);
}
}
- if (update_type != UPDATE_TYPE_FAST)
+ if (update_type > UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
if (subvp_prev_use && !subvp_curr_use) {
dc->hwss.disable_phantom_streams(dc, context);
}
- if (update_type != UPDATE_TYPE_FAST)
+ if (update_type > UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
return true;
}
-void populate_fast_updates(struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
- int surface_count,
- struct dc_stream_update *stream_update)
-{
- int i = 0;
-
- if (stream_update) {
- fast_update[0].out_transfer_func = stream_update->out_transfer_func;
- fast_update[0].output_csc_transform = stream_update->output_csc_transform;
- } else {
- fast_update[0].out_transfer_func = NULL;
- fast_update[0].output_csc_transform = NULL;
- }
-
- for (i = 0; i < surface_count; i++) {
- fast_update[i].flip_addr = srf_updates[i].flip_addr;
- fast_update[i].gamma = srf_updates[i].gamma;
- fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
- fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
- fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
- fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
- fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control;
- }
-}
-
-static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
-{
- int i;
-
- if (fast_update[0].out_transfer_func ||
- fast_update[0].output_csc_transform)
- return true;
-
- for (i = 0; i < surface_count; i++) {
- if (fast_update[i].flip_addr ||
- fast_update[i].gamma ||
- fast_update[i].gamut_remap_matrix ||
- fast_update[i].input_csc_color_matrix ||
- fast_update[i].cursor_csc_color_matrix ||
- fast_update[i].cm_hist_control ||
- fast_update[i].coeff_reduction_factor)
- return true;
- }
-
- return false;
-}
-
-bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
-{
- int i;
-
- if (fast_update[0].out_transfer_func ||
- fast_update[0].output_csc_transform)
- return true;
-
- for (i = 0; i < surface_count; i++) {
- if (fast_update[i].input_csc_color_matrix ||
- fast_update[i].gamma ||
- fast_update[i].gamut_remap_matrix ||
- fast_update[i].coeff_reduction_factor ||
- fast_update[i].cm_hist_control ||
- fast_update[i].cursor_csc_color_matrix)
- return true;
- }
-
- return false;
-}
-
-static bool full_update_required_weak(
- const struct dc *dc,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream)
-{
- const struct dc_state *context = dc->current_state;
- if (srf_updates)
- for (int i = 0; i < surface_count; i++)
- if (!is_surface_in_context(context, srf_updates[i].surface))
- return true;
-
- if (stream) {
- const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- return true;
- }
- if (dc->idle_optimizations_allowed)
- return true;
-
- if (dc_can_clear_cursor_limit(dc))
- return true;
-
- return false;
-}
-
-static bool full_update_required(
- const struct dc *dc,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream)
-{
- const union dc_plane_cm_flags blend_only_flags = {
- .bits = {
- .blend_enable = 1,
- }
- };
-
- if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
- return true;
-
- for (int i = 0; i < surface_count; i++) {
- if (srf_updates &&
- (srf_updates[i].plane_info ||
- srf_updates[i].scaling_info ||
- (srf_updates[i].hdr_mult.value &&
- srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
- (srf_updates[i].sdr_white_level_nits &&
- srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
- srf_updates[i].in_transfer_func ||
- srf_updates[i].surface->force_full_update ||
- (srf_updates[i].flip_addr &&
- srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
- (srf_updates[i].cm &&
- ((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) ||
- (srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0)))))
- return true;
- }
-
- if (stream_update &&
- (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
- (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
- stream_update->integer_scaling_update) ||
- stream_update->hdr_static_metadata ||
- stream_update->abm_level ||
- stream_update->periodic_interrupt ||
- stream_update->vrr_infopacket ||
- stream_update->vsc_infopacket ||
- stream_update->vsp_infopacket ||
- stream_update->hfvsif_infopacket ||
- stream_update->vtem_infopacket ||
- stream_update->adaptive_sync_infopacket ||
- stream_update->avi_infopacket ||
- stream_update->dpms_off ||
- stream_update->allow_freesync ||
- stream_update->vrr_active_variable ||
- stream_update->vrr_active_fixed ||
- stream_update->gamut_remap ||
- stream_update->output_color_space ||
- stream_update->dither_option ||
- stream_update->wb_update ||
- stream_update->dsc_config ||
- stream_update->mst_bw_update ||
- stream_update->func_shaper ||
- stream_update->lut3d_func ||
- stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust ||
- stream_update->scaler_sharpener_update ||
- stream_update->hw_cursor_req))
- return true;
-
- return false;
-}
-
-static bool fast_update_only(
- const struct dc *dc,
- const struct dc_fast_update *fast_update,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream)
-{
- return fast_updates_exist(fast_update, surface_count)
- && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *context;
- enum surface_update_type update_type;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
* cause underflow. Apply stream configuration with minimal pipe
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
- bool is_fast_update_only;
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
- surface_count, stream_update, stream);
+ struct surface_update_descriptor update_descriptor = {0};
+
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
surface_count,
stream,
stream_update,
- &update_type,
+ &update_descriptor,
&context))
return false;
dc_state_release(context);
return false;
}
- update_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&update_descriptor, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (dc->hwss.is_pipe_topology_transition_seamless &&
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
+ if (update_descriptor.update_type <= UPDATE_TYPE_FAST) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
- update_type,
+ update_descriptor.update_type,
context);
} else {
if (!stream_update &&
surface_count,
stream,
stream_update,
- update_type,
+ update_descriptor.update_type,
context);
}
if (dc->current_state != context)
struct dc_stream_update *stream_update,
enum surface_update_type update_type)
{
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
ASSERT(update_type < UPDATE_TYPE_FULL);
- populate_fast_updates(fast_update, srf_updates, surface_count,
- stream_update);
- if (fast_update_only(dc, fast_update, srf_updates, surface_count,
- stream_update, stream) &&
- !dc->check_config.enable_legacy_fast_update)
+ if (update_type <= UPDATE_TYPE_FAST)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
struct dc_stream_update *stream_update)
{
struct dc_state *new_context;
- enum surface_update_type update_type;
+ struct surface_update_descriptor update_descriptor = {0};
/*
* When this function returns true and new_context is not equal to
* replaced by a newer context. Refer to the use of
* swap_and_free_current_context below.
*/
- if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
- stream, stream_update, &update_type,
+ if (!update_planes_and_stream_state(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ &update_descriptor,
&new_context))
return false;
if (new_context == dc->current_state) {
commit_planes_and_stream_update_on_current_context(dc,
srf_updates, surface_count, stream,
- stream_update, update_type);
+ stream_update, update_descriptor.update_type);
if (dc->check_config.transition_countdown_to_steady_state)
dc->check_config.transition_countdown_to_steady_state--;
} else {
commit_planes_and_stream_update_with_new_context(dc,
srf_updates, surface_count, stream,
- stream_update, update_type, new_context);
+ stream_update, update_descriptor.update_type, new_context);
}
return true;
struct dc_stream_update *stream_update;
bool update_v3;
bool do_clear_update_flags;
- enum surface_update_type update_type;
+ struct surface_update_descriptor update_descriptor;
struct dc_state *new_context;
enum update_v3_flow flow;
struct dc_state *backup_context;
ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID);
dc_exit_ips_for_hw_access(scratch->dc);
- /* HWSS path determination needs to be done prior to updating the surface and stream states. */
- struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
-
- populate_fast_updates(fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update);
-
- const bool is_hwss_fast_path_only =
- fast_update_only(scratch->dc,
- fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update,
- scratch->stream) &&
- !scratch->dc->check_config.enable_legacy_fast_update;
-
if (!update_planes_and_stream_state(
scratch->dc,
scratch->surface_updates,
scratch->surface_count,
scratch->stream,
scratch->stream_update,
- &scratch->update_type,
+ &scratch->update_descriptor,
&scratch->new_context
)) {
return false;
}
if (scratch->new_context == scratch->dc->current_state) {
- ASSERT(scratch->update_type < UPDATE_TYPE_FULL);
+ ASSERT(scratch->update_descriptor.update_type < UPDATE_TYPE_FULL);
- scratch->flow = is_hwss_fast_path_only
+ scratch->flow = scratch->update_descriptor.update_type <= UPDATE_TYPE_FAST
? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
: UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
return true;
}
- ASSERT(scratch->update_type >= UPDATE_TYPE_FULL);
+ ASSERT(scratch->update_descriptor.update_type >= UPDATE_TYPE_FULL);
const bool seamless = scratch->dc->hwss.is_pipe_topology_transition_seamless(
scratch->dc,
intermediate_update ? scratch->intermediate_count : scratch->surface_count,
scratch->stream,
use_stream_update ? scratch->stream_update : NULL,
- intermediate_context ? UPDATE_TYPE_FULL : scratch->update_type,
+ intermediate_context ? UPDATE_TYPE_FULL : scratch->update_descriptor.update_type,
// `dc->current_state` only used in `NO_NEW_CONTEXT`, where it is equal to `new_context`
intermediate_context ? scratch->intermediate_context : scratch->new_context
);
scratch->surface_count,
scratch->stream,
scratch->stream_update,
- scratch->update_type,
+ scratch->update_descriptor.update_type,
scratch->new_context
);
break;