static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state, *old_bw_state;
const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
if (new_dbuf_state &&
- (new_dbuf_state->active_pipes !=
- old_dbuf_state->active_pipes ||
- new_dbuf_state->enabled_slices !=
- old_dbuf_state->enabled_slices))
+ new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
return true;
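+
+ /*
+  * Xe3+ no longer reports dbuf slices through pmdemand, so slice
+  * changes only force a new request on earlier platforms.
+  */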
+ if (DISPLAY_VER(display) < 30) {
+ if (new_dbuf_state &&
+ new_dbuf_state->enabled_slices !=
+ old_dbuf_state->enabled_slices)
+ return true;
+ }
+
new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
if (new_cdclk_state &&
(new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk ||
new_cdclk_state->actual.voltage_level !=
old_cdclk_state->actual.voltage_level))
return true;

new_dbuf_state = intel_atomic_get_dbuf_state(state);
if (IS_ERR(new_dbuf_state))
return PTR_ERR(new_dbuf_state);
- new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
- new_pmdemand_state->params.active_dbufs =
- min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
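+ /*
+  * The pre-Xe3 request fields hold at most a value of 3, hence the
+  * clamps; Xe3+ drops the dbuf count and tracks only active pipes.
+  */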
+ if (DISPLAY_VER(i915) < 30) {
+ new_pmdemand_state->params.active_dbufs =
+ min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+ new_pmdemand_state->params.active_pipes =
+ min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
+ } else {
+ new_pmdemand_state->params.active_pipes =
+ min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(i915));
+ }
new_cdclk_state = intel_atomic_get_cdclk_state(state);
if (IS_ERR(new_cdclk_state))
return PTR_ERR(new_cdclk_state);
reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
- /* Set 1*/
pmdemand_state->params.qclk_gv_bw =
REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
pmdemand_state->params.voltage_index =
REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
pmdemand_state->params.qclk_gv_index =
REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
- pmdemand_state->params.active_pipes =
- REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
- pmdemand_state->params.active_dbufs =
- REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
pmdemand_state->params.active_phys =
REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);
- /* Set 2*/
pmdemand_state->params.cdclk_freq_mhz =
REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
pmdemand_state->params.ddiclk_max =
REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
- pmdemand_state->params.scalers =
- REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
+
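+ /* Xe3 reads active pipes from its own mask; dbufs and scalers are gone */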
+ if (DISPLAY_VER(i915) >= 30) {
+ pmdemand_state->params.active_pipes =
+ REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
+ } else {
+ pmdemand_state->params.active_pipes =
+ REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
+ pmdemand_state->params.active_dbufs =
+ REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
+
+ pmdemand_state->params.scalers =
+ REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
+ }
unlock:
mutex_unlock(&i915->display.pmdemand.lock);
}

void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
u8 dbuf_slices)
{
u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);
+ /* PM Demand only tracks active dbufs on pre-Xe3 platforms */
+ if (DISPLAY_VER(i915) >= 30)
+ return;
+
mutex_lock(&i915->display.pmdemand.lock);
if (drm_WARN_ON(&i915->drm,
!intel_pmdemand_check_prev_transaction(i915)))
goto unlock;
}
static void
-intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
+intel_pmdemand_update_params(struct intel_display *display,
+ const struct intel_pmdemand_state *new,
const struct intel_pmdemand_state *old,
u32 *reg1, u32 *reg2, bool serialized)
{
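/*
 * update_reg() fills one request field with the max of its old, new
 * and (when serialized) currently-programmed values.
 */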
/* Set 1*/
update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
- update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
- update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);
/* Set 2*/
update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
- update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);
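+ /* Program the per-version layout: Xe3 has no dbuf or scaler fields */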
+ if (DISPLAY_VER(display) >= 30) {
+ update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
+ } else {
+ update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
+ update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
+
+ update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
+ }
+
#undef update_reg
}
static void intel_pmdemand_program_params(struct drm_i915_private *i915,
const struct intel_pmdemand_state *new,
const struct intel_pmdemand_state *old,
bool serialized)
bool serialized)
{
+ struct intel_display *display = &i915->display;
bool changed = false;
u32 reg1, mod_reg1;
u32 reg2, mod_reg2;
reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
mod_reg1 = reg1;

reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
mod_reg2 = reg2;
- intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
+ intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
serialized);
if (reg1 != mod_reg1) {