// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include <drm/drm_print.h>

#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"
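
/*
 * PM demand (display version 14+): around each atomic commit the driver
 * reports the upcoming display resource demand (QGV bandwidth, voltage
 * index, cdclk and max ddiclk frequency, number of active pipes, dbufs,
 * phys, PLLs and scalers) to the PUnit firmware through the
 * XELPDP_INITIATE_PMDEMAND_REQUEST registers, then waits for the firmware
 * to acknowledge the request.
 */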

struct pmdemand_params {
	u16 qclk_gv_bw;
	u8 voltage_index;
	u8 qclk_gv_index;
	u8 active_pipes;
	u8 active_dbufs;	/* pre-Xe3 only */
	/* Total number of non type C active phys from active_phys_mask */
	u8 active_phys;
	u8 plls;
	u16 cdclk_freq_mhz;
	/* max from ddi_clocks[] */
	u16 ddiclk_max;
	u8 scalers;		/* pre-Xe3 only */
};

struct intel_pmdemand_state {
	struct intel_global_state base;

	/* Maintain a persistent list of port clocks across all crtcs */
	int ddi_clocks[I915_MAX_PIPES];

	/* Maintain a persistent list of non type C phys mask */
	u16 active_combo_phys_mask;

	/* Parameters to be configured in the pmdemand registers */
	struct pmdemand_params params;
};

struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_pmdemand_state, base);
}

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};
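
/*
 * Accessors for the pmdemand global state within an atomic transaction.
 * The _old/_new variants return NULL if the pmdemand state is not part of
 * the given transaction.
 */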

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_global_obj_state(state,
						  &display->pmdemand.obj);

	if (IS_ERR(pmdemand_state))
		return ERR_CAST(pmdemand_state);

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}
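
/*
 * Allocate the initial pmdemand global state object. On display version
 * 14.00 (MTL) steppings before C0 this also disables the PM demand
 * response timeout via a chicken bit.
 */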

int intel_pmdemand_init(struct intel_display *display)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

void intel_pmdemand_init_early(struct intel_display *display)
{
	mutex_init(&display->pmdemand.lock);
	init_waitqueue_head(&display->pmdemand.waitqueue);
}
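
/*
 * Track which combo phys are active. Type-C phys are excluded here; only
 * combo phys count towards the active_phys parameter.
 */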
void
intel_pmdemand_update_phys_mask(struct intel_display *display,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(display) < 14)
		return;

	if (!encoder)
		return;

	if (intel_encoder_is_tc(encoder))
		return;

	phy = intel_encoder_to_phy(encoder);

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

void
intel_pmdemand_update_port_clock(struct intel_display *display,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(display) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

static void
intel_pmdemand_update_max_ddiclk(struct intel_display *display,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(display, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct intel_display *display,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
					set_bit);
}

static void
intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active phys in the old connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in new connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
				  struct intel_encoder *encoder)
{
	return encoder && intel_encoder_is_tc(encoder);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
			continue;

		return true;
	}

	return false;
}
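
/*
 * A pmdemand update is needed whenever any of its inputs change: QGV peak
 * bandwidth, active pipes, enabled dbuf slices (pre-Xe3 only), cdclk
 * frequency or voltage level, any port clock, or a connector changing
 * encoders (unless both the old and new phys are Type-C).
 */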

static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
		return true;

	if (DISPLAY_VER(display) < 30) {
		if (new_dbuf_state &&
		    new_dbuf_state->enabled_slices !=
		    old_dbuf_state->enabled_slices)
			return true;
	}

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}
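
/*
 * Compute the new pmdemand parameters from the bw, dbuf and cdclk global
 * states. With a modeset allowed the global state is serialized; otherwise
 * it is only locked, and the programming side then has to take the current
 * register values into account (see intel_pmdemand_update_params()).
 */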

int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(display) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* firmware will calculate the qclk_gv_index, requirement is set to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	if (DISPLAY_VER(display) < 30) {
		new_pmdemand_state->params.active_dbufs =
			min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	} else {
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
	}

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);

	/*
	 * Active_PLLs starts with 1 because of the CDCLK PLL.
	 * TODO: account for the genlock filter when it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to max as they cannot be calculated during flips and
	 * fastsets without taking the global state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);

	return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
	return !(intel_de_wait_for_clear(display,
					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear(display,
					 GEN12_DCPR_STATUS_1,
					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}
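
/*
 * Read back the currently programmed pmdemand parameters into the software
 * state. If a previous transaction is somehow still pending, the parameters
 * are zeroed instead.
 */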

void
intel_pmdemand_init_pmdemand_params(struct intel_display *display,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(display) < 14)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

	if (DISPLAY_VER(display) >= 30) {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
	} else {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
		pmdemand_state->params.active_dbufs =
			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

		pmdemand_state->params.scalers =
			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
	}

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct intel_display *display)
{
	return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

static void intel_pmdemand_poll(struct intel_display *display)
{
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
				   XELPDP_PMDEMAND_REQ_ENABLE, 0,
				   50, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
			timeout_ms, status);
}

static void intel_pmdemand_wait(struct intel_display *display)
{
	/* Wa_14024400148: on LNL, use the polling method */
	if (DISPLAY_VER(display) == 20) {
		intel_pmdemand_poll(display);
	} else {
		if (!wait_event_timeout(display->pmdemand.waitqueue,
					intel_pmdemand_req_complete(display),
					msecs_to_jiffies_timeout(10)))
			drm_err(display->drm,
				"timed out waiting for Punit PM Demand Response\n");
	}
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct intel_display *display,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
	if (DISPLAY_VER(display) >= 30)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static void
intel_pmdemand_update_params(struct intel_display *display,
			     const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happen in two steps: pre plane and
	 * post plane. During the pre plane update, as the DE might still be
	 * handling some old operations, program the pmdemand parameters with
	 * the higher of the old and new values to avoid unexpected
	 * performance issues. Once things have settled, switch to the new
	 * parameter values as part of the post plane update.
	 *
	 * If the pmdemand params update happens without modeset allowed, we
	 * can't serialize the updates, which implies that parallel atomic
	 * commits may affect the pmdemand parameters. In that case the
	 * current register values have to be considered as well: in the pre
	 * plane update take the max of the old, new and current register
	 * values if not serialized, and in the post plane update the max of
	 * the new and current register values if not serialized.
	 */

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Set 1*/
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2*/
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

	if (DISPLAY_VER(display) >= 30) {
		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
	} else {
		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	}

#undef update_reg
}

static void
intel_pmdemand_program_params(struct intel_display *display,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate pm demand request only if register values are changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(display->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}
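
/*
 * Pre plane update: program the max of the old and new parameters so the
 * reported demand never drops below what either configuration needs while
 * the update is in flight.
 */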

void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}
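
/*
 * Post plane update: drop to the new parameters alone (old == NULL) now
 * that the update has settled.
 */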

void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}