// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_pcode.h"
#include "intel_rc6.h"

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in the
 * latency required to enter and leave RC6, and in the voltage consumed by
 * the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest one. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper states
 * save more power, but require a higher latency to switch to and to wake up
 * from.
 */

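/*
 * In the code below, these states correspond to the GEN6_RC_CTL_RC6_ENABLE,
 * GEN6_RC_CTL_RC6p_ENABLE and GEN6_RC_CTL_RC6pp_ENABLE control bits that the
 * per-platform enable functions combine into rc6->ctl_enable.
 */
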
static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
{
	return container_of(rc6, struct intel_gt, rc6);
}

static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
{
	return rc6_to_gt(rc)->uncore;
}

static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
{
	return rc6_to_gt(rc)->i915;
}

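/*
 * The set() helper below uses the "_fw" write variant, which skips the
 * forcewake bookkeeping; callers are expected to already hold the required
 * forcewake (see intel_rc6_enable()) or to target registers that do not
 * need it.
 */
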
static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

static void gen11_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_gt *gt = rc6_to_gt(rc6);
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 pg_enable;
	int i;

	/*
	 * With GuCRC, these parameters are set by GuC
	 */
	if (!intel_uc_uses_guc_rc(&gt->uc)) {
		/* 2b: Program RC6 thresholds.*/
		set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
		set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);

		set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
		set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
		for_each_engine(engine, rc6_to_gt(rc6), id)
			set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

		set(uncore, GUC_MAX_IDLE_COUNT, 0xA);

		set(uncore, GEN6_RC_SLEEP, 0);

		set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
	}

	/*
	 * 2c: Program Coarse Power Gating Policies.
	 *
	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
	 * use instead is a more conservative estimate for the maximum time
	 * it takes us to service a CS interrupt and submit a new ELSP - that
	 * is the time which the GPU is idle waiting for the CPU to select the
	 * next request to execute. If the idle hysteresis is less than that
	 * interrupt service latency, the hardware will automatically gate
	 * the power well and we will then incur the wake up cost on top of
	 * the service latency. A similar guide from plane_state is that we
	 * do not want the enable hysteresis to be less than the wakeup latency.
	 *
	 * igt/gem_exec_nop/sequential provides a rough estimate for the
	 * service latency, and puts it under 10us for Icelake, similar to
	 * Broadwell+. To be conservative, we want to factor in a context
	 * switch on top (due to ksoftirqd).
	 */
	set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
	set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);

	/*
	 * 3a: Enable RC6
	 *
	 * With GuCRC, we do not enable bit 31 of RC_CTL,
	 * thus allowing GuC to control RC6 entry/exit fully instead.
	 * We will not set the HW ENABLE and EI bits.
	 */
	if (!intel_guc_rc_enable(&gt->uc.guc))
		rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
	else
		rc6->ctl_enable =
			GEN6_RC_CTL_HW_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE |
			GEN6_RC_CTL_EI_MODE(1);

	/* Wa_16011777198 - Render powergating must remain disabled */
	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
		pg_enable =
			GEN9_MEDIA_PG_ENABLE |
			GEN11_MEDIA_SAMPLER_PG_ENABLE;
	else
		pg_enable =
			GEN9_RENDER_PG_ENABLE |
			GEN9_MEDIA_PG_ENABLE |
			GEN11_MEDIA_SAMPLER_PG_ENABLE;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		for (i = 0; i < I915_MAX_VCS; i++)
			if (HAS_ENGINE(gt, _VCS(i)))
				pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
					      VDN_MFX_POWERGATE_ENABLE(i));
	}

	set(uncore, GEN9_PG_ENABLE, pg_enable);
}

static void gen9_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 2b: Program RC6 thresholds.*/
	if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 11) {
		set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
		set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
	} else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
		/*
		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
		 * when CPG is enabled
		 */
		set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	} else {
		set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	}

	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

	set(uncore, GUC_MAX_IDLE_COUNT, 0xA);

	set(uncore, GEN6_RC_SLEEP, 0);

	/*
	 * 2c: Program Coarse Power Gating Policies.
	 *
	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
	 * use instead is a more conservative estimate for the maximum time
	 * it takes us to service a CS interrupt and submit a new ELSP - that
	 * is the time which the GPU is idle waiting for the CPU to select the
	 * next request to execute. If the idle hysteresis is less than that
	 * interrupt service latency, the hardware will automatically gate
	 * the power well and we will then incur the wake up cost on top of
	 * the service latency. A similar guide from plane_state is that we
	 * do not want the enable hysteresis to be less than the wakeup latency.
	 *
	 * igt/gem_exec_nop/sequential provides a rough estimate for the
	 * service latency, and puts it around 10us for Broadwell (and other
	 * big core) and around 40us for Broxton (and other low power cores).
	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
	 * However, the wakeup latency on Broxton is closer to 100us. To be
	 * conservative, we have to factor in a context switch on top (due
	 * to ksoftirqd).
	 */
	set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
	set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);

	/* 3a: Enable RC6 */
	set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	rc6->ctl_enable =
		GEN6_RC_CTL_HW_ENABLE |
		GEN6_RC_CTL_RC6_ENABLE |
		GEN6_RC_CTL_EI_MODE(1);

	/*
	 * WaRsDisableCoarsePowerGating:skl,cnl
	 * - Render/Media PG need to be disabled with RC6.
	 */
	if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
		set(uncore, GEN9_PG_ENABLE,
		    GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
}

static void gen8_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 2b: Program RC6 thresholds.*/
	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
	set(uncore, GEN6_RC_SLEEP, 0);
	set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */

	/* 3: Enable RC6 */
	rc6->ctl_enable =
		GEN6_RC_CTL_HW_ENABLE |
		GEN7_RC_CTL_TO_MODE |
		GEN6_RC_CTL_RC6_ENABLE;
}

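/*
 * Note the gen8 path selects GEN7_RC_CTL_TO_MODE rather than EI mode: the
 * threshold above is then treated as an idle timeout in 1.28us units (hence
 * "800us/1.28 for TO"), whereas in EI mode the threshold is evaluated per
 * evaluation interval. This reading of the two modes is an interpretation of
 * the comments above, not taken from Bspec.
 */
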
static void gen6_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask;
	int ret;

	set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

	set(uncore, GEN6_RC_SLEEP, 0);
	set(uncore, GEN6_RC1e_THRESHOLD, 1000);
	set(uncore, GEN6_RC6_THRESHOLD, 50000);
	set(uncore, GEN6_RC6p_THRESHOLD, 150000);
	set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* We don't use those on Haswell */
	rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	if (HAS_RC6p(i915))
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
	if (HAS_RC6pp(i915))
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	rc6->ctl_enable =
		rc6_mask |
		GEN6_RC_CTL_EI_MODE(1) |
		GEN6_RC_CTL_HW_ENABLE;

	rc6vids = 0;
	ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
	if (GRAPHICS_VER(i915) == 6 && ret) {
		drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
	} else if (GRAPHICS_VER(i915) == 6 &&
		   (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		drm_dbg(&i915->drm,
			"You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
			GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			drm_err(&i915->drm,
				"Couldn't fix incorrect rc6 voltage\n");
	}
}

/* Check that the pcbr address is not empty. */
static int chv_rc6_init(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	resource_size_t pctx_paddr, paddr;
	resource_size_t pctx_size = 32 * SZ_1K;
	u32 pcbr;

	pcbr = intel_uncore_read(uncore, VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
		paddr = i915->dsm.end + 1 - pctx_size;
		GEM_BUG_ON(paddr > U32_MAX);

		pctx_paddr = (paddr & ~4095);
		intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
	}

	return 0;
}

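/*
 * When the BIOS leaves PCBR unprogrammed, the power context is carved out of
 * the top of stolen memory (i915->dsm): the highest pctx_size-sized slot,
 * rounded down to 4KiB (the & ~4095 above), matching the alignment VLV_PCBR
 * expects.
 */
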
static int vlv_rc6_init(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct drm_i915_gem_object *pctx;
	resource_size_t pctx_paddr;
	resource_size_t pctx_size = 24 * SZ_1K;
	u32 pcbr;

	pcbr = intel_uncore_read(uncore, VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		resource_size_t pcbr_offset;

		pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
		pctx = i915_gem_object_create_stolen_for_preallocated(i915,
								      pcbr_offset,
								      pctx_size);
		if (IS_ERR(pctx))
			return PTR_ERR(pctx);

		goto out;
	}

	drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(i915, pctx_size);
	if (IS_ERR(pctx)) {
		drm_dbg(&i915->drm,
			"not enough stolen space for PCTX, disabling\n");
		return PTR_ERR(pctx);
	}

	GEM_BUG_ON(range_overflows_end_t(u64,
					 i915->dsm.start,
					 pctx->stolen->start,
					 U32_MAX));
	pctx_paddr = i915->dsm.start + pctx->stolen->start;
	intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);

out:
	rc6->pctx = pctx;
	return 0;
}

static void chv_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 2a: Program RC6 thresholds.*/
	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
	set(uncore, GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us (0x186 * 1.28 us) */
	set(uncore, GEN6_RC6_THRESHOLD, 0x186);

	/* Allows RC6 residency counter to work */
	set(uncore, VLV_COUNTER_CONTROL,
	    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
			       VLV_MEDIA_RC6_COUNT_EN |
			       VLV_RENDER_RC6_COUNT_EN));

	/* 3: Enable RC6 */
	rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
}

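/*
 * VLV_COUNTER_CONTROL is a masked register: _MASKED_BIT_ENABLE() writes the
 * selected bits together with their write-mask in the upper 16 bits, so only
 * the named bits change and the rest of the register is left untouched.
 */
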
static void vlv_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

	set(uncore, GEN6_RC6_THRESHOLD, 0x557);

	/* Allows RC6 residency counter to work */
	set(uncore, VLV_COUNTER_CONTROL,
	    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
			       VLV_MEDIA_RC0_COUNT_EN |
			       VLV_RENDER_RC0_COUNT_EN |
			       VLV_MEDIA_RC6_COUNT_EN |
			       VLV_RENDER_RC6_COUNT_EN));

	rc6->ctl_enable =
		GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
}

static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	u32 rc6_ctx_base, rc_ctl, rc_sw_target;
	bool enable_rc6 = true;

	rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
	rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
	rc_sw_target &= RC_SW_TARGET_STATE_MASK;
	rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
	drm_dbg(&i915->drm, "BIOS enabled RC states: "
		"HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
		onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
		onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
		rc_sw_target);

	if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base =
		intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
	      rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
		drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT(RENDER_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
	      (intel_uncore_read(uncore, PWRCTX_MAXCNT(GEN6_BSD_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
	      (intel_uncore_read(uncore, PWRCTX_MAXCNT(BLT_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
	      (intel_uncore_read(uncore, PWRCTX_MAXCNT(VEBOX_RING_BASE)) & IDLE_TIME_MASK) > 1)) {
		drm_dbg(&i915->drm,
			"Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
	    !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
	    !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
		drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
		enable_rc6 = false;
	}

	if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
		drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
		enable_rc6 = false;
	}

	if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
		drm_dbg(&i915->drm, "GPM control not setup properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}

static bool rc6_supported(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);

	if (!HAS_RC6(i915))
		return false;

	if (intel_vgpu_active(i915))
		return false;

	if (is_mock_gt(rc6_to_gt(rc6)))
		return false;

	if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
		drm_notice(&i915->drm,
			   "RC6 and powersaving disabled by BIOS\n");
		return false;
	}

	return true;
}

static void rpm_get(struct intel_rc6 *rc6)
{
	GEM_BUG_ON(rc6->wakeref);
	pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev);
	rc6->wakeref = true;
}

static void rpm_put(struct intel_rc6 *rc6)
{
	GEM_BUG_ON(!rc6->wakeref);
	pm_runtime_put(rc6_to_i915(rc6)->drm.dev);
	rc6->wakeref = false;
}

static bool pctx_corrupted(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);

	if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
		return false;

	if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
		return false;

	drm_notice(&i915->drm,
		   "RC6 context corruption, disabling runtime power management\n");

	return true;
}

static void __intel_rc6_disable(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_gt *gt = rc6_to_gt(rc6);

	/* Take control of RC6 back from GuC */
	intel_guc_rc_disable(&gt->uc.guc);

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (GRAPHICS_VER(i915) >= 9)
		set(uncore, GEN9_PG_ENABLE, 0);
	set(uncore, GEN6_RC_CONTROL, 0);
	set(uncore, GEN6_RC_STATE, 0);
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

void intel_rc6_init(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	int err;

	/* Disable runtime-pm until we can save the GPU state with rc6 pctx */
	rpm_get(rc6);

	if (!rc6_supported(rc6))
		return;

	if (IS_CHERRYVIEW(i915))
		err = chv_rc6_init(rc6);
	else if (IS_VALLEYVIEW(i915))
		err = vlv_rc6_init(rc6);
	else
		err = 0;

	/* Sanitize rc6, ensure it is disabled before we are ready. */
	__intel_rc6_disable(rc6);

	rc6->supported = err == 0;
}

void intel_rc6_sanitize(struct intel_rc6 *rc6)
{
	memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));

	if (rc6->enabled) { /* unbalanced suspend/resume */
		rpm_get(rc6);
		rc6->enabled = false;
	}

	if (rc6->supported)
		__intel_rc6_disable(rc6);
}

void intel_rc6_enable(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);

	if (!rc6->supported)
		return;

	GEM_BUG_ON(rc6->enabled);

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (IS_CHERRYVIEW(i915))
		chv_rc6_enable(rc6);
	else if (IS_VALLEYVIEW(i915))
		vlv_rc6_enable(rc6);
	else if (GRAPHICS_VER(i915) >= 11)
		gen11_rc6_enable(rc6);
	else if (GRAPHICS_VER(i915) >= 9)
		gen9_rc6_enable(rc6);
	else if (IS_BROADWELL(i915))
		gen8_rc6_enable(rc6);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_rc6_enable(rc6);

	rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
		rc6->ctl_enable = 0;

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	if (unlikely(pctx_corrupted(rc6)))
		return;

	/* rc6 is ready, runtime-pm is go! */
	rpm_put(rc6);
	rc6->enabled = true;
}

void intel_rc6_unpark(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);

	if (!rc6->enabled)
		return;

	/* Restore HW timers for automatic RC6 entry while busy */
	set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
}

void intel_rc6_park(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	unsigned int target;

	if (!rc6->enabled)
		return;

	if (unlikely(pctx_corrupted(rc6))) {
		intel_rc6_disable(rc6);
		return;
	}

	if (!rc6->manual)
		return;

	/* Turn off the HW timers and go directly to rc6 */
	set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);

	if (HAS_RC6pp(rc6_to_i915(rc6)))
		target = 0x6; /* deepest rc6 */
	else if (HAS_RC6p(rc6_to_i915(rc6)))
		target = 0x5; /* deep rc6 */
	else
		target = 0x4; /* normal rc6 */
	set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
}

void intel_rc6_disable(struct intel_rc6 *rc6)
{
	if (!rc6->enabled)
		return;

	rpm_get(rc6);
	rc6->enabled = false;

	__intel_rc6_disable(rc6);
	rpm_put(rc6);
}

void intel_rc6_fini(struct intel_rc6 *rc6)
{
	struct drm_i915_gem_object *pctx;

	intel_rc6_disable(rc6);

	pctx = fetch_and_zero(&rc6->pctx);
	if (pctx)
		i915_gem_object_put(pctx);

	if (rc6->wakeref)
		rpm_put(rc6);
}

static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/*
	 * The registers accessed do not need forcewake. We borrow
	 * uncore lock to prevent concurrent access to range reg.
	 */
	lockdep_assert_held(&uncore->lock);

	/*
	 * vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	set(uncore, VLV_COUNTER_CONTROL,
	    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = intel_uncore_read_fw(uncore, reg);
	do {
		tmp = upper;

		set(uncore, VLV_COUNTER_CONTROL,
		    _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = intel_uncore_read_fw(uncore, reg);

		set(uncore, VLV_COUNTER_CONTROL,
		    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = intel_uncore_read_fw(uncore, reg);
	} while (upper != tmp && --loop);

	/*
	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * for backwards compatibility.
	 */

	return lower | (u64)upper << 8;
}

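/*
 * The high window exposes bits [39:8] of the 40-bit counter and the low
 * window bits [31:0]; once the retry loop has seen a stable upper value, the
 * overlapping bits agree and lower | (u64)upper << 8 reconstructs the full
 * 40-bit count.
 */
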
u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	u64 time_hw, prev_hw, overflow_hw;
	unsigned int fw_domains;
	unsigned long flags;
	unsigned int i;
	u32 mul, div;

	if (!rc6->supported)
		return 0;

	/*
	 * Store previous hw counter values for counter wrap-around handling.
	 *
	 * There are only four interesting registers and they live next to each
	 * other so we can use the relative address, compared to the smallest
	 * one as the index into driver storage.
	 */
	i = (i915_mmio_reg_offset(reg) -
	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
	if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency)))
		return 0;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		mul = 1000000;
		div = i915->czclk_freq;
		overflow_hw = BIT_ULL(40);
		time_hw = vlv_residency_raw(uncore, reg);
	} else {
		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
		if (IS_GEN9_LP(i915)) {
			mul = 10000;
			div = 12;
		} else {
			mul = 1280;
			div = 1;
		}

		overflow_hw = BIT_ULL(32);
		time_hw = intel_uncore_read_fw(uncore, reg);
	}

	/*
	 * Counter wrap handling.
	 *
	 * But relying on a sufficient frequency of queries otherwise counters
	 * can still wrap.
	 */
	prev_hw = rc6->prev_hw_residency[i];
	rc6->prev_hw_residency[i] = time_hw;

	/* RC6 delta from last sample. */
	if (time_hw >= prev_hw)
		time_hw -= prev_hw;
	else
		time_hw += overflow_hw - prev_hw;

	/* Add delta to RC6 extended raw driver copy. */
	time_hw += rc6->cur_residency[i];
	rc6->cur_residency[i] = time_hw;

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return mul_u64_u32_div(time_hw, mul, div);
}

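/*
 * As a worked example of the scaling above (assuming the Gen9LP constants):
 * a raw count of 12000 ticks of 833.33ns converts to
 * 12000 * 10000 / 12 = 10,000,000ns, i.e. 10ms of RC6 residency.
 */
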
u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rc6.c"
#endif