// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"

#define BUSY_MAX_EI	20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);
static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return &gt->uc.guc.slpc;
}

static bool rps_uses_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return intel_uc_uses_guc_slpc(&gt->uc);
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}
static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = from_timer(rps, t, timer);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;
	s64 max_busy[3] = {};

	timestamp = 0;
	for_each_engine(engine, rps_to_gt(rps), id) {
		s64 busy;
		int i;

		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		busy = ktime_to_ns(ktime_sub(dt, last));
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);
		}
	}

	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		s64 busy;
		int i;

		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multiple engines being active does not necessarily
		 * imply a single continuous workload across all engines, we
		 * hedge our bets by only contributing a factor of the
		 * distributed load into our busyness calculation.
		 */
		busy = max_busy[0];
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			if (!max_busy[i])
				break;

			busy += div_u64(max_busy[i], 1 << i);
		}
		GT_TRACE(rps_to_gt(rps),
			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],
			 rps->pm_interval);

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		}

		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}
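/*
 * Worked example for the hedged sum above (illustrative numbers only):
 * with the three busiest engines at {16ms, 8ms, 4ms} over a dt of 20ms,
 * busy = 16 + 8/2 + 4/4 = 21ms, i.e. roughly 105% of the interval, so
 * "100 * busy > up_threshold * dt" holds for any up threshold below 105%
 * and an upclock request is queued for rps_work().
 */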
static void rps_start_timer(struct intel_rps *rps)
{
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
	del_timer_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;

	return rps_pm_sanitize_mask(rps, ~mask);
}
static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GEM_BUG_ON(rps_uses_slpc(rps));

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(&gt->irq_lock);
	if (GRAPHICS_VER(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}
static unsigned long __ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}
static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}
static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}
static unsigned int gen5_invert_freq(struct intel_rps *rps,
				     unsigned int val)
{
	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	return val;
}

static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return -EBUSY; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = gen5_invert_freq(rps, val);

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return 0;
}

static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	spin_lock_irq(&mchdev_lock);
	err = __gen5_rps_set(rps, val);
	spin_unlock_irq(&mchdev_lock);

	return err;
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}
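/*
 * Example decode, assuming the field layout above: vidfreq = 0x121001
 * yields div = 0x12 (18), post = 1 and pre = 1, so the helper returns
 * 18 * 133333 / (1 << 1) = 1199997 (in the same units as the 133333
 * reference constant).
 */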
static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}
static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	__gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_lock(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	spin_lock(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore, MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

	/* Go back to the starting frequency */
	__gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}
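/*
 * For example, on GRAPHICS_VER >= 9 with max_freq_softlimit = 0x18 and val
 * at a min softlimit of 0x0b, this packs 0x18 << 23 | 0x0b << 14 into
 * GEN6_RP_INTERRUPT_LIMITS: the high field caps upclocking, while the low
 * field is only populated at the floor to avoid the rc6-exit race noted
 * above.
 */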
static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power, threshold_up, ei_up, threshold_down, ei_down);

	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));

	set(uncore, GEN6_RP_CONTROL,
	    (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}
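/*
 * Example of the unit conversion above: for HIGH_POWER, an ei_up of
 * 10000us is passed to intel_gt_ns_to_pm_interval() as 10000 * 1000 ns,
 * which on parts using the legacy 1.28us PM timestamp unit works out to
 * roughly 7812 hardware ticks (the exact divisor is clock-dependent on
 * newer platforms).
 */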
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n",
		 str_yes_no(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}
static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	GEM_BUG_ON(rps_uses_slpc(rps));

	if (GRAPHICS_VER(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		err = gen6_rps_set(rps, val);
	else
		err = gen5_rps_set(rps, val);
	if (err)
		return err;

	if (update && GRAPHICS_VER(i915) >= 6)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}
void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	rps->pm_iir = 0;
	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
		gen5_rps_update(rps);
}
void intel_rps_park(struct intel_rps *rps)
{
	int adj;

	if (!intel_rps_is_enabled(rps))
		return;

	if (!intel_rps_clear_active(rps))
		return;

	if (intel_rps_uses_timer(rps))
		rps_stop_timer(rps);
	if (intel_rps_has_interrupts(rps))
		rps_disable_interrupts(rps);

	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	if (rps->last_adj < 0)
		adj = rps->last_adj;
	else /* CHV needs even encode values */
		adj = -2;
	rps->last_adj = adj;
	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
	if (rps->cur_freq < rps->efficient_freq) {
		rps->cur_freq = rps->efficient_freq;
		rps->last_adj = 0;
	}

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
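/*
 * Example of the parking downclock above: with cur_freq 0x12 and no
 * negative last_adj pending, adj = -2 takes cur_freq to 0x10 (kept even
 * for CHV); if that would fall below efficient_freq we snap back to RPe
 * and clear last_adj so the next unpark starts from a neutral state.
 */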
u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return slpc->boost_freq;
	}

	return intel_gpu_freq(rps, rps->boost_freq);
}

static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
{
	bool boost = false;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

	return 0;
}

int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return intel_guc_slpc_set_boost_freq(slpc, freq);
	}

	return rps_set_boost_freq(rps, freq);
}

void intel_rps_dec_waiters(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		intel_guc_slpc_dec_waiters(slpc);
	} else {
		atomic_dec(&rps->num_waiters);
	}
}
void intel_rps_boost(struct i915_request *rq)
{
	struct intel_guc_slpc *slpc;

	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
		return;

	/* Serializes with i915_request_retire() */
	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;

		if (rps_uses_slpc(rps)) {
			slpc = rps_to_slpc(rps);

			/* Return if old value is non zero */
			if (!atomic_fetch_inc(&slpc->num_waiters))
				schedule_work(&slpc->boost_work);

			return;
		}

		if (atomic_fetch_inc(&rps->num_waiters))
			return;

		if (!intel_rps_is_active(rps))
			return;

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
	}
}
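/*
 * Usage sketch: intel_rps_boost() is intended to be cheap enough to call
 * for every request a client is about to wait on. The I915_FENCE_FLAG_BOOST
 * test ensures each request contributes at most one waiter, and the boost
 * is dropped again via intel_rps_dec_waiters() (or the SLPC equivalent)
 * once the waiter is satisfied.
 */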
int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;

	return 0;
}

static u32 intel_rps_read_state_cap(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	if (IS_PONTEVECCHIO(i915))
		return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
	else if (IS_XEHPSDV(i915))
		return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
	else if (IS_GEN9_LP(i915))
		return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
	else
		return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
}
/**
 * gen6_rps_get_freq_caps - Get freq caps exposed by HW
 * @rps: the intel_rps structure
 * @caps: returned freq caps
 *
 * Returned "caps" frequencies should be converted to MHz using
 * intel_gpu_freq()
 */
void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 rp_state_cap;

	rp_state_cap = intel_rps_read_state_cap(rps);

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		/*
		 * In this case rp_state_cap register reports frequencies in
		 * units of 50 MHz. Convert these to the actual "hw unit", i.e.
		 * units of 16.67 MHz
		 */
		caps->rp0_freq *= GEN9_FREQ_SCALER;
		caps->rp1_freq *= GEN9_FREQ_SCALER;
		caps->min_freq *= GEN9_FREQ_SCALER;
	}
}
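/*
 * Example of the unit conversion above: a raw RP0 field of 22 (i.e.
 * 22 * 50 MHz = 1100 MHz) is scaled by GEN9_FREQ_SCALER (3) to 66 units
 * of 16.67 MHz; intel_gpu_freq() later reverses this as
 * val * GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER = 66 * 50 / 3 = 1100 MHz.
 */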
static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	rps->rp0_freq = caps.rp0_freq;
	rps->rp1_freq = caps.rp1_freq;
	rps->min_freq = caps.min_freq;

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		u32 ddcc_status = 0;
		u32 mult = 1;

		if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
			mult = GEN9_FREQ_SCALER;
		if (snb_pcode_read(rps_to_gt(rps)->uncore,
				   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
				   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u32,
					((ddcc_status >> 8) & 0xff) * mult,
					rps->min_freq,
					rps->max_freq);
	}
}

static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;

	/* Program defaults and thresholds for RPS */
	if (GRAPHICS_VER(gt->i915) == 9)
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}
static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_gt *gt = rps_to_gt(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (gt->info.sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS*/
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n",
		str_yes_no(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}
static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to
	 * 0xc0 to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n",
		str_yes_no(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}
static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned int t, state1, state2;
	u32 pxvid, ext_v;
	u64 corr, corr2;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = div_u64(corr * 150142 * state1, 10000) - 78642;
	corr2 = div_u64(corr, 100000) * ips->corr;

	state2 = div_u64(corr2 * state1, 10000);
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}

static bool has_busy_stats(struct intel_rps *rps)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, rps_to_gt(rps), id) {
		if (!intel_engine_supports_stats(engine))
			return false;
	}

	return true;
}
*rps
)
1485 struct drm_i915_private
*i915
= rps_to_i915(rps
);
1486 struct intel_uncore
*uncore
= rps_to_uncore(rps
);
1487 bool enabled
= false;
1492 if (rps_uses_slpc(rps
))
1495 intel_gt_check_clock_frequency(rps_to_gt(rps
));
1497 intel_uncore_forcewake_get(uncore
, FORCEWAKE_ALL
);
1498 if (rps
->max_freq
<= rps
->min_freq
)
1499 /* leave disabled, no room for dynamic reclocking */;
1500 else if (IS_CHERRYVIEW(i915
))
1501 enabled
= chv_rps_enable(rps
);
1502 else if (IS_VALLEYVIEW(i915
))
1503 enabled
= vlv_rps_enable(rps
);
1504 else if (GRAPHICS_VER(i915
) >= 9)
1505 enabled
= gen9_rps_enable(rps
);
1506 else if (GRAPHICS_VER(i915
) >= 8)
1507 enabled
= gen8_rps_enable(rps
);
1508 else if (GRAPHICS_VER(i915
) >= 6)
1509 enabled
= gen6_rps_enable(rps
);
1510 else if (IS_IRONLAKE_M(i915
))
1511 enabled
= gen5_rps_enable(rps
);
1513 MISSING_CASE(GRAPHICS_VER(i915
));
1514 intel_uncore_forcewake_put(uncore
, FORCEWAKE_ALL
);
1518 GT_TRACE(rps_to_gt(rps
),
1519 "min:%x, max:%x, freq:[%d, %d]\n",
1520 rps
->min_freq
, rps
->max_freq
,
1521 intel_gpu_freq(rps
, rps
->min_freq
),
1522 intel_gpu_freq(rps
, rps
->max_freq
));
1524 GEM_BUG_ON(rps
->max_freq
< rps
->min_freq
);
1525 GEM_BUG_ON(rps
->idle_freq
> rps
->max_freq
);
1527 GEM_BUG_ON(rps
->efficient_freq
< rps
->min_freq
);
1528 GEM_BUG_ON(rps
->efficient_freq
> rps
->max_freq
);
1530 if (has_busy_stats(rps
))
1531 intel_rps_set_timer(rps
);
1532 else if (GRAPHICS_VER(i915
) >= 6 && GRAPHICS_VER(i915
) <= 11)
1533 intel_rps_set_interrupts(rps
);
1535 /* Ironlake currently uses intel_ips.ko */ {}
1537 intel_rps_set_enabled(rps
);
1540 static void gen6_rps_disable(struct intel_rps
*rps
)
1542 set(rps_to_uncore(rps
), GEN6_RP_CONTROL
, 0);
1545 void intel_rps_disable(struct intel_rps
*rps
)
1547 struct drm_i915_private
*i915
= rps_to_i915(rps
);
1549 intel_rps_clear_enabled(rps
);
1550 intel_rps_clear_interrupts(rps
);
1551 intel_rps_clear_timer(rps
);
1553 if (GRAPHICS_VER(i915
) >= 6)
1554 gen6_rps_disable(rps
);
1555 else if (IS_IRONLAKE_M(i915
))
1556 gen5_rps_disable(rps
);
static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
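/*
 * The *_gpu_freq() and *_freq_opcode() helpers above are inverses of each
 * other (up to rounding): e.g. byt_freq_opcode(rps, byt_gpu_freq(rps, val))
 * recovers val, since one scales by gpll_ref_freq / 1000 after subtracting
 * the 0xb7 base and the other divides and adds it back. Cherryview
 * additionally rounds the opcode to an even value, as its punit only
 * accepts even frequency bins.
 */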
int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return val * GT_FREQUENCY_MULTIPLIER;
	else
		return val;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
	else
		return val;
}

static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}
static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
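/*
 * The comparison above is the same busyness test rps_timer() performs,
 * expressed in C0 residency: c0/time approximates the fraction of czclk
 * cycles the GPU spent active over the interval, and it is weighed
 * against power.up_threshold/power.down_threshold (in %) to synthesize
 * the up/down event fed into rps_work().
 */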
static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);
	if (!intel_rps_is_active(rps)) {
		mutex_unlock(&rps->lock);
		return;
	}

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, str_yes_no(client_boost),
		 adj, new_freq, min, max);

	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	/*
	 * sysfs frequency limits may have snuck in while
	 * servicing the interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		adj = 0;
	}
	rps->last_adj = adj;

	mutex_unlock(&rps->lock);

out:
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);
}
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(&gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(&gt->irq_lock);
	}

	if (GRAPHICS_VER(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}
void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);
	timer_setup(&rps->timer, rps_timer, 0);

	atomic_set(&rps->num_waiters, 0);
}

void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (rps_uses_slpc(rps))
		return;

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (GRAPHICS_VER(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	/* GuC needs ARAT expired interrupt unmasked */
	if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
		rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
}
void intel_rps_sanitize(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return;

	if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
		rps_disable_interrupts(rps);
}

u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = (rpstat >> 8) & 0xff;
	else if (GRAPHICS_VER(i915) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else if (GRAPHICS_VER(i915) >= 6)
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	else
		cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
					MEMSTAT_PSTATE_SHIFT);

	return cagf;
}
static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 freq;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else if (GRAPHICS_VER(i915) >= 6) {
		freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
	} else {
		freq = intel_uncore_read(uncore, MEMSTAT_ILK);
	}

	return intel_rps_get_cagf(rps, freq);
}

u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}

u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);

	return freq;
}

static u32 intel_rps_get_req(u32 pureq)
{
	u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;

	return req;
}

u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
{
	u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));

	return intel_gpu_freq(rps, freq);
}
u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return intel_rps_read_punit_req_frequency(rps);
	else
		return intel_gpu_freq(rps, rps->cur_freq);
}

u32 intel_rps_get_max_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->max_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->max_freq_softlimit);
}

u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp0_freq;
	else
		return intel_gpu_freq(rps, rps->rp0_freq);
}

u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp1_freq;
	else
		return intel_gpu_freq(rps, rps->rp1_freq);
}

u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq;
	else
		return intel_gpu_freq(rps, rps->min_freq);
}
static int set_max_freq(struct intel_rps *rps, u32 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		drm_dbg(&i915->drm, "User requested overclocking to %d\n",
			intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_max_freq(slpc, val);
	else
		return set_max_freq(rps, val);
}

u32 intel_rps_get_min_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
static int set_min_freq(struct intel_rps *rps, u32 val)
{
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_min_freq(slpc, val);
	else
		return set_min_freq(rps, val);
}

static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;

	/* Allow punit to process software requests */
	intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
}
void intel_rps_raise_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.rp0_freq <<
				   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				   GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->rp0_freq);
	}

	mutex_unlock(&rps->lock);
}

void intel_rps_lower_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.min_freq <<
				   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				   GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->min_freq);
	}

	mutex_unlock(&rps->lock);
}
static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
{
	struct intel_gt *gt = rps_to_gt(rps);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		val = intel_uncore_read(gt->uncore, reg32);

	return val;
}

bool rps_read_mask_mmio(struct intel_rps *rps,
			i915_reg_t reg32, u32 mask)
{
	return rps_read_mmio(rps, reg32) & mask;
}

/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;
/*
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (GRAPHICS_VER(gt->i915) == 5) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}
static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &to_gt(i915)->rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = to_gt(i915)->awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#include "selftest_slpc.c"
#endif