/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "gt/intel_llc.h"

#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
};

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* WaDisableDopClockGating */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when DSI port PLL
	 * is off and a MMIO access is attempted by any privilege
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
195 static void ilk_get_mem_freq(struct drm_i915_private
*dev_priv
)
199 ddrpll
= intel_uncore_read16(&dev_priv
->uncore
, DDRMPLL1
);
200 csipll
= intel_uncore_read16(&dev_priv
->uncore
, CSIPLL0
);
202 switch (ddrpll
& 0xff) {
204 dev_priv
->mem_freq
= 800;
207 dev_priv
->mem_freq
= 1066;
210 dev_priv
->mem_freq
= 1333;
213 dev_priv
->mem_freq
= 1600;
216 drm_dbg(&dev_priv
->drm
, "unknown memory frequency 0x%02x\n",
218 dev_priv
->mem_freq
= 0;
222 switch (csipll
& 0x3ff) {
224 dev_priv
->fsb_freq
= 3200;
227 dev_priv
->fsb_freq
= 3733;
230 dev_priv
->fsb_freq
= 4266;
233 dev_priv
->fsb_freq
= 4800;
236 dev_priv
->fsb_freq
= 5333;
239 dev_priv
->fsb_freq
= 5866;
242 dev_priv
->fsb_freq
= 6400;
245 drm_dbg(&dev_priv
->drm
, "unknown fsb frequency 0x%04x\n",
247 dev_priv
->fsb_freq
= 0;
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							  bool is_ddr3,
							  int fsb,
							  int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

	vlv_punit_put(dev_priv);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
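
/*
 * Illustrative ordering only, not lifted from the atomic commit code: per
 * the comment above, plane register writes must be preceded by disallowing
 * CxSR, and the actual plane update cannot land in the same frame, e.g.:
 *
 *	intel_set_memory_cxsr(dev_priv, false);
 *	... wait for at least one vblank so frame start re-evaluates CxSR ...
 *	... write the plane registers ...
 *	intel_set_memory_cxsr(dev_priv, true);
 */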

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
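
/*
 * Worked example (illustrative register values, not from Bspec): the FIFO
 * start point is a 9-bit quantity, bits 7:0 taken from DSPARB and bit 8
 * from DSPARB2. With dsparb = 0xc0, dsparb2 = 0x1, lo_shift = hi_shift = 0:
 *
 *	low  = (0xc0 >> 0) & 0xff        = 192
 *	high = ((0x1 >> 0) & 0x1) << 8   = 256
 *	VLV_FIFO_START(...)              = 192 | 256 = 448
 */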
479 static void vlv_get_fifo_size(struct intel_crtc_state
*crtc_state
)
481 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->uapi
.crtc
);
482 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
483 struct vlv_fifo_state
*fifo_state
= &crtc_state
->wm
.vlv
.fifo_state
;
484 enum pipe pipe
= crtc
->pipe
;
485 int sprite0_start
, sprite1_start
;
486 u32 dsparb
, dsparb2
, dsparb3
;
490 dsparb
= I915_READ(DSPARB
);
491 dsparb2
= I915_READ(DSPARB2
);
492 sprite0_start
= VLV_FIFO_START(dsparb
, dsparb2
, 0, 0);
493 sprite1_start
= VLV_FIFO_START(dsparb
, dsparb2
, 8, 4);
496 dsparb
= I915_READ(DSPARB
);
497 dsparb2
= I915_READ(DSPARB2
);
498 sprite0_start
= VLV_FIFO_START(dsparb
, dsparb2
, 16, 8);
499 sprite1_start
= VLV_FIFO_START(dsparb
, dsparb2
, 24, 12);
502 dsparb2
= I915_READ(DSPARB2
);
503 dsparb3
= I915_READ(DSPARB3
);
504 sprite0_start
= VLV_FIFO_START(dsparb3
, dsparb2
, 0, 16);
505 sprite1_start
= VLV_FIFO_START(dsparb3
, dsparb2
, 8, 20);
512 fifo_state
->plane
[PLANE_PRIMARY
] = sprite0_start
;
513 fifo_state
->plane
[PLANE_SPRITE0
] = sprite1_start
- sprite0_start
;
514 fifo_state
->plane
[PLANE_SPRITE1
] = 511 - sprite1_start
;
515 fifo_state
->plane
[PLANE_CURSOR
] = 63;
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}
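
/*
 * Worked example (hypothetical DSPARB contents): if the plane A field
 * decodes to 96 entries, i830_get_fifo_size() reports 96 >> 1 = 48
 * cachelines, i845_get_fifo_size() reports 96 >> 2 = 24 cachelines, and
 * i9xx_get_fifo_size() returns the raw entry count unchanged.
 */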

/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   __---__---__ (- plane active, _ blanking)
 *
 * or perhaps like this:
 *
 *   __----__----__ (- plane active, _ blanking)
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	u64 ret;

	ret = mul_u32_u32(pixel_rate, cpp * latency);
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
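
/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz, cpp = 4,
 * latency = 50 (i.e. 5.0 us expressed in 0.1 us units):
 *
 *	148500 * 4 * 50 / 10000 = 2970 bytes
 *
 * which is simply the plane's 594 bytes/us drain rate sustained over the
 * 5 us wakeup latency.
 */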

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   __ --__--__--__--__--__--__ (- plane active, _ blanking)
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
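
/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz,
 * htotal = 2200, width = 1920, cpp = 4, latency = 120 (12 us). One line
 * takes 2200 / 148500 kHz, roughly 14.8 us, so 12 us spans 0 complete lines:
 *
 *	(120 * 148500) / (2200 * 10000) = 0
 *	(0 + 1) * 1920 * 4              = 7680 bytes
 *
 * i.e. the watermark is rounded up to one full line's worth of data.
 */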

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
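
/*
 * Worked example (illustrative numbers; the fifo_size and guard_size used
 * here are hypothetical): pixel_rate = 148500, cpp = 4, latency_ns = 5000
 * gives intel_wm_method1() = 2970 bytes. With 64 byte cachelines and a
 * guard of 2:
 *
 *	entries = DIV_ROUND_UP(2970, 64) + 2 = 47 + 2 = 49
 *	wm_size = fifo_size - entries, e.g. 96 - 49 = 47
 *
 * which is then clamped to max_wm, default_wm and the minimum of 8 as above.
 */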

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

static bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
875 static void pnv_update_wm(struct intel_crtc
*unused_crtc
)
877 struct drm_i915_private
*dev_priv
= to_i915(unused_crtc
->base
.dev
);
878 struct intel_crtc
*crtc
;
879 const struct cxsr_latency
*latency
;
883 latency
= intel_get_cxsr_latency(!IS_MOBILE(dev_priv
),
888 drm_dbg_kms(&dev_priv
->drm
,
889 "Unknown FSB/MEM found, disable CxSR\n");
890 intel_set_memory_cxsr(dev_priv
, false);
894 crtc
= single_enabled_crtc(dev_priv
);
896 const struct drm_display_mode
*adjusted_mode
=
897 &crtc
->config
->hw
.adjusted_mode
;
898 const struct drm_framebuffer
*fb
=
899 crtc
->base
.primary
->state
->fb
;
900 int cpp
= fb
->format
->cpp
[0];
901 int clock
= adjusted_mode
->crtc_clock
;
904 wm
= intel_calculate_wm(clock
, &pnv_display_wm
,
905 pnv_display_wm
.fifo_size
,
906 cpp
, latency
->display_sr
);
907 reg
= I915_READ(DSPFW1
);
908 reg
&= ~DSPFW_SR_MASK
;
909 reg
|= FW_WM(wm
, SR
);
910 I915_WRITE(DSPFW1
, reg
);
911 drm_dbg_kms(&dev_priv
->drm
, "DSPFW1 register is %x\n", reg
);
914 wm
= intel_calculate_wm(clock
, &pnv_cursor_wm
,
915 pnv_display_wm
.fifo_size
,
916 4, latency
->cursor_sr
);
917 reg
= I915_READ(DSPFW3
);
918 reg
&= ~DSPFW_CURSOR_SR_MASK
;
919 reg
|= FW_WM(wm
, CURSOR_SR
);
920 I915_WRITE(DSPFW3
, reg
);
922 /* Display HPLL off SR */
923 wm
= intel_calculate_wm(clock
, &pnv_display_hplloff_wm
,
924 pnv_display_hplloff_wm
.fifo_size
,
925 cpp
, latency
->display_hpll_disable
);
926 reg
= I915_READ(DSPFW3
);
927 reg
&= ~DSPFW_HPLL_SR_MASK
;
928 reg
|= FW_WM(wm
, HPLL_SR
);
929 I915_WRITE(DSPFW3
, reg
);
931 /* cursor HPLL off SR */
932 wm
= intel_calculate_wm(clock
, &pnv_cursor_hplloff_wm
,
933 pnv_display_hplloff_wm
.fifo_size
,
934 4, latency
->cursor_hpll_disable
);
935 reg
= I915_READ(DSPFW3
);
936 reg
&= ~DSPFW_HPLL_CURSOR_MASK
;
937 reg
|= FW_WM(wm
, HPLL_CURSOR
);
938 I915_WRITE(DSPFW3
, reg
);
939 drm_dbg_kms(&dev_priv
->drm
, "DSPFW3 register is %x\n", reg
);
941 intel_set_memory_cxsr(dev_priv
, true);
943 intel_set_memory_cxsr(dev_priv
, false);
/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
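
/*
 * Worked example (illustrative numbers): with a 511 cacheline FIFO
 * (511 * 64 = 32704 bytes) and a 1920 pixel wide 4 bpp plane, eight whole
 * lines are 1920 * 4 * 8 = 61440 bytes, which does not fit, so no adjustment
 * is applied. Halve the width to 960 pixels and eight lines are 30720 bytes,
 * so the watermark is bumped by 32704 - 30720 = 1984 bytes.
 */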

static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}
993 #define FW_WM_VLV(value, plane) \
994 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
996 static void vlv_write_wm_values(struct drm_i915_private
*dev_priv
,
997 const struct vlv_wm_values
*wm
)
1001 for_each_pipe(dev_priv
, pipe
) {
1002 trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv
, pipe
), wm
);
1004 I915_WRITE(VLV_DDL(pipe
),
1005 (wm
->ddl
[pipe
].plane
[PLANE_CURSOR
] << DDL_CURSOR_SHIFT
) |
1006 (wm
->ddl
[pipe
].plane
[PLANE_SPRITE1
] << DDL_SPRITE_SHIFT(1)) |
1007 (wm
->ddl
[pipe
].plane
[PLANE_SPRITE0
] << DDL_SPRITE_SHIFT(0)) |
1008 (wm
->ddl
[pipe
].plane
[PLANE_PRIMARY
] << DDL_PLANE_SHIFT
));
1012 * Zero the (unused) WM1 watermarks, and also clear all the
1013 * high order bits so that there are no out of bounds values
1014 * present in the registers during the reprogramming.
1016 I915_WRITE(DSPHOWM
, 0);
1017 I915_WRITE(DSPHOWM1
, 0);
1018 I915_WRITE(DSPFW4
, 0);
1019 I915_WRITE(DSPFW5
, 0);
1020 I915_WRITE(DSPFW6
, 0);
1023 FW_WM(wm
->sr
.plane
, SR
) |
1024 FW_WM(wm
->pipe
[PIPE_B
].plane
[PLANE_CURSOR
], CURSORB
) |
1025 FW_WM_VLV(wm
->pipe
[PIPE_B
].plane
[PLANE_PRIMARY
], PLANEB
) |
1026 FW_WM_VLV(wm
->pipe
[PIPE_A
].plane
[PLANE_PRIMARY
], PLANEA
));
1028 FW_WM_VLV(wm
->pipe
[PIPE_A
].plane
[PLANE_SPRITE1
], SPRITEB
) |
1029 FW_WM(wm
->pipe
[PIPE_A
].plane
[PLANE_CURSOR
], CURSORA
) |
1030 FW_WM_VLV(wm
->pipe
[PIPE_A
].plane
[PLANE_SPRITE0
], SPRITEA
));
1032 FW_WM(wm
->sr
.cursor
, CURSOR_SR
));
1034 if (IS_CHERRYVIEW(dev_priv
)) {
1035 I915_WRITE(DSPFW7_CHV
,
1036 FW_WM_VLV(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE1
], SPRITED
) |
1037 FW_WM_VLV(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE0
], SPRITEC
));
1038 I915_WRITE(DSPFW8_CHV
,
1039 FW_WM_VLV(wm
->pipe
[PIPE_C
].plane
[PLANE_SPRITE1
], SPRITEF
) |
1040 FW_WM_VLV(wm
->pipe
[PIPE_C
].plane
[PLANE_SPRITE0
], SPRITEE
));
1041 I915_WRITE(DSPFW9_CHV
,
1042 FW_WM_VLV(wm
->pipe
[PIPE_C
].plane
[PLANE_PRIMARY
], PLANEC
) |
1043 FW_WM(wm
->pipe
[PIPE_C
].plane
[PLANE_CURSOR
], CURSORC
));
1045 FW_WM(wm
->sr
.plane
>> 9, SR_HI
) |
1046 FW_WM(wm
->pipe
[PIPE_C
].plane
[PLANE_SPRITE1
] >> 8, SPRITEF_HI
) |
1047 FW_WM(wm
->pipe
[PIPE_C
].plane
[PLANE_SPRITE0
] >> 8, SPRITEE_HI
) |
1048 FW_WM(wm
->pipe
[PIPE_C
].plane
[PLANE_PRIMARY
] >> 8, PLANEC_HI
) |
1049 FW_WM(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE1
] >> 8, SPRITED_HI
) |
1050 FW_WM(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE0
] >> 8, SPRITEC_HI
) |
1051 FW_WM(wm
->pipe
[PIPE_B
].plane
[PLANE_PRIMARY
] >> 8, PLANEB_HI
) |
1052 FW_WM(wm
->pipe
[PIPE_A
].plane
[PLANE_SPRITE1
] >> 8, SPRITEB_HI
) |
1053 FW_WM(wm
->pipe
[PIPE_A
].plane
[PLANE_SPRITE0
] >> 8, SPRITEA_HI
) |
1054 FW_WM(wm
->pipe
[PIPE_A
].plane
[PLANE_PRIMARY
] >> 8, PLANEA_HI
));
1057 FW_WM_VLV(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE1
], SPRITED
) |
1058 FW_WM_VLV(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE0
], SPRITEC
));
1060 FW_WM(wm
->sr
.plane
>> 9, SR_HI
) |
1061 FW_WM(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE1
] >> 8, SPRITED_HI
) |
1062 FW_WM(wm
->pipe
[PIPE_B
].plane
[PLANE_SPRITE0
] >> 8, SPRITEC_HI
) |
1063 FW_WM(wm
->pipe
[PIPE_B
].plane
[PLANE_PRIMARY
] >> 8, PLANEB_HI
) |
1064 FW_WM(wm
->pipe
[PIPE_A
].plane
[PLANE_SPRITE1
] >> 8, SPRITEB_HI
) |
1065 FW_WM(wm
->pipe
[PIPE_A
].plane
[PLANE_SPRITE0
] >> 8, SPRITEA_HI
) |
1066 FW_WM(wm
->pipe
[PIPE_A
].plane
[PLANE_PRIMARY
] >> 8, PLANEA_HI
));
1069 POSTING_READ(DSPFW1
);
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	width = drm_rect_width(&plane_state->uapi.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}
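
/*
 * Worked example (illustrative numbers): if method 1 yields 2970 bytes and
 * method 2 yields 7680 bytes for a sprite, the smaller value wins; with no
 * TLB miss adjustment the value programmed into the register is
 * DIV_ROUND_UP(2970, 64) + 2 = 49 cachelines.
 */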
1187 static bool g4x_raw_plane_wm_set(struct intel_crtc_state
*crtc_state
,
1188 int level
, enum plane_id plane_id
, u16 value
)
1190 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->uapi
.crtc
->dev
);
1193 for (; level
< intel_wm_num_levels(dev_priv
); level
++) {
1194 struct g4x_pipe_wm
*raw
= &crtc_state
->wm
.g4x
.raw
[level
];
1196 dirty
|= raw
->plane
[plane_id
] != value
;
1197 raw
->plane
[plane_id
] = value
;
1203 static bool g4x_raw_fbc_wm_set(struct intel_crtc_state
*crtc_state
,
1204 int level
, u16 value
)
1206 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->uapi
.crtc
->dev
);
1209 /* NORMAL level doesn't have an FBC watermark */
1210 level
= max(level
, G4X_WM_LEVEL_SR
);
1212 for (; level
< intel_wm_num_levels(dev_priv
); level
++) {
1213 struct g4x_pipe_wm
*raw
= &crtc_state
->wm
.g4x
.raw
[level
];
1215 dirty
|= raw
->fbc
!= value
;
1222 static u32
ilk_compute_fbc_wm(const struct intel_crtc_state
*crtc_state
,
1223 const struct intel_plane_state
*plane_state
,
1226 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state
*crtc_state
,
1227 const struct intel_plane_state
*plane_state
)
1229 struct intel_plane
*plane
= to_intel_plane(plane_state
->uapi
.plane
);
1230 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->uapi
.crtc
->dev
);
1231 int num_levels
= intel_wm_num_levels(to_i915(plane
->base
.dev
));
1232 enum plane_id plane_id
= plane
->id
;
1236 if (!intel_wm_plane_visible(crtc_state
, plane_state
)) {
1237 dirty
|= g4x_raw_plane_wm_set(crtc_state
, 0, plane_id
, 0);
1238 if (plane_id
== PLANE_PRIMARY
)
1239 dirty
|= g4x_raw_fbc_wm_set(crtc_state
, 0, 0);
1243 for (level
= 0; level
< num_levels
; level
++) {
1244 struct g4x_pipe_wm
*raw
= &crtc_state
->wm
.g4x
.raw
[level
];
1247 wm
= g4x_compute_wm(crtc_state
, plane_state
, level
);
1248 max_wm
= g4x_plane_fifo_size(plane_id
, level
);
1253 dirty
|= raw
->plane
[plane_id
] != wm
;
1254 raw
->plane
[plane_id
] = wm
;
1256 if (plane_id
!= PLANE_PRIMARY
||
1257 level
== G4X_WM_LEVEL_NORMAL
)
1260 wm
= ilk_compute_fbc_wm(crtc_state
, plane_state
,
1261 raw
->plane
[plane_id
]);
1262 max_wm
= g4x_fbc_fifo_size(level
);
1265 * FBC wm is not mandatory as we
1266 * can always just disable its use.
1271 dirty
|= raw
->fbc
!= wm
;
1275 /* mark watermarks as invalid */
1276 dirty
|= g4x_raw_plane_wm_set(crtc_state
, level
, plane_id
, USHRT_MAX
);
1278 if (plane_id
== PLANE_PRIMARY
)
1279 dirty
|= g4x_raw_fbc_wm_set(crtc_state
, level
, USHRT_MAX
);
1283 drm_dbg_kms(&dev_priv
->drm
,
1284 "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
1286 crtc_state
->wm
.g4x
.raw
[G4X_WM_LEVEL_NORMAL
].plane
[plane_id
],
1287 crtc_state
->wm
.g4x
.raw
[G4X_WM_LEVEL_SR
].plane
[plane_id
],
1288 crtc_state
->wm
.g4x
.raw
[G4X_WM_LEVEL_HPLL
].plane
[plane_id
]);
1290 if (plane_id
== PLANE_PRIMARY
)
1291 drm_dbg_kms(&dev_priv
->drm
,
1292 "FBC watermarks: SR=%d, HPLL=%d\n",
1293 crtc_state
->wm
.g4x
.raw
[G4X_WM_LEVEL_SR
].fbc
,
1294 crtc_state
->wm
.g4x
.raw
[G4X_WM_LEVEL_HPLL
].fbc
);
1300 static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state
*crtc_state
,
1301 enum plane_id plane_id
, int level
)
1303 const struct g4x_pipe_wm
*raw
= &crtc_state
->wm
.g4x
.raw
[level
];
1305 return raw
->plane
[plane_id
] <= g4x_plane_fifo_size(plane_id
, level
);
1308 static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state
*crtc_state
,
1311 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->uapi
.crtc
->dev
);
1313 if (level
> dev_priv
->wm
.max_level
)
1316 return g4x_raw_plane_wm_is_valid(crtc_state
, PLANE_PRIMARY
, level
) &&
1317 g4x_raw_plane_wm_is_valid(crtc_state
, PLANE_SPRITE0
, level
) &&
1318 g4x_raw_plane_wm_is_valid(crtc_state
, PLANE_CURSOR
, level
);
1321 /* mark all levels starting from 'level' as invalid */
1322 static void g4x_invalidate_wms(struct intel_crtc
*crtc
,
1323 struct g4x_wm_state
*wm_state
, int level
)
1325 if (level
<= G4X_WM_LEVEL_NORMAL
) {
1326 enum plane_id plane_id
;
1328 for_each_plane_id_on_crtc(crtc
, plane_id
)
1329 wm_state
->wm
.plane
[plane_id
] = USHRT_MAX
;
1332 if (level
<= G4X_WM_LEVEL_SR
) {
1333 wm_state
->cxsr
= false;
1334 wm_state
->sr
.cursor
= USHRT_MAX
;
1335 wm_state
->sr
.plane
= USHRT_MAX
;
1336 wm_state
->sr
.fbc
= USHRT_MAX
;
1339 if (level
<= G4X_WM_LEVEL_HPLL
) {
1340 wm_state
->hpll_en
= false;
1341 wm_state
->hpll
.cursor
= USHRT_MAX
;
1342 wm_state
->hpll
.plane
= USHRT_MAX
;
1343 wm_state
->hpll
.fbc
= USHRT_MAX
;
1347 static int g4x_compute_pipe_wm(struct intel_crtc_state
*crtc_state
)
1349 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->uapi
.crtc
);
1350 struct intel_atomic_state
*state
=
1351 to_intel_atomic_state(crtc_state
->uapi
.state
);
1352 struct g4x_wm_state
*wm_state
= &crtc_state
->wm
.g4x
.optimal
;
1353 int num_active_planes
= hweight8(crtc_state
->active_planes
&
1354 ~BIT(PLANE_CURSOR
));
1355 const struct g4x_pipe_wm
*raw
;
1356 const struct intel_plane_state
*old_plane_state
;
1357 const struct intel_plane_state
*new_plane_state
;
1358 struct intel_plane
*plane
;
1359 enum plane_id plane_id
;
1361 unsigned int dirty
= 0;
1363 for_each_oldnew_intel_plane_in_state(state
, plane
,
1365 new_plane_state
, i
) {
1366 if (new_plane_state
->hw
.crtc
!= &crtc
->base
&&
1367 old_plane_state
->hw
.crtc
!= &crtc
->base
)
1370 if (g4x_raw_plane_wm_compute(crtc_state
, new_plane_state
))
1371 dirty
|= BIT(plane
->id
);
1377 level
= G4X_WM_LEVEL_NORMAL
;
1378 if (!g4x_raw_crtc_wm_is_valid(crtc_state
, level
))
1381 raw
= &crtc_state
->wm
.g4x
.raw
[level
];
1382 for_each_plane_id_on_crtc(crtc
, plane_id
)
1383 wm_state
->wm
.plane
[plane_id
] = raw
->plane
[plane_id
];
1385 level
= G4X_WM_LEVEL_SR
;
1387 if (!g4x_raw_crtc_wm_is_valid(crtc_state
, level
))
1390 raw
= &crtc_state
->wm
.g4x
.raw
[level
];
1391 wm_state
->sr
.plane
= raw
->plane
[PLANE_PRIMARY
];
1392 wm_state
->sr
.cursor
= raw
->plane
[PLANE_CURSOR
];
1393 wm_state
->sr
.fbc
= raw
->fbc
;
1395 wm_state
->cxsr
= num_active_planes
== BIT(PLANE_PRIMARY
);
1397 level
= G4X_WM_LEVEL_HPLL
;
1399 if (!g4x_raw_crtc_wm_is_valid(crtc_state
, level
))
1402 raw
= &crtc_state
->wm
.g4x
.raw
[level
];
1403 wm_state
->hpll
.plane
= raw
->plane
[PLANE_PRIMARY
];
1404 wm_state
->hpll
.cursor
= raw
->plane
[PLANE_CURSOR
];
1405 wm_state
->hpll
.fbc
= raw
->fbc
;
1407 wm_state
->hpll_en
= wm_state
->cxsr
;
1412 if (level
== G4X_WM_LEVEL_NORMAL
)
1415 /* invalidate the higher levels */
1416 g4x_invalidate_wms(crtc
, wm_state
, level
);
1419 * Determine if the FBC watermark(s) can be used. IF
1420 * this isn't the case we prefer to disable the FBC
* watermark(s) rather than disable the SR/HPLL
1422 * level(s) entirely.
1424 wm_state
->fbc_en
= level
> G4X_WM_LEVEL_NORMAL
;
1426 if (level
>= G4X_WM_LEVEL_SR
&&
1427 wm_state
->sr
.fbc
> g4x_fbc_fifo_size(G4X_WM_LEVEL_SR
))
1428 wm_state
->fbc_en
= false;
1429 else if (level
>= G4X_WM_LEVEL_HPLL
&&
1430 wm_state
->hpll
.fbc
> g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL
))
1431 wm_state
->fbc_en
= false;
1436 static int g4x_compute_intermediate_wm(struct intel_crtc_state
*new_crtc_state
)
1438 struct intel_crtc
*crtc
= to_intel_crtc(new_crtc_state
->uapi
.crtc
);
1439 struct g4x_wm_state
*intermediate
= &new_crtc_state
->wm
.g4x
.intermediate
;
1440 const struct g4x_wm_state
*optimal
= &new_crtc_state
->wm
.g4x
.optimal
;
1441 struct intel_atomic_state
*intel_state
=
1442 to_intel_atomic_state(new_crtc_state
->uapi
.state
);
1443 const struct intel_crtc_state
*old_crtc_state
=
1444 intel_atomic_get_old_crtc_state(intel_state
, crtc
);
1445 const struct g4x_wm_state
*active
= &old_crtc_state
->wm
.g4x
.optimal
;
1446 enum plane_id plane_id
;
1448 if (!new_crtc_state
->hw
.active
|| drm_atomic_crtc_needs_modeset(&new_crtc_state
->uapi
)) {
1449 *intermediate
= *optimal
;
1451 intermediate
->cxsr
= false;
1452 intermediate
->hpll_en
= false;
1456 intermediate
->cxsr
= optimal
->cxsr
&& active
->cxsr
&&
1457 !new_crtc_state
->disable_cxsr
;
1458 intermediate
->hpll_en
= optimal
->hpll_en
&& active
->hpll_en
&&
1459 !new_crtc_state
->disable_cxsr
;
1460 intermediate
->fbc_en
= optimal
->fbc_en
&& active
->fbc_en
;
1462 for_each_plane_id_on_crtc(crtc
, plane_id
) {
1463 intermediate
->wm
.plane
[plane_id
] =
1464 max(optimal
->wm
.plane
[plane_id
],
1465 active
->wm
.plane
[plane_id
]);
1467 WARN_ON(intermediate
->wm
.plane
[plane_id
] >
1468 g4x_plane_fifo_size(plane_id
, G4X_WM_LEVEL_NORMAL
));
1471 intermediate
->sr
.plane
= max(optimal
->sr
.plane
,
1473 intermediate
->sr
.cursor
= max(optimal
->sr
.cursor
,
1475 intermediate
->sr
.fbc
= max(optimal
->sr
.fbc
,
1478 intermediate
->hpll
.plane
= max(optimal
->hpll
.plane
,
1479 active
->hpll
.plane
);
1480 intermediate
->hpll
.cursor
= max(optimal
->hpll
.cursor
,
1481 active
->hpll
.cursor
);
1482 intermediate
->hpll
.fbc
= max(optimal
->hpll
.fbc
,
1485 WARN_ON((intermediate
->sr
.plane
>
1486 g4x_plane_fifo_size(PLANE_PRIMARY
, G4X_WM_LEVEL_SR
) ||
1487 intermediate
->sr
.cursor
>
1488 g4x_plane_fifo_size(PLANE_CURSOR
, G4X_WM_LEVEL_SR
)) &&
1489 intermediate
->cxsr
);
1490 WARN_ON((intermediate
->sr
.plane
>
1491 g4x_plane_fifo_size(PLANE_PRIMARY
, G4X_WM_LEVEL_HPLL
) ||
1492 intermediate
->sr
.cursor
>
1493 g4x_plane_fifo_size(PLANE_CURSOR
, G4X_WM_LEVEL_HPLL
)) &&
1494 intermediate
->hpll_en
);
1496 WARN_ON(intermediate
->sr
.fbc
> g4x_fbc_fifo_size(1) &&
1497 intermediate
->fbc_en
&& intermediate
->cxsr
);
1498 WARN_ON(intermediate
->hpll
.fbc
> g4x_fbc_fifo_size(2) &&
1499 intermediate
->fbc_en
&& intermediate
->hpll_en
);
1503 * If our intermediate WM are identical to the final WM, then we can
1504 * omit the post-vblank programming; only update if it's different.
1506 if (memcmp(intermediate
, optimal
, sizeof(*intermediate
)) != 0)
1507 new_crtc_state
->wm
.need_postvbl_update
= true;
1512 static void g4x_merge_wm(struct drm_i915_private
*dev_priv
,
1513 struct g4x_wm_values
*wm
)
1515 struct intel_crtc
*crtc
;
1516 int num_active_pipes
= 0;
1522 for_each_intel_crtc(&dev_priv
->drm
, crtc
) {
1523 const struct g4x_wm_state
*wm_state
= &crtc
->wm
.active
.g4x
;
1528 if (!wm_state
->cxsr
)
1530 if (!wm_state
->hpll_en
)
1531 wm
->hpll_en
= false;
1532 if (!wm_state
->fbc_en
)
1538 if (num_active_pipes
!= 1) {
1540 wm
->hpll_en
= false;
1544 for_each_intel_crtc(&dev_priv
->drm
, crtc
) {
1545 const struct g4x_wm_state
*wm_state
= &crtc
->wm
.active
.g4x
;
1546 enum pipe pipe
= crtc
->pipe
;
1548 wm
->pipe
[pipe
] = wm_state
->wm
;
1549 if (crtc
->active
&& wm
->cxsr
)
1550 wm
->sr
= wm_state
->sr
;
1551 if (crtc
->active
&& wm
->hpll_en
)
1552 wm
->hpll
= wm_state
->hpll
;
1556 static void g4x_program_watermarks(struct drm_i915_private
*dev_priv
)
1558 struct g4x_wm_values
*old_wm
= &dev_priv
->wm
.g4x
;
1559 struct g4x_wm_values new_wm
= {};
1561 g4x_merge_wm(dev_priv
, &new_wm
);
1563 if (memcmp(old_wm
, &new_wm
, sizeof(new_wm
)) == 0)
1566 if (is_disabling(old_wm
->cxsr
, new_wm
.cxsr
, true))
1567 _intel_set_memory_cxsr(dev_priv
, false);
1569 g4x_write_wm_values(dev_priv
, &new_wm
);
1571 if (is_enabling(old_wm
->cxsr
, new_wm
.cxsr
, true))
1572 _intel_set_memory_cxsr(dev_priv
, true);
1577 static void g4x_initial_watermarks(struct intel_atomic_state
*state
,
1578 struct intel_crtc
*crtc
)
1580 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1581 const struct intel_crtc_state
*crtc_state
=
1582 intel_atomic_get_new_crtc_state(state
, crtc
);
1584 mutex_lock(&dev_priv
->wm
.wm_mutex
);
1585 crtc
->wm
.active
.g4x
= crtc_state
->wm
.g4x
.intermediate
;
1586 g4x_program_watermarks(dev_priv
);
1587 mutex_unlock(&dev_priv
->wm
.wm_mutex
);
1590 static void g4x_optimize_watermarks(struct intel_atomic_state
*state
,
1591 struct intel_crtc
*crtc
)
1593 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1594 const struct intel_crtc_state
*crtc_state
=
1595 intel_atomic_get_new_crtc_state(state
, crtc
);
1597 if (!crtc_state
->wm
.need_postvbl_update
)
1600 mutex_lock(&dev_priv
->wm
.wm_mutex
);
1601 crtc
->wm
.active
.g4x
= crtc_state
->wm
.g4x
.optimal
;
1602 g4x_program_watermarks(dev_priv
);
1603 mutex_unlock(&dev_priv
->wm
.wm_mutex
);
1606 /* latency must be in 0.1us units. */
1607 static unsigned int vlv_wm_method2(unsigned int pixel_rate
,
1608 unsigned int htotal
,
1611 unsigned int latency
)
1615 ret
= intel_wm_method2(pixel_rate
, htotal
,
1616 width
, cpp
, latency
);
1617 ret
= DIV_ROUND_UP(ret
, 64);
1622 static void vlv_setup_wm_latency(struct drm_i915_private
*dev_priv
)
1624 /* all latencies in usec */
1625 dev_priv
->wm
.pri_latency
[VLV_WM_LEVEL_PM2
] = 3;
1627 dev_priv
->wm
.max_level
= VLV_WM_LEVEL_PM2
;
1629 if (IS_CHERRYVIEW(dev_priv
)) {
1630 dev_priv
->wm
.pri_latency
[VLV_WM_LEVEL_PM5
] = 12;
1631 dev_priv
->wm
.pri_latency
[VLV_WM_LEVEL_DDR_DVFS
] = 33;
1633 dev_priv
->wm
.max_level
= VLV_WM_LEVEL_DDR_DVFS
;
1637 static u16
vlv_compute_wm_level(const struct intel_crtc_state
*crtc_state
,
1638 const struct intel_plane_state
*plane_state
,
1641 struct intel_plane
*plane
= to_intel_plane(plane_state
->uapi
.plane
);
1642 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
1643 const struct drm_display_mode
*adjusted_mode
=
1644 &crtc_state
->hw
.adjusted_mode
;
1645 unsigned int clock
, htotal
, cpp
, width
, wm
;
1647 if (dev_priv
->wm
.pri_latency
[level
] == 0)
1650 if (!intel_wm_plane_visible(crtc_state
, plane_state
))
1653 cpp
= plane_state
->hw
.fb
->format
->cpp
[0];
1654 clock
= adjusted_mode
->crtc_clock
;
1655 htotal
= adjusted_mode
->crtc_htotal
;
1656 width
= crtc_state
->pipe_src_w
;
1658 if (plane
->id
== PLANE_CURSOR
) {
1660 * FIXME the formula gives values that are
1661 * too big for the cursor FIFO, and hence we
1662 * would never be able to use cursors. For
1663 * now just hardcode the watermark.
1667 wm
= vlv_wm_method2(clock
, htotal
, width
, cpp
,
1668 dev_priv
->wm
.pri_latency
[level
] * 10);
1671 return min_t(unsigned int, wm
, USHRT_MAX
);
1674 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes
)
1676 return (active_planes
& (BIT(PLANE_SPRITE0
) |
1677 BIT(PLANE_SPRITE1
))) == BIT(PLANE_SPRITE1
);
1680 static int vlv_compute_fifo(struct intel_crtc_state
*crtc_state
)
1682 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->uapi
.crtc
);
1683 const struct g4x_pipe_wm
*raw
=
1684 &crtc_state
->wm
.vlv
.raw
[VLV_WM_LEVEL_PM2
];
1685 struct vlv_fifo_state
*fifo_state
= &crtc_state
->wm
.vlv
.fifo_state
;
1686 unsigned int active_planes
= crtc_state
->active_planes
& ~BIT(PLANE_CURSOR
);
1687 int num_active_planes
= hweight8(active_planes
);
1688 const int fifo_size
= 511;
1689 int fifo_extra
, fifo_left
= fifo_size
;
1690 int sprite0_fifo_extra
= 0;
1691 unsigned int total_rate
;
1692 enum plane_id plane_id
;
1695 * When enabling sprite0 after sprite1 has already been enabled
1696 * we tend to get an underrun unless sprite0 already has some
* FIFO space allocated. Hence we always allocate at least one
1698 * cacheline for sprite0 whenever sprite1 is enabled.
1700 * All other plane enable sequences appear immune to this problem.
1702 if (vlv_need_sprite0_fifo_workaround(active_planes
))
1703 sprite0_fifo_extra
= 1;
1705 total_rate
= raw
->plane
[PLANE_PRIMARY
] +
1706 raw
->plane
[PLANE_SPRITE0
] +
1707 raw
->plane
[PLANE_SPRITE1
] +
1710 if (total_rate
> fifo_size
)
1713 if (total_rate
== 0)
1716 for_each_plane_id_on_crtc(crtc
, plane_id
) {
1719 if ((active_planes
& BIT(plane_id
)) == 0) {
1720 fifo_state
->plane
[plane_id
] = 0;
1724 rate
= raw
->plane
[plane_id
];
1725 fifo_state
->plane
[plane_id
] = fifo_size
* rate
/ total_rate
;
1726 fifo_left
-= fifo_state
->plane
[plane_id
];
1729 fifo_state
->plane
[PLANE_SPRITE0
] += sprite0_fifo_extra
;
1730 fifo_left
-= sprite0_fifo_extra
;
1732 fifo_state
->plane
[PLANE_CURSOR
] = 63;
1734 fifo_extra
= DIV_ROUND_UP(fifo_left
, num_active_planes
?: 1);
1736 /* spread the remainder evenly */
1737 for_each_plane_id_on_crtc(crtc
, plane_id
) {
1743 if ((active_planes
& BIT(plane_id
)) == 0)
1746 plane_extra
= min(fifo_extra
, fifo_left
);
1747 fifo_state
->plane
[plane_id
] += plane_extra
;
1748 fifo_left
-= plane_extra
;
1751 WARN_ON(active_planes
!= 0 && fifo_left
!= 0);
1753 /* give it all to the first plane if none are active */
1754 if (active_planes
== 0) {
1755 WARN_ON(fifo_left
!= fifo_size
);
1756 fifo_state
->plane
[PLANE_PRIMARY
] = fifo_left
;
1762 /* mark all levels starting from 'level' as invalid */
1763 static void vlv_invalidate_wms(struct intel_crtc
*crtc
,
1764 struct vlv_wm_state
*wm_state
, int level
)
1766 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1768 for (; level
< intel_wm_num_levels(dev_priv
); level
++) {
1769 enum plane_id plane_id
;
1771 for_each_plane_id_on_crtc(crtc
, plane_id
)
1772 wm_state
->wm
[level
].plane
[plane_id
] = USHRT_MAX
;
1774 wm_state
->sr
[level
].cursor
= USHRT_MAX
;
1775 wm_state
->sr
[level
].plane
= USHRT_MAX
;
1779 static u16
vlv_invert_wm_value(u16 wm
, u16 fifo_size
)
1784 return fifo_size
- wm
;
1788 * Starting from 'level' set all higher
1789 * levels to 'value' in the "raw" watermarks.
1791 static bool vlv_raw_plane_wm_set(struct intel_crtc_state
*crtc_state
,
1792 int level
, enum plane_id plane_id
, u16 value
)
1794 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->uapi
.crtc
->dev
);
1795 int num_levels
= intel_wm_num_levels(dev_priv
);
1798 for (; level
< num_levels
; level
++) {
1799 struct g4x_pipe_wm
*raw
= &crtc_state
->wm
.vlv
.raw
[level
];
1801 dirty
|= raw
->plane
[plane_id
] != value
;
1802 raw
->plane
[plane_id
] = value
;
1808 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state
*crtc_state
,
1809 const struct intel_plane_state
*plane_state
)
1811 struct intel_plane
*plane
= to_intel_plane(plane_state
->uapi
.plane
);
1812 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->uapi
.crtc
->dev
);
1813 enum plane_id plane_id
= plane
->id
;
1814 int num_levels
= intel_wm_num_levels(to_i915(plane
->base
.dev
));
1818 if (!intel_wm_plane_visible(crtc_state
, plane_state
)) {
1819 dirty
|= vlv_raw_plane_wm_set(crtc_state
, 0, plane_id
, 0);
1823 for (level
= 0; level
< num_levels
; level
++) {
1824 struct g4x_pipe_wm
*raw
= &crtc_state
->wm
.vlv
.raw
[level
];
1825 int wm
= vlv_compute_wm_level(crtc_state
, plane_state
, level
);
1826 int max_wm
= plane_id
== PLANE_CURSOR
? 63 : 511;
1831 dirty
|= raw
->plane
[plane_id
] != wm
;
1832 raw
->plane
[plane_id
] = wm
;
1835 /* mark all higher levels as invalid */
1836 dirty
|= vlv_raw_plane_wm_set(crtc_state
, level
, plane_id
, USHRT_MAX
);
1840 drm_dbg_kms(&dev_priv
->drm
,
1841 "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1843 crtc_state
->wm
.vlv
.raw
[VLV_WM_LEVEL_PM2
].plane
[plane_id
],
1844 crtc_state
->wm
.vlv
.raw
[VLV_WM_LEVEL_PM5
].plane
[plane_id
],
1845 crtc_state
->wm
.vlv
.raw
[VLV_WM_LEVEL_DDR_DVFS
].plane
[plane_id
]);
1850 static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state
*crtc_state
,
1851 enum plane_id plane_id
, int level
)
1853 const struct g4x_pipe_wm
*raw
=
1854 &crtc_state
->wm
.vlv
.raw
[level
];
1855 const struct vlv_fifo_state
*fifo_state
=
1856 &crtc_state
->wm
.vlv
.fifo_state
;
1858 return raw
->plane
[plane_id
] <= fifo_state
->plane
[plane_id
];
1861 static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state
*crtc_state
, int level
)
1863 return vlv_raw_plane_wm_is_valid(crtc_state
, PLANE_PRIMARY
, level
) &&
1864 vlv_raw_plane_wm_is_valid(crtc_state
, PLANE_SPRITE0
, level
) &&
1865 vlv_raw_plane_wm_is_valid(crtc_state
, PLANE_SPRITE1
, level
) &&
1866 vlv_raw_plane_wm_is_valid(crtc_state
, PLANE_CURSOR
, level
);
1869 static int vlv_compute_pipe_wm(struct intel_crtc_state
*crtc_state
)
1871 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->uapi
.crtc
);
1872 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1873 struct intel_atomic_state
*state
=
1874 to_intel_atomic_state(crtc_state
->uapi
.state
);
1875 struct vlv_wm_state
*wm_state
= &crtc_state
->wm
.vlv
.optimal
;
1876 const struct vlv_fifo_state
*fifo_state
=
1877 &crtc_state
->wm
.vlv
.fifo_state
;
1878 int num_active_planes
= hweight8(crtc_state
->active_planes
&
1879 ~BIT(PLANE_CURSOR
));
1880 bool needs_modeset
= drm_atomic_crtc_needs_modeset(&crtc_state
->uapi
);
1881 const struct intel_plane_state
*old_plane_state
;
1882 const struct intel_plane_state
*new_plane_state
;
1883 struct intel_plane
*plane
;
1884 enum plane_id plane_id
;
1886 unsigned int dirty
= 0;
1888 for_each_oldnew_intel_plane_in_state(state
, plane
,
1890 new_plane_state
, i
) {
1891 if (new_plane_state
->hw
.crtc
!= &crtc
->base
&&
1892 old_plane_state
->hw
.crtc
!= &crtc
->base
)
1895 if (vlv_raw_plane_wm_compute(crtc_state
, new_plane_state
))
1896 dirty
|= BIT(plane
->id
);
1900 * DSPARB registers may have been reset due to the
1901 * power well being turned off. Make sure we restore
1902 * them to a consistent state even if no primary/sprite
1903 * planes are initially active.
1906 crtc_state
->fifo_changed
= true;
1911 /* cursor changes don't warrant a FIFO recompute */
1912 if (dirty
& ~BIT(PLANE_CURSOR
)) {
1913 const struct intel_crtc_state
*old_crtc_state
=
1914 intel_atomic_get_old_crtc_state(state
, crtc
);
1915 const struct vlv_fifo_state
*old_fifo_state
=
1916 &old_crtc_state
->wm
.vlv
.fifo_state
;
1918 ret
= vlv_compute_fifo(crtc_state
);
1922 if (needs_modeset
||
1923 memcmp(old_fifo_state
, fifo_state
,
1924 sizeof(*fifo_state
)) != 0)
1925 crtc_state
->fifo_changed
= true;
1928 /* initially allow all levels */
1929 wm_state
->num_levels
= intel_wm_num_levels(dev_priv
);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
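/*
 * Illustrative expansion: VLV_FIFO(SPRITEA, sprite0_start) becomes
 * ((sprite0_start << DSPARB_SPRITEA_SHIFT_VLV) & DSPARB_SPRITEA_MASK_VLV),
 * i.e. the low bits of the FIFO split point placed into the SPRITEA field
 * of DSPARB. The *_HI variants used below carry bit 8 of the 9-bit split
 * point, as can be seen from the "sprite0_start >> 8" arguments.
 */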
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;
	u32 dsparb, dsparb2, dsparb3;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&uncore->lock);
	switch (crtc->pipe) {
	case PIPE_A:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	intel_uncore_posting_read_fw(uncore, DSPARB);

	spin_unlock(&uncore->lock);
}
static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;

	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;
}
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->level = dev_priv->wm.max_level;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!wm_state->cxsr)

		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_pipes != 1)

	if (num_active_pipes > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;
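	/*
	 * Ordering below: the deeper memory states (DDR DVFS, PM5, cxsr) are
	 * dropped before the new watermark values are written and only
	 * re-enabled afterwards, so the hardware never runs a low-power
	 * state against watermarks that do not yet cover it.
	 */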
	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);
}
static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];

		entries = intel_wm_method2(clock, htotal,
					   hdisplay, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
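		/*
		 * Rough worked example (illustrative numbers, assuming the
		 * method2 helper charges "latency / line time + 1" full
		 * lines of data): a 200 MHz pixel clock with htotal 2200
		 * gives an ~11 usec line time, so 12 usec of self-refresh
		 * latency charges 2 lines, 2 * 1920 * 4 = 15360 bytes, i.e.
		 * 240 64-byte FIFO lines, leaving srwm = 512 - 240 = 272.
		 */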
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d, wm: %d\n",

		entries = intel_wm_method2(clock, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh watermark: display plane %d "
			    "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |

	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN(dev_priv, 2))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;

		if (IS_GEN(dev_priv, 2))

		cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN(dev_priv, 2))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;

		if (IS_GEN(dev_priv, 2))

		cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}
	drm_dbg_kms(&dev_priv->drm,
		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))

		cpp = fb->format->cpp[0];

		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));

			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		    planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;

	crtc = single_enabled_crtc(dev_priv);

	adjusted_mode = &crtc->config->hw.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int latency)
{
	ret = intel_wm_method1(pixel_rate, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int latency)
{
	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;
}

static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!horiz_pixels))

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
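/*
 * Worked example (illustrative numbers only): with pri_val = 120, a plane
 * 1920 pixels wide and 4 bytes per pixel, the FBC watermark is
 * DIV_ROUND_UP(120 * 64, 1920 * 4) + 2 = DIV_ROUND_UP(7680, 7680) + 2 = 3.
 */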
struct ilk_wm_maximums {

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value, bool is_lp)
{
	u32 method1, method2;

	if (!intel_wm_plane_visible(crtc_state, plane_state))

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);

	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.dst),

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,

	u32 method1, method2;

	if (!intel_wm_plane_visible(crtc_state, plane_state))

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.dst),

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,

	if (!intel_wm_plane_visible(crtc_state, plane_state))

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_method2(crtc_state->pixel_rate,
			      crtc_state->hw.adjusted_mode.crtc_htotal,
			      drm_rect_width(&plane_state->uapi.dst),

/* Only for WM_LP. */
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,

	if (!intel_wm_plane_visible(crtc_state, plane_state))

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
	else if (INTEL_GEN(dev_priv) >= 7)
}

ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;

	/* ILK/SNB sprite plane watermarks */
	return level == 0 ? 63 : 255;
}

ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;

	return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
{
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_NUM_PIPES(dev_priv);

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev_priv, level);
}

static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}

static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(u32, result->pri_val, max->pri);
		result->spr_val = min_t(u32, result->spr_val, max->spr);
		result->cur_val = min_t(u32, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *crtc,
				 struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *pristate,
				 const struct intel_plane_state *sprstate,
				 const struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	u16 pri_latency = dev_priv->wm.pri_latency[level];
	u16 spr_latency = dev_priv->wm.spr_latency[level];
	u16 cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
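	/* (so a raw value of 4 corresponds to 2 usec) */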
	result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
					     pri_latency, level);
	result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);

	result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);

	result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);

	result->enable = true;
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (INTEL_GEN(dev_priv) >= 9) {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,

			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,

			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
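		/*
		 * Illustrative unpacking (assuming the usual four 8-bit
		 * latency fields per mailbox word): val = 0x20100804 would
		 * give wm[4] = 4, wm[5] = 8, wm[6] = 0x10 and wm[7] = 0x20,
		 * i.e. 4/8/16/32 usec for levels 4..7.
		 */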
		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
			}
		}

		/*
		 * WaWmMemoryReadLatency:skl+,glk
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from the
		 * punit when level 0 response data is 0us.
		 */
		for (level = 1; level <= max_level; level++) {
		}

		/*
		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
		 * If we could not get dimm info enable this WA to prevent from
		 * any underrun. If not able to get Dimm info assume 16GB dimm
		 * to avoid any underrun.
		 */
		if (dev_priv->dram_info.is_16gb_dimm)

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;

			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	} else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
	}
}
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN(dev_priv, 5))
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN(dev_priv, 5))
}

int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (INTEL_GEN(dev_priv) >= 9)
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
	else if (INTEL_GEN(dev_priv) >= 6)
}

static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (INTEL_GEN(dev_priv) >= 9)

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
}
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	drm_dbg_kms(&dev_priv->drm,
		    "WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}

static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
{
	/*
	 * On some SNB machines (Thinkpad X220 Tablet at least)
	 * LP3 usage can cause vblank interrupts to be lost.
	 * The DEIIR bit will go high but it looks like the CPU
	 * never gets interrupted.
	 *
	 * It's not clear whether other interrupt sources could
	 * be affected or if this is somehow limited to vblank
	 * interrupts only. To play it safe we disable LP3
	 * watermarks entirely.
	 */
	if (dev_priv->wm.pri_latency[3] == 0 &&
	    dev_priv->wm.spr_latency[3] == 0 &&
	    dev_priv->wm.cur_latency[3] == 0)
		return;

	dev_priv->wm.pri_latency[3] = 0;
	dev_priv->wm.spr_latency[3] = 0;
	dev_priv->wm.cur_latency[3] = 0;

	drm_dbg_kms(&dev_priv->drm,
		    "LP3 watermarks disabled due to potential for lost interrupts\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN(dev_priv, 6)) {
		snb_wm_latency_quirk(dev_priv);
		snb_wm_lp3_irq_quirk(dev_priv);
	}
}

static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
	}
}
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	const struct intel_plane_state *pristate = NULL;
	const struct intel_plane_state *sprstate = NULL;
	const struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &crtc_state->wm.ilk.optimal;

	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = plane_state;
	}

	pipe_wm->pipe_enabled = crtc_state->hw.active;

	pipe_wm->sprites_enabled = sprstate->uapi.visible;
	pipe_wm->sprites_scaled = sprstate->uapi.visible &&
		(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
		 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
			     pristate, sprstate, curstate, &pipe_wm->wm[0]);

	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= usable_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->wm[level];

		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 */
		if (!ilk_validate_wm_level(level, &max, wm)) {
			memset(wm, 0, sizeof(*wm));
		}
	}
}
/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(newstate->uapi.state);
	const struct intel_crtc_state *oldstate =
		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
	    intel_state->skip_intermediate_wm)
		return 0;

	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev_priv, a))

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
		newstate->wm.need_postvbl_update = true;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_i915_private *dev_priv,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev_priv, level, wm);

		if (level > last_enabled_level)
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			merged->fbc_wm_enabled = false;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
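/*
 * In other words: LP1 always maps to level 1; when the pipe has a usable
 * level 4, LP2/LP3 map to levels 3/4, otherwise they map to levels 2/3.
 */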
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
{
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))

	return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |

			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_GEN(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *r = &pipe_wm->wm[0];

		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
			continue;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *
ilk_find_best_result(struct drm_i915_private *dev_priv,
		     struct intel_pipe_wm *r1,
		     struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
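/*
 * Resulting bitmask layout: bits 0..2 mark pipes A..C dirty, bits 16..18
 * mark the LP1..LP3 registers dirty, bit 24 the FBC enable and bit 25 the
 * DDB partitioning.
 */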
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
	}

	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
{
	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
	u8 enabled_slices_mask = 0;

	for (i = 0; i < max_slices; i++) {
		if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
			enabled_slices_mask |= BIT(i);
	}

	return enabled_slices_mask;
}
/*
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
 * so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
{
	return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
}

static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 12))

	return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}

static void
skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		ret = sandybridge_pcode_read(dev_priv,
					     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
		if (!ret) {
			dev_priv->sagv_block_time_us = val;
			return;
		}

		drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
	} else if (IS_GEN(dev_priv, 11)) {
		dev_priv->sagv_block_time_us = 10;
		return;
	} else if (IS_GEN(dev_priv, 10)) {
		dev_priv->sagv_block_time_us = 20;
		return;
	} else if (IS_GEN(dev_priv, 9)) {
		dev_priv->sagv_block_time_us = 30;
		return;
	} else {
		MISSING_CASE(INTEL_GEN(dev_priv));
	}

	/* Default to an unusable block time */
	dev_priv->sagv_block_time_us = -1;
}
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 * - <= 1 pipe enabled
 * - All planes can enable watermarks for latencies >= SAGV engine block time
 * - We're not using an interlaced display configuration
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,

	/* We don't need to wait for SAGV when enabling */

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}

int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}
bool intel_can_enable_sagv(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *crtc_state;

	if (!intel_has_sagv(dev_priv))
		return false;

	/*
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight8(state->active_pipes) == 0)
		return true;

	/*
	 * SKL+ workaround: bspec recommends we disable SAGV when we have
	 * more than one pipe enabled
	 */
	if (hweight8(state->active_pipes) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(state->active_pipes) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane->id];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)

		latency = dev_priv->wm.skl_latency[level];

		if (skl_needs_memory_bw_wa(dev_priv) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)

		/*
		 * If any of the planes on this pipe don't enable wm levels that
		 * incur memory latencies higher than sagv_block_time_us we
		 * can't enable SAGV.
		 */
		if (latency < dev_priv->sagv_block_time_us)
			return false;
	}

	return true;
}
/*
 * Calculate initial DBuf slice offset, based on slice size
 * and mask(i.e if slice size is 1024 and second slice is enabled
 * offset would be 1024)
 */
static unsigned int
icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
{
	unsigned int offset = 0;

	if (!dbuf_slice_mask)
		return 0;

	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;

	WARN_ON(offset >= ddb_size);
	return offset;
}
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
{
	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;

	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);

	if (INTEL_GEN(dev_priv) < 11)
		return ddb_size - 4; /* 4 blocks for bypass path allocation */

	return ddb_size;
}
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,

static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
				   const struct intel_crtc_state *crtc_state,
				   const u64 total_data_rate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
	const struct intel_crtc *crtc;
	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
	u32 dbuf_slice_mask;
	u32 total_slice_mask;

	if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
		*num_active = hweight8(dev_priv->active_pipes);
		return;
	}

	if (intel_state->active_pipe_changes)
		active_pipes = intel_state->active_pipes;
	else
		active_pipes = dev_priv->active_pipes;

	*num_active = hweight8(active_pipes);

	ddb_size = intel_get_ddb_size(dev_priv);

	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;

	/*
	 * If the state doesn't change the active CRTCs or there is no
	 * modeset request, then there's no need to recalculate;
	 * the existing pipe allocation limits should remain unchanged.
	 * Note that we're safe from racing commits since any racing commit
	 * that changes the active CRTC list or does a modeset would need to
	 * grab _all_ crtc locks, including the one we currently hold.
	 */
	if (!intel_state->active_pipe_changes && !intel_state->modeset) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3921 * Get allowed DBuf slices for correspondent pipe and platform.
3923 dbuf_slice_mask
= skl_compute_dbuf_slices(crtc_state
, active_pipes
);
3925 DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
3927 pipe_name(for_pipe
), active_pipes
);
3930 * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
3931 * and slice size is 1024, the offset would be 1024
3933 offset
= icl_get_first_dbuf_slice_offset(dbuf_slice_mask
,
3934 slice_size
, ddb_size
);
3937 * Figure out total size of allowed DBuf slices, which is basically
3938 * a number of allowed slices for that pipe multiplied by slice size.
3940 * range ddb entries are still allocated in proportion to display width.
3942 ddb_range_size
= hweight8(dbuf_slice_mask
) * slice_size
;
3945 * Watermark/ddb requirement highly depends upon width of the
3946 * framebuffer, So instead of allocating DDB equally among pipes
3947 * distribute DDB based on resolution/width of the display.
3949 total_slice_mask
= dbuf_slice_mask
;
3950 for_each_new_intel_crtc_in_state(intel_state
, crtc
, crtc_state
, i
) {
3951 const struct drm_display_mode
*adjusted_mode
=
3952 &crtc_state
->hw
.adjusted_mode
;
3953 enum pipe pipe
= crtc
->pipe
;
3954 int hdisplay
, vdisplay
;
3955 u32 pipe_dbuf_slice_mask
;
3957 if (!crtc_state
->hw
.active
)
3960 pipe_dbuf_slice_mask
= skl_compute_dbuf_slices(crtc_state
,
3964 * According to BSpec pipe can share one dbuf slice with another
3965 * pipes or pipe can use multiple dbufs, in both cases we
3966 * account for other pipes only if they have exactly same mask.
3967 * However we need to account how many slices we should enable
3970 total_slice_mask
|= pipe_dbuf_slice_mask
;
3973 * Do not account pipes using other slice sets
3974 * luckily as of current BSpec slice sets do not partially
3975 * intersect(pipes share either same one slice or same slice set
3976 * i.e no partial intersection), so it is enough to check for
3979 if (dbuf_slice_mask
!= pipe_dbuf_slice_mask
)
3982 drm_mode_get_hv_timing(adjusted_mode
, &hdisplay
, &vdisplay
);
3984 total_width_in_range
+= hdisplay
;
3986 if (pipe
< for_pipe
)
3987 width_before_pipe_in_range
+= hdisplay
;
3988 else if (pipe
== for_pipe
)
3989 pipe_width
= hdisplay
;
3993 * FIXME: For now we always enable slice S1 as per
3994 * the Bspec display initialization sequence.
3996 intel_state
->enabled_dbuf_slices_mask
= total_slice_mask
| BIT(DBUF_S1
);
3998 start
= ddb_range_size
* width_before_pipe_in_range
/ total_width_in_range
;
3999 end
= ddb_range_size
*
4000 (width_before_pipe_in_range
+ pipe_width
) / total_width_in_range
;
4002 alloc
->start
= offset
+ start
;
4003 alloc
->end
= offset
+ end
;
4005 DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe
,
4006 alloc
->start
, alloc
->end
);
4007 DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
4008 intel_state
->enabled_dbuf_slices_mask
,
4009 INTEL_INFO(dev_priv
)->num_supported_dbuf_slices
);
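
/*
 * Illustrative numbers for the width proportional split above (not from
 * Bspec): with two 1024 block slices and a single slice range shared by a
 * 1920 wide and a 1280 wide pipe, the 1920 pipe gets
 * 1024 * 1920 / (1920 + 1280) = 614 blocks (integer division), starting at
 * the offset of its first allowed slice.
 */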
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
				 int width, const struct drm_format_info *format,
				 u64 modifier, unsigned int rotation,
				 u32 plane_pixel_rate, struct skl_wm_params *wp,
				 int color_plane);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 int level,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */);
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
		      int num_active)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct skl_wm_level wm = {};
	int ret, min_ddb_alloc = 0;
	struct skl_wm_params wp;

	ret = skl_compute_wm_params(crtc_state, 256,
				    drm_format_info(DRM_FORMAT_ARGB8888),
				    DRM_FORMAT_MOD_LINEAR,
				    DRM_MODE_ROTATE_0,
				    crtc_state->pixel_rate, &wp, 0);
	drm_WARN_ON(&dev_priv->drm, ret);

	for (level = 0; level <= max_level; level++) {
		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
		if (wm.min_ddb_alloc == U16_MAX)
			break;

		min_ddb_alloc = wm.min_ddb_alloc;
	}

	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
				       struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & DDB_ENTRY_MASK;
	entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;

	if (entry->end)
		entry->end += 1;
}
static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_entry *ddb_y,
			   struct skl_ddb_entry *ddb_uv)
{
	u32 val, val2;
	u32 fourcc = 0;

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		return;
	}

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* No DDB allocated for disabled planes */
	if (val & PLANE_CTL_ENABLE)
		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
					      val & PLANE_CTL_ORDER_RGBX,
					      val & PLANE_CTL_ALPHA_MASK);

	if (INTEL_GEN(dev_priv) >= 11) {
		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
	} else {
		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));

		if (fourcc &&
		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
			swap(val, val2);

		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
	}
}
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
			       struct skl_ddb_entry *ddb_y,
			       struct skl_ddb_entry *ddb_uv)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;
	enum plane_id plane_id;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return;

	for_each_plane_id_on_crtc(crtc, plane_id)
		skl_ddb_get_hw_plane_state(dev_priv, pipe,
					   plane_id,
					   &ddb_y[plane_id],
					   &ddb_uv[plane_id]);

	intel_display_power_put(dev_priv, power_domain, wakeref);
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
{
	dev_priv->enabled_dbuf_slices_mask =
		intel_enabled_dbuf_slices_mask(dev_priv);
}
/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	u32 src_w, src_h, dst_w, dst_h;
	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
	uint_fixed_16_16_t downscale_h, downscale_w;

	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
		return u32_to_fixed16(0);

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 *
	 * n.b., src is 16.16 fixed point, dst is whole integer.
	 */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	dst_w = drm_rect_width(&plane_state->uapi.dst);
	dst_h = drm_rect_height(&plane_state->uapi.dst);

	fp_w_ratio = div_fixed16(src_w, dst_w);
	fp_h_ratio = div_fixed16(src_h, dst_h);
	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

	return mul_fixed16(downscale_w, downscale_h);
}
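
/*
 * Worked example (illustrative): a 3840x2160 source rect scaled to a
 * 1920x1080 destination gives fp_w_ratio = fp_h_ratio = 2.0 in 16.16 fixed
 * point, so the value returned is 4.0 (0x40000). Upscaling clamps each
 * ratio to 1.0, so the result never drops below 1.0.
 */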
struct dbuf_slice_conf_entry {
	u8 active_pipes;
	u8 dbuf_mask[I915_MAX_PIPES];
};

/*
 * Table taken from Bspec 12716
 * Pipes do have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed for multipipe scenarios.
 * For more DBuf slices algorithm can get even more messy
 * and less readable, so decided to use a table almost
 * as is from BSpec itself - that way it is at least easier
 * to compare, change and check.
 */
static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{}
};
/*
 * Table taken from Bspec 49255
 * Pipes do have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed for multipipe scenarios.
 * For more DBuf slices algorithm can get even more messy
 * and less readable, so decided to use a table almost
 * as is from BSpec itself - that way it is at least easier
 * to compare, change and check.
 */
static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{}
};
static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{
	int i;

	for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
		if (dbuf_slices[i].active_pipes == active_pipes)
			return dbuf_slices[i].dbuf_mask[pipe];
	}
	return 0;
}
/*
 * This function finds an entry with same enabled pipe configuration and
 * returns correspondent DBuf slice mask as stated in BSpec for particular
 * platform.
 */
static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
{
	/*
	 * FIXME: For ICL this is still a bit unclear as prev BSpec revision
	 * required calculating "pipe ratio" in order to determine
	 * if one or two slices can be used for single pipe configurations
	 * as additional constraint to the existing table.
	 * However based on recent info, it should be not "pipe ratio"
	 * but rather ratio between pixel_rate and cdclk with additional
	 * constants, so for now we are using only table until this is
	 * clarified. Also this is the reason why crtc_state param is
	 * still here - we will need it once those additional constraints
	 * pop up.
	 */
	return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
}

static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
{
	return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
				  u8 active_pipes)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (IS_GEN(dev_priv, 12))
		return tgl_compute_dbuf_slices(pipe, active_pipes);
	else if (IS_GEN(dev_priv, 11))
		return icl_compute_dbuf_slices(pipe, active_pipes);
	/*
	 * For anything else just return one slice yet.
	 * Should be extended for other platforms.
	 */
	return BIT(DBUF_S1);
}
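
/*
 * Example lookup, straight from the tables above: on TGL with pipes A and
 * B active, the BIT(PIPE_A) | BIT(PIPE_B) entry is matched and pipe A is
 * assigned BIT(DBUF_S2) while pipe B gets BIT(DBUF_S1).
 */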
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
			     const struct intel_plane_state *plane_state,
			     int color_plane)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	u32 data_rate;
	u32 width = 0, height = 0;
	uint_fixed_16_16_t down_scale_amount;
	u64 rate;

	if (!plane_state->uapi.visible)
		return 0;

	if (plane->id == PLANE_CURSOR)
		return 0;

	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		return 0;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;
	height = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* UV plane does 1/2 pixel sub-sampling */
	if (color_plane == 1) {
		width /= 2;
		height /= 2;
	}

	data_rate = width * height;

	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);

	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);

	rate *= fb->format->cpp[color_plane];
	return rate;
}
static u64
skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
				 u64 *plane_data_rate,
				 u64 *uv_plane_data_rate)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	u64 total_data_rate = 0;

	/* Calculate and cache data rate for each plane */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		enum plane_id plane_id = plane->id;
		u64 rate;

		/* packed/y */
		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
		plane_data_rate[plane_id] = rate;
		total_data_rate += rate;

		/* uv-plane */
		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
		uv_plane_data_rate[plane_id] = rate;
		total_data_rate += rate;
	}

	return total_data_rate;
}
static u64
icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
				 u64 *plane_data_rate)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	u64 total_data_rate = 0;

	/* Calculate and cache data rate for each plane */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		enum plane_id plane_id = plane->id;
		u64 rate;

		if (!plane_state->planar_linked_plane) {
			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
			plane_data_rate[plane_id] = rate;
			total_data_rate += rate;
		} else {
			enum plane_id y_plane_id;

			/*
			 * The slave plane might not iterate in
			 * intel_atomic_crtc_state_for_each_plane_state(),
			 * and needs the master plane state which may be
			 * NULL if we try get_new_plane_state(), so we
			 * always calculate from the master.
			 */
			if (plane_state->planar_slave)
				continue;

			/* Y plane rate is calculated on the slave */
			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
			y_plane_id = plane_state->planar_linked_plane->id;
			plane_data_rate[y_plane_id] = rate;
			total_data_rate += rate;

			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
			plane_data_rate[plane_id] = rate;
			total_data_rate += rate;
		}
	}

	return total_data_rate;
}
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
	u16 alloc_size, start = 0;
	u16 total[I915_MAX_PLANES] = {};
	u16 uv_total[I915_MAX_PLANES] = {};
	u64 total_data_rate;
	enum plane_id plane_id;
	int num_active;
	u64 plane_data_rate[I915_MAX_PLANES] = {};
	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
	u32 blocks;
	int level;

	/* Clear the partitioning for disabled planes. */
	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));

	if (!crtc_state->hw.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		total_data_rate =
			icl_get_total_relative_data_rate(crtc_state,
							 plane_data_rate);
	else
		total_data_rate =
			skl_get_total_relative_data_rate(crtc_state,
							 plane_data_rate,
							 uv_plane_data_rate);

	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
					   alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0)
		return 0;

	/* Allocate fixed number of blocks for cursor. */
	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
	alloc_size -= total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
		alloc->end - total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;

	if (total_data_rate == 0)
		return 0;

	/*
	 * Find the highest watermark level for which we can satisfy the block
	 * requirement of active planes.
	 */
	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
		blocks = 0;
		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (plane_id == PLANE_CURSOR) {
				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
					drm_WARN_ON(&dev_priv->drm,
						    wm->wm[level].min_ddb_alloc != U16_MAX);
					blocks = U32_MAX;
					break;
				}
				continue;
			}

			blocks += wm->wm[level].min_ddb_alloc;
			blocks += wm->uv_wm[level].min_ddb_alloc;
		}

		if (blocks <= alloc_size) {
			alloc_size -= blocks;
			break;
		}
	}

	if (level < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Requested display configuration exceeds system DDB limitations");
		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
			    blocks, alloc_size);
		return -EINVAL;
	}

	/*
	 * Grant each plane the blocks it requires at the highest achievable
	 * watermark level, plus an extra share of the leftover blocks
	 * proportional to its relative data rate.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];
		u64 rate;
		u16 extra;

		if (plane_id == PLANE_CURSOR)
			continue;

		/*
		 * We've accounted for all active planes; remaining planes are
		 * all disabled.
		 */
		if (total_data_rate == 0)
			break;

		rate = plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;

		if (total_data_rate == 0)
			break;

		rate = uv_plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;
	}
	drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);

	/* Set the actual DDB start/end points for each plane */
	start = alloc->start;
	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_ddb_entry *plane_alloc =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		struct skl_ddb_entry *uv_plane_alloc =
			&crtc_state->wm.skl.plane_ddb_uv[plane_id];

		if (plane_id == PLANE_CURSOR)
			continue;

		/* Gen11+ uses a separate plane for UV watermarks */
		drm_WARN_ON(&dev_priv->drm,
			    INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);

		/* Leave disabled planes at (0,0) */
		if (total[plane_id]) {
			plane_alloc->start = start;
			start += total[plane_id];
			plane_alloc->end = start;
		}

		if (uv_total[plane_id]) {
			uv_plane_alloc->start = start;
			start += uv_total[plane_id];
			uv_plane_alloc->end = start;
		}
	}

	/*
	 * When we calculated watermark values we didn't know how high
	 * of a level we'd actually be able to hit, so we just marked
	 * all levels as "enabled."  Go back now and disable the ones
	 * that aren't actually possible.
	 */
	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
		for_each_plane_id_on_crtc(crtc, plane_id) {
			struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			/*
			 * We only disable the watermarks for each plane if
			 * they exceed the ddb allocation of said plane. This
			 * is done so that we don't end up touching cursor
			 * watermarks needlessly when some other plane reduces
			 * our max possible watermark level.
			 *
			 * Bspec has this to say about the PLANE_WM enable bit:
			 * "All the watermarks at this level for all enabled
			 *  planes must be enabled before the level will be used."
			 * So this is actually safe to do.
			 */
			if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
			    wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
				memset(&wm->wm[level], 0, sizeof(wm->wm[level]));

			/*
			 * Wa_1408961008:icl, ehl
			 * Underruns with WM1+ disabled
			 */
			if (IS_GEN(dev_priv, 11) &&
			    level == 1 && wm->wm[0].plane_en) {
				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
			}
		}
	}

	/*
	 * Go back and disable the transition watermark if it turns out we
	 * don't have enough DDB blocks for it.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (wm->trans_wm.plane_res_b >= total[plane_id])
			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
	}

	return 0;
}
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
	       u8 cpp, u32 latency, u32 dbuf_block_size)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);

	if (INTEL_GEN(dev_priv) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}
static uint_fixed_16_16_t
skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
	       uint_fixed_16_16_t plane_blocks_per_line)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);

	return ret;
}
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate;
	u32 crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!crtc_state->hw.active)
		return u32_to_fixed16(0);

	pixel_rate = crtc_state->pixel_rate;

	if (WARN_ON(pixel_rate == 0))
		return u32_to_fixed16(0);

	crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);

	return linetime_us;
}
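
/*
 * Example: a 4400 clock htotal at a 297000 kHz pixel rate yields
 * div_fixed16(4400 * 1000, 297000) ~= 14.8us per line.
 */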
static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	u64 adjusted_pixel_rate;
	uint_fixed_16_16_t downscale_amount;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = crtc_state->pixel_rate;
	downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);

	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
					downscale_amount);
}
static int
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
		      int width, const struct drm_format_info *format,
		      u64 modifier, unsigned int rotation,
		      u32 plane_pixel_rate, struct skl_wm_params *wp,
		      int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 interm_pbpl;

	/* only planar format has two planes */
	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non planar format have single plane\n");
		return -EINVAL;
	}

	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
		      modifier == I915_FORMAT_MOD_Yf_TILED ||
		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);

	wp->width = width;
	if (color_plane == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = format->cpp[color_plane];
	wp->plane_pixel_rate = plane_pixel_rate;

	if (INTEL_GEN(dev_priv) >= 11 &&
	    modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	if (drm_rotation_90_or_270(rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (skl_needs_memory_bw_wa(dev_priv))
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines,
					   wp->dbuf_block_size);

		if (INTEL_GEN(dev_priv) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size);
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size) + 1;
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);

	wp->linetime_us = fixed16_to_u32_round_up(
					intel_get_linetime_us(crtc_state));

	return 0;
}
static int
skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state,
			    struct skl_wm_params *wp, int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int width;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	return skl_compute_wm_params(crtc_state, width,
				     fb->format, fb->modifier,
				     plane_state->hw.rotation,
				     skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
				     wp, color_plane);
}
static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return true;

	/* The number of lines are ignored for the level 0 watermark. */
	return level > 0;
}
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 int level,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 latency = dev_priv->wm.skl_latency[level];
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	u32 res_blocks, res_lines, min_ddb_alloc = 0;

	if (latency == 0) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
	 * Display WA #1141: kbl,cfl
	 */
	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
	    dev_priv->ipc_enabled)
		latency += 4;

	if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
				 wp->cpp, latency, wp->dbuf_block_size);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
		     wp->dbuf_block_size < 1) &&
		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
			selected_result = method2;
		} else if (latency >= wp->linetime_us) {
			if (IS_GEN(dev_priv, 9) &&
			    !IS_GEMINILAKE(dev_priv))
				selected_result = min_fixed16(method1, method2);
			else
				selected_result = method2;
		} else {
			selected_result = method1;
		}
	}

	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
	res_lines = div_round_up_fixed16(selected_result,
					 wp->plane_blocks_per_line);

	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
		/* Display WA #1125: skl,bxt,kbl */
		if (level == 0 && wp->rc_surface)
			res_blocks +=
				fixed16_to_u32_round_up(wp->y_tile_minimum);

		/* Display WA #1126: skl,bxt,kbl */
		if (level >= 1 && level <= 7) {
			if (wp->y_tiled) {
				res_blocks +=
				    fixed16_to_u32_round_up(wp->y_tile_minimum);
				res_lines += wp->y_min_scanlines;
			} else {
				res_blocks++;
			}

			/*
			 * Make sure result blocks for higher latency levels are
			 * atleast as high as level below the current level.
			 * Assumption in DDB algorithm optimization for special
			 * cases. Also covers Display WA #1125 for RC.
			 */
			if (result_prev->plane_res_b > res_blocks)
				res_blocks = result_prev->plane_res_b;
		}
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		if (wp->y_tiled) {
			int extra_lines;

			if (res_lines % wp->y_min_scanlines == 0)
				extra_lines = wp->y_min_scanlines;
			else
				extra_lines = wp->y_min_scanlines * 2 -
					res_lines % wp->y_min_scanlines;

			min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
								 wp->plane_blocks_per_line);
		} else {
			min_ddb_alloc = res_blocks +
				DIV_ROUND_UP(res_blocks, 10);
		}
	}

	if (!skl_wm_has_lines(dev_priv, level))
		res_lines = 0;

	if (res_lines > 31) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * If res_lines is valid, assume we can use this watermark level
	 * for now. We'll come back and disable it after we calculate the
	 * DDB allocation if it turns out we don't actually have enough
	 * blocks to satisfy it.
	 */
	result->plane_res_b = res_blocks;
	result->plane_res_l = res_lines;
	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
	result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
	result->plane_en = true;
}
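
/*
 * Note on the fields filled in above: plane_res_b/plane_res_l are what
 * ends up in the PLANE_WM registers, while min_ddb_alloc only feeds the
 * DDB allocator; the +1 reflects that the hardware treats a watermark
 * equal to the plane's DDB allocation as invalid.
 */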
static void
skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
		      const struct skl_wm_params *wm_params,
		      struct skl_wm_level *levels)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct skl_wm_level *result_prev = &levels[0];

	for (level = 0; level <= max_level; level++) {
		struct skl_wm_level *result = &levels[level];

		skl_compute_plane_wm(crtc_state, level, wm_params,
				     result_prev, result);

		result_prev = result;
	}
}
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
				      const struct skl_wm_params *wp,
				      struct skl_plane_wm *wm)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	u16 trans_min, trans_amount, trans_y_tile_min;
	u16 wm0_sel_res_b, trans_offset_b, res_blocks;

	/* Transition WM don't make any sense if ipc is disabled */
	if (!dev_priv->ipc_enabled)
		return;

	/*
	 * WaDisableTWM:skl,kbl,cfl,bxt
	 * Transition WM are not recommended by HW team for GEN9
	 */
	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 11)
		trans_min = 4;
	else
		trans_min = 14;

	/* Display WA #1140: glk,cnl */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		trans_amount = 0;
	else
		trans_amount = 10; /* This is configurable amount */

	trans_offset_b = trans_min + trans_amount;

	/*
	 * The spec asks for Selected Result Blocks for wm0 (the real value),
	 * not Result Blocks (the integer value). Pay attention to the capital
	 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
	 * and since we later will have to get the ceiling of the sum in the
	 * transition watermarks calculation, we can just pretend Selected
	 * Result Blocks is Result Blocks minus 1 and it should work for the
	 * current platforms.
	 */
	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;

	if (wp->y_tiled) {
		trans_y_tile_min =
			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
		res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
			     trans_offset_b;
	} else {
		res_blocks = wm0_sel_res_b + trans_offset_b;

		/* WA BUG:1938466 add one block for non y-tile planes */
		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
			res_blocks += 1;
	}

	/*
	 * Just assume we can enable the transition watermark. After
	 * computing the DDB we'll come back and disable it if that
	 * assumption turns out to be false.
	 */
	wm->trans_wm.plane_res_b = res_blocks + 1;
	wm->trans_wm.plane_en = true;
}
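
/*
 * Illustrative calculation, assuming trans_min = 4 and the default
 * trans_amount of 10: a wm0 result of 20 blocks on a non Y-tiled plane
 * gives res_blocks = (20 - 1) + 14 = 33, so the transition watermark is
 * programmed as 33 + 1 = 34 blocks.
 */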
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     enum plane_id plane_id, int color_plane)
{
	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
	struct skl_wm_params wm_params;
	int ret;

	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, color_plane);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
	skl_compute_transition_wm(crtc_state, &wm_params, wm);

	return 0;
}
static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 enum plane_id plane_id)
{
	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
	struct skl_wm_params wm_params;
	int ret;

	wm->is_planar = true;

	/* uv plane watermarks must also be validated for NV12/Planar */
	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, 1);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);

	return 0;
}
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum plane_id plane_id = plane->id;
	int ret;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	ret = skl_build_plane_wm_single(crtc_state, plane_state,
					plane_id, 0);
	if (ret)
		return ret;

	if (fb->format->is_yuv && fb->format->num_planes > 1) {
		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
					    plane_id);
		if (ret)
			return ret;
	}

	return 0;
}
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
	int ret;

	/* Watermarks calculated in master */
	if (plane_state->planar_slave)
		return 0;

	if (plane_state->planar_linked_plane) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;

		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
		WARN_ON(!fb->format->is_yuv ||
			fb->format->num_planes == 1);

		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						y_plane_id, 0);
		if (ret)
			return ret;

		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 1);
		if (ret)
			return ret;
	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
						     crtc_state) {
		if (INTEL_GEN(dev_priv) >= 11)
			ret = icl_build_plane_wm(crtc_state, plane_state);
		else
			ret = skl_build_plane_wm(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		intel_de_write_fw(dev_priv, reg,
				  (entry->end - 1) << 16 | entry->start);
	else
		intel_de_write_fw(dev_priv, reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	u32 val = 0;

	if (level->plane_en)
		val |= PLANE_WM_EN;
	if (level->ignore_lines)
		val |= PLANE_WM_IGNORE_LINES;
	val |= level->plane_res_b;
	val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;

	intel_de_write_fw(dev_priv, reg, val);
}
void skl_write_plane_wm(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_plane_wm *wm =
		&crtc_state->wm.skl.optimal.planes[plane_id];
	const struct skl_ddb_entry *ddb_y =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];
	const struct skl_ddb_entry *ddb_uv =
		&crtc_state->wm.skl.plane_ddb_uv[plane_id];

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	if (INTEL_GEN(dev_priv) >= 11) {
		skl_ddb_entry_write(dev_priv,
				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
		return;
	}

	if (wm->is_planar)
		swap(ddb_y, ddb_uv);

	skl_ddb_entry_write(dev_priv,
			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
	skl_ddb_entry_write(dev_priv,
			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
void skl_write_cursor_wm(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_plane_wm *wm =
		&crtc_state->wm.skl.optimal.planes[plane_id];
	const struct skl_ddb_entry *ddb =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	return l1->plane_en == l2->plane_en &&
		l1->ignore_lines == l2->ignore_lines &&
		l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b;
}
static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
				const struct skl_plane_wm *wm1,
				const struct skl_plane_wm *wm2)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		/*
		 * We don't check uv_wm as the hardware doesn't actually
		 * use it. It only gets used for calculating the required
		 * ddb allocation.
		 */
		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
			return false;
	}

	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
				 const struct skl_ddb_entry *entries,
				 int num_entries, int ignore_idx)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		if (i != ignore_idx &&
		    skl_ddb_entries_overlap(ddb, &entries[i]))
			return true;
	}

	return false;
}
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
			    struct intel_crtc_state *new_crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_allocate_pipe_ddb(new_crtc_state);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(old_crtc_state,
						  new_crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}
static char enast(bool enable)
{
	return enable ? '*' : ' ';
}
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i;

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;

		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
		new_pipe_wm = &new_crtc_state->wm.skl.optimal;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
				    plane->base.base.id, plane->base.name,
				    old->start, old->end, new->start, new->end,
				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_plane_wm *old_wm, *new_wm;

			old_wm = &old_pipe_wm->planes[plane_id];
			new_wm = &new_pipe_wm->planes[plane_id];

			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
				continue;

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
				    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
				    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
				    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
				    enast(old_wm->trans_wm.plane_en),
				    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
				    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
				    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
				    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
				    enast(new_wm->trans_wm.plane_en));

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
				    " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
				    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
				    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
				    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
				    old_wm->trans_wm.plane_res_b,
				    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
				    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
				    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
				    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
				    new_wm->trans_wm.plane_res_b);

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
				    old_wm->trans_wm.min_ddb_alloc,
				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
				    new_wm->trans_wm.min_ddb_alloc);
		}
	}
}
static int intel_add_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	return 0;
}
static int
skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us. Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;

		state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;

		/*
		 * We usually only initialize state->active_pipes if we
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!state->modeset)
			state->active_pipes = dev_priv->active_pipes;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit. Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceeds its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (state->active_pipe_changes || state->modeset) {
		ret = intel_add_all_pipes(state);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * To make sure the cursor watermark registers are always consistent
 * with our computed state the following scenario needs special
 * treatment:
 *
 * 1. enable cursor
 * 2. move cursor entirely offscreen
 * 3. disable cursor
 *
 * Step 2. does call .disable_plane() but does not zero the watermarks
 * (since we consider an offscreen cursor still active for the purposes
 * of watermarks). Step 3. would not normally call .disable_plane()
 * because the actual plane visibility isn't changing, and we don't
 * deallocate the cursor ddb until the pipe gets disabled. So we must
 * force step 3. to call .disable_plane() to update the watermark
 * registers properly.
 *
 * Other planes do not suffer from this issues as their watermarks are
 * calculated based on the actual plane visibility. The only time this
 * can trigger for the other planes is during the initial readout as the
 * default value of the watermarks registers is not zero.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/*
		 * Force a full wm update for every plane on modeset.
		 * Required because the reset value of the wm registers
		 * is non-zero, whereas we want all disabled planes to
		 * have zero watermarks. So if we turn off the relevant
		 * power well the hardware state will go out of sync
		 * with the software state.
		 */
		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
		    skl_plane_wm_equals(dev_priv,
					&old_crtc_state->wm.skl.optimal.planes[plane_id],
					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
static int
skl_compute_wm(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc_state *old_crtc_state;
	int ret, i;

	ret = skl_ddb_add_affected_pipes(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
	 * weren't otherwise being modified if pipe allocations had to change.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_build_pipe_wm(new_crtc_state);
		if (ret)
			return ret;
	}

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * skl_compute_ddb() will have adjusted the final watermarks
	 * based on how much ddb is available. Now we can actually
	 * check if the final watermarks changed.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_wm_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	skl_print_wm_changes(state);

	return 0;
}
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev_priv, &config);

	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
*state
,
5851 struct intel_crtc
*crtc
)
5853 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5854 const struct intel_crtc_state
*crtc_state
=
5855 intel_atomic_get_new_crtc_state(state
, crtc
);
5857 mutex_lock(&dev_priv
->wm
.wm_mutex
);
5858 crtc
->wm
.active
.ilk
= crtc_state
->wm
.ilk
.intermediate
;
5859 ilk_program_watermarks(dev_priv
);
5860 mutex_unlock(&dev_priv
->wm
.wm_mutex
);
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static inline void skl_wm_level_from_reg_val(u32 val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}
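
/*
 * Example decode: a PLANE_WM value with PLANE_WM_EN set, 70 in the blocks
 * field and 7 in the lines field is unpacked into plane_en = true,
 * plane_res_b = 70 and plane_res_l = 7, mirroring the packing done in
 * skl_write_wm_level().
 */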
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	u32 val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, plane_id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}
}
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;

	skl_ddb_get_hw_state(dev_priv);
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
	}

	if (dev_priv->active_pipes) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	}
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
	enum pipe pipe = crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
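/*
 * Example of how these helpers are used (the field layout is illustrative
 * only, not quoted from the register definitions): if DSPFW_PLANEA were a
 * field at bits 6:0, then for tmp == 0x04030225
 *
 *	_FW_WM(tmp, PLANEA) == (0x04030225 & 0x7f) >> 0 == 0x25
 *
 * i.e. each invocation masks out one watermark field of a DSPFW register and
 * shifts it down to bit 0.  The _VLV variant only differs in using the wider
 * *_MASK_VLV field definitions.
 */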
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	u32 tmp;

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	u32 tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

out:
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	drm_dbg_kms(&dev_priv->drm,
		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
		    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->uapi.visible)
			continue;

		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];

				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		vlv_punit_get(dev_priv);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Punit not acking DDR DVFS request, "
				    "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		vlv_punit_put(dev_priv);
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0],
			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->uapi.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *crtc;

	ilk_init_lp_watermarks(dev_priv);

	for_each_intel_crtc(&dev_priv->drm, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the #intel_crtc on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *   watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time)+1) * surface width *
 *     bytes per pixel
 * where
 *   line time = htotal / dotclock
 *   surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
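/*
 * Worked example of the normal (non-SR) formula documented above (all numbers
 * are made up for illustration and do not come from any platform's spec):
 * with a 148500 kHz dot clock, 4 bytes per pixel and a 12 usec latency,
 *
 *	148500 kHz * 4 B * 12 usec ~= 7128 B,
 *
 * which for a FIFO with 64 byte entries is ~112 entries; rounding up and
 * adding the 2 extra entries for clock crossings gives a programmed watermark
 * of 114.  The per-platform helpers earlier in this file (e.g.
 * intel_calculate_wm()) perform this kind of arithmetic against the real
 * latency and FIFO parameters.
 */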
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_IPC(dev_priv))
		return;

	val = I915_READ(DISP_ARB_CTL2);

	if (dev_priv->ipc_enabled)
		val |= DISP_IPC_ENABLE;
	else
		val &= ~DISP_IPC_ENABLE;

	I915_WRITE(DISP_ARB_CTL2, val);
}

static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
{
	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(dev_priv))
		return false;

	/* Display WA #1141: SKL:all KBL:all CFL */
	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		return dev_priv->dram_info.symmetric_memory;

	return true;
}

void intel_init_ipc(struct drm_i915_private *dev_priv)
{
	if (!HAS_IPC(dev_priv))
		return;

	dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);

	intel_enable_ipc(dev_priv);
}
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	u32 val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		drm_dbg_kms(&dev_priv->drm,
			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			    tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * BSpec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 *  3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = I915_READ(GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	I915_WRITE(GEN8_L3SQCREG1, val);

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* This is not a Wa. Enable to reduce Sampler power */
	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);

	/* Wa_14010594013:icl,ehl */
	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
			 0, CNL_DELAY_PMRSP);
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 vd_pg_enable = 0;
	unsigned int i;

	/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
	for (i = 0; i < I915_MAX_VCS; i++) {
		if (HAS_ENGINE(dev_priv, _VCS(i)))
			vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
					VDN_MFX_POWERGATE_ENABLE(i);
	}

	I915_WRITE(POWERGATE_ENABLE,
		   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);

	/* Wa_1409825376:tgl (pre-prod) */
	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
			   TGL_VRH_GATING_DIS);
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
		   CNP_PWM_CGE_GATING_DISABLE);
}
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	cnp_init_clock_gating(dev_priv);

	/* This is not a Wa. Enable for better image quality */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));

	/* WaEnableChickenDCPR:cnl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcWakeMemOn:cnl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);

	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
	/* ReadHitWriteOnlyDisable:cnl */
	val |= RCCUNIT_CLKGATE_DIS;
	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
		val |= SARBUNIT_CLKGATE_DIS;
	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);

	/* Wa_2201832410:cnl */
	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
	val |= GWUNIT_CLKGATE_DIS;
	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);

	/* WaDisableVFclkgate:cnl */
	/* WaVFUnitClockGatingDisable:cnl */
	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
	val |= VFUNIT_CLKGATE_DIS;
	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WaFbcNukeOnHostModify:cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/*
	 * WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}
static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
	intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
	intel_uncore_write16(uncore, DEUC, 0);
	intel_uncore_write(uncore,
			   MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	intel_uncore_write(uncore,
			   CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm,
		    "No clock gating settings or workarounds applied.\n");
}
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 12))
		dev_priv->display.init_clock_gating = tgl_init_clock_gating;
	else if (IS_GEN(dev_priv, 11))
		dev_priv->display.init_clock_gating = icl_init_clock_gating;
	else if (IS_CANNONLAKE(dev_priv))
		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
	else if (IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skl_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = chv_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
	else if (IS_GEN(dev_priv, 6))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN(dev_priv, 5))
		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
	else if (IS_GEN(dev_priv, 3))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN(dev_priv, 2))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	if (IS_PINEVIEW(dev_priv))
		pnv_get_mem_freq(dev_priv);
	else if (IS_GEN(dev_priv, 5))
		ilk_get_mem_freq(dev_priv);

	if (intel_has_sagv(dev_priv))
		skl_setup_sagv_block_time(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to read display plane latency. "
				    "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			drm_info(&dev_priv->drm,
				 "failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pnv_update_wm;
	} else if (IS_GEN(dev_priv, 4)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN(dev_priv, 3)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN(dev_priv, 2)) {
		if (INTEL_NUM_PIPES(dev_priv) == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		drm_err(&dev_priv->drm,
			"unexpected fall-through in %s\n", __func__);
	}
}
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}