/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "gt/intel_llc.h"

#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* WaDisableDopClockGating */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when the DSI port PLL
	 * is off and an MMIO access is attempted by any privileged
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							  bool is_ddr3,
							  int fsb,
							  int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

	vlv_punit_put(dev_priv);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

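/*
 * Worked example (illustrative, not from the original source): with a
 * self-refresh watermark value of 12, FW_WM(12, SR) token-pastes to
 * ((12 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the value is shifted
 * into the SR field of the DSPFW register and masked so it cannot spill
 * into neighbouring fields of the same register.
 */
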
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so-called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;
	u32 dsparb, dsparb2, dsparb3;

	switch (pipe) {
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

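/*
 * Illustrative note (a summary of the code above, not an authoritative
 * statement of the hardware layout): the per-pipe DSPARB/DSPARB2/DSPARB3
 * values carve one plane FIFO into primary/sprite0/sprite1 regions, with
 * the 9-bit sprite start offsets split as 8 low bits in one register plus
 * one high bit in DSPARB2, which is what VLV_FIFO_START() reassembles. The
 * assignments above suggest the plane regions span offsets 0..511, while
 * the cursor uses its own fixed 63-entry allocation.
 */
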
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM / 2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, i.e. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 *   __---__---__ (- plane active, _ blanking)
 *   -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 *   __----__----__ (- plane active, _ blanking)
 *   -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	u64 ret;

	ret = mul_u32_u32(pixel_rate, cpp * latency);
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}

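/*
 * Worked example for intel_wm_method1() (illustrative numbers only):
 * a 148500 kHz pixel rate at 4 bytes per pixel with a 5 usec memory
 * wakeup latency (latency value 50 in 0.1 usec units) drains
 * 148500 * 4 * 50 / 10000 = 2970 bytes from the FIFO before memory
 * can respond, so at least that much data must still be buffered when
 * the fetch is issued.
 */
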
/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, i.e. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\___       |\___
 *   |    \___   |    \___
 *   |        \  |        \
 *   __ --__--__--__--__--__--__ (- plane active, _ blanking)
 *   -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}

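/*
 * Worked example for intel_wm_method2() (illustrative numbers only):
 * with a 148500 kHz pixel rate, htotal = 2200 and a 5 usec latency
 * (latency value 50), (50 * 148500) / (2200 * 10000) = 0 whole lines
 * elapse during the wakeup, so the formula rounds up to one line of
 * data: (0 + 1) * 1920 * 4 = 7680 bytes for a 1920 pixel wide plane
 * at 4 bytes per pixel.
 */
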
/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

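/*
 * Worked example for intel_calculate_wm() (illustrative numbers only):
 * with latency_ns = 5000, a 148500 kHz pixel clock and cpp = 4, method 1
 * yields 2970 bytes. Assuming a hypothetical 64 byte cacheline and a
 * guard of 2, that is DIV_ROUND_UP(2970, 64) + 2 = 49 FIFO entries, so a
 * hypothetical 512 entry FIFO would be programmed with a watermark of
 * 512 - 49 = 463.
 */
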
static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

static bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}

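/*
 * Worked example for g4x_tlb_miss_wa() (illustrative numbers only):
 * a 511 cacheline FIFO holds 511 * 64 = 32704 bytes. Eight whole lines
 * of a 640 pixel wide, 4 byte per pixel plane are 640 * 4 * 8 = 20480
 * bytes, so they fit and the watermark is bumped by the difference,
 * 32704 - 20480 = 12224 bytes. For a 1920 pixel wide plane the eight
 * lines (61440 bytes) do not fit and no adjustment is applied.
 */
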
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	width = drm_rect_width(&plane_state->uapi.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}

static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

1347 | static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) | |
1348 | { | |
2225f3c6 | 1349 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
04548cba | 1350 | struct intel_atomic_state *state = |
2225f3c6 | 1351 | to_intel_atomic_state(crtc_state->uapi.state); |
04548cba | 1352 | struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; |
0b14d968 VS |
1353 | int num_active_planes = hweight8(crtc_state->active_planes & |
1354 | ~BIT(PLANE_CURSOR)); | |
04548cba | 1355 | const struct g4x_pipe_wm *raw; |
7b510451 VS |
1356 | const struct intel_plane_state *old_plane_state; |
1357 | const struct intel_plane_state *new_plane_state; | |
04548cba VS |
1358 | struct intel_plane *plane; |
1359 | enum plane_id plane_id; | |
1360 | int i, level; | |
1361 | unsigned int dirty = 0; | |
1362 | ||
7b510451 VS |
1363 | for_each_oldnew_intel_plane_in_state(state, plane, |
1364 | old_plane_state, | |
1365 | new_plane_state, i) { | |
7b3cb17a ML |
1366 | if (new_plane_state->hw.crtc != &crtc->base && |
1367 | old_plane_state->hw.crtc != &crtc->base) | |
04548cba VS |
1368 | continue; |
1369 | ||
7b510451 | 1370 | if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) |
04548cba VS |
1371 | dirty |= BIT(plane->id); |
1372 | } | |
1373 | ||
1374 | if (!dirty) | |
1375 | return 0; | |
1376 | ||
1377 | level = G4X_WM_LEVEL_NORMAL; | |
1378 | if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) | |
1379 | goto out; | |
1380 | ||
1381 | raw = &crtc_state->wm.g4x.raw[level]; | |
1382 | for_each_plane_id_on_crtc(crtc, plane_id) | |
1383 | wm_state->wm.plane[plane_id] = raw->plane[plane_id]; | |
1384 | ||
1385 | level = G4X_WM_LEVEL_SR; | |
1386 | ||
1387 | if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) | |
1388 | goto out; | |
1389 | ||
1390 | raw = &crtc_state->wm.g4x.raw[level]; | |
1391 | wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; | |
1392 | wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; | |
1393 | wm_state->sr.fbc = raw->fbc; | |
1394 | ||
1395 | wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY); | |
1396 | ||
1397 | level = G4X_WM_LEVEL_HPLL; | |
1398 | ||
1399 | if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) | |
1400 | goto out; | |
1401 | ||
1402 | raw = &crtc_state->wm.g4x.raw[level]; | |
1403 | wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; | |
1404 | wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; | |
1405 | wm_state->hpll.fbc = raw->fbc; | |
1406 | ||
1407 | wm_state->hpll_en = wm_state->cxsr; | |
1408 | ||
1409 | level++; | |
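/*
 * 'level' now points one past the last level that validated
 * successfully; the code at 'out:' below invalidates everything
 * from 'level' upwards, whether we got there by falling through
 * or via one of the goto's above.
 */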
1410 | ||
1411 | out: | |
1412 | if (level == G4X_WM_LEVEL_NORMAL) | |
1413 | return -EINVAL; | |
1414 | ||
1415 | /* invalidate the higher levels */ | |
1416 | g4x_invalidate_wms(crtc, wm_state, level); | |
1417 | ||
1418 | /* | |
1419 | * Determine if the FBC watermark(s) can be used. If | |
1420 | * this isn't the case we prefer to disable the FBC | |
1421 | ( watermark(s) rather than disable the SR/HPLL | |
1422 | * level(s) entirely. | |
1423 | */ | |
1424 | wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL; | |
1425 | ||
1426 | if (level >= G4X_WM_LEVEL_SR && | |
1427 | wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) | |
1428 | wm_state->fbc_en = false; | |
1429 | else if (level >= G4X_WM_LEVEL_HPLL && | |
1430 | wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) | |
1431 | wm_state->fbc_en = false; | |
1432 | ||
1433 | return 0; | |
1434 | } | |
1435 | ||
cd1d3ee9 | 1436 | static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) |
04548cba | 1437 | { |
2225f3c6 | 1438 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); |
248c2435 ML |
1439 | struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; |
1440 | const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; | |
1441 | struct intel_atomic_state *intel_state = | |
2225f3c6 | 1442 | to_intel_atomic_state(new_crtc_state->uapi.state); |
248c2435 ML |
1443 | const struct intel_crtc_state *old_crtc_state = |
1444 | intel_atomic_get_old_crtc_state(intel_state, crtc); | |
1445 | const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; | |
04548cba VS |
1446 | enum plane_id plane_id; |
1447 | ||
2225f3c6 | 1448 | if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { |
248c2435 ML |
1449 | *intermediate = *optimal; |
1450 | ||
1451 | intermediate->cxsr = false; | |
1452 | intermediate->hpll_en = false; | |
1453 | goto out; | |
1454 | } | |
1455 | ||
04548cba | 1456 | intermediate->cxsr = optimal->cxsr && active->cxsr && |
248c2435 | 1457 | !new_crtc_state->disable_cxsr; |
04548cba | 1458 | intermediate->hpll_en = optimal->hpll_en && active->hpll_en && |
248c2435 | 1459 | !new_crtc_state->disable_cxsr; |
04548cba VS |
1460 | intermediate->fbc_en = optimal->fbc_en && active->fbc_en; |
1461 | ||
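/*
 * The intermediate watermarks stay in effect from the commit until
 * the next vblank, so they must be safe for both the old and the new
 * plane configuration. The g4x state stores plain (non-inverted)
 * watermark values, hence max() of the old and new value below is
 * the conservative merge.
 */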
1462 | for_each_plane_id_on_crtc(crtc, plane_id) { | |
1463 | intermediate->wm.plane[plane_id] = | |
1464 | max(optimal->wm.plane[plane_id], | |
1465 | active->wm.plane[plane_id]); | |
1466 | ||
1467 | WARN_ON(intermediate->wm.plane[plane_id] > | |
1468 | g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); | |
1469 | } | |
1470 | ||
1471 | intermediate->sr.plane = max(optimal->sr.plane, | |
1472 | active->sr.plane); | |
1473 | intermediate->sr.cursor = max(optimal->sr.cursor, | |
1474 | active->sr.cursor); | |
1475 | intermediate->sr.fbc = max(optimal->sr.fbc, | |
1476 | active->sr.fbc); | |
1477 | ||
1478 | intermediate->hpll.plane = max(optimal->hpll.plane, | |
1479 | active->hpll.plane); | |
1480 | intermediate->hpll.cursor = max(optimal->hpll.cursor, | |
1481 | active->hpll.cursor); | |
1482 | intermediate->hpll.fbc = max(optimal->hpll.fbc, | |
1483 | active->hpll.fbc); | |
1484 | ||
1485 | WARN_ON((intermediate->sr.plane > | |
1486 | g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || | |
1487 | intermediate->sr.cursor > | |
1488 | g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && | |
1489 | intermediate->cxsr); | |
1490 | WARN_ON((intermediate->sr.plane > | |
1491 | g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || | |
1492 | intermediate->sr.cursor > | |
1493 | g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && | |
1494 | intermediate->hpll_en); | |
1495 | ||
1496 | WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) && | |
1497 | intermediate->fbc_en && intermediate->cxsr); | |
1498 | WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && | |
1499 | intermediate->fbc_en && intermediate->hpll_en); | |
1500 | ||
248c2435 | 1501 | out: |
04548cba VS |
1502 | /* |
1503 | * If our intermediate WM are identical to the final WM, then we can | |
1504 | * omit the post-vblank programming; only update if it's different. | |
1505 | */ | |
1506 | if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) | |
248c2435 | 1507 | new_crtc_state->wm.need_postvbl_update = true; |
04548cba VS |
1508 | |
1509 | return 0; | |
1510 | } | |
1511 | ||
1512 | static void g4x_merge_wm(struct drm_i915_private *dev_priv, | |
1513 | struct g4x_wm_values *wm) | |
1514 | { | |
1515 | struct intel_crtc *crtc; | |
c08e9132 | 1516 | int num_active_pipes = 0; |
04548cba VS |
1517 | |
1518 | wm->cxsr = true; | |
1519 | wm->hpll_en = true; | |
1520 | wm->fbc_en = true; | |
1521 | ||
1522 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |
1523 | const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; | |
1524 | ||
1525 | if (!crtc->active) | |
1526 | continue; | |
1527 | ||
1528 | if (!wm_state->cxsr) | |
1529 | wm->cxsr = false; | |
1530 | if (!wm_state->hpll_en) | |
1531 | wm->hpll_en = false; | |
1532 | if (!wm_state->fbc_en) | |
1533 | wm->fbc_en = false; | |
1534 | ||
c08e9132 | 1535 | num_active_pipes++; |
04548cba VS |
1536 | } |
1537 | ||
c08e9132 | 1538 | if (num_active_pipes != 1) { |
04548cba VS |
1539 | wm->cxsr = false; |
1540 | wm->hpll_en = false; | |
1541 | wm->fbc_en = false; | |
1542 | } | |
1543 | ||
1544 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |
1545 | const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; | |
1546 | enum pipe pipe = crtc->pipe; | |
1547 | ||
1548 | wm->pipe[pipe] = wm_state->wm; | |
1549 | if (crtc->active && wm->cxsr) | |
1550 | wm->sr = wm_state->sr; | |
1551 | if (crtc->active && wm->hpll_en) | |
1552 | wm->hpll = wm_state->hpll; | |
1553 | } | |
1554 | } | |
1555 | ||
1556 | static void g4x_program_watermarks(struct drm_i915_private *dev_priv) | |
1557 | { | |
1558 | struct g4x_wm_values *old_wm = &dev_priv->wm.g4x; | |
1559 | struct g4x_wm_values new_wm = {}; | |
1560 | ||
1561 | g4x_merge_wm(dev_priv, &new_wm); | |
1562 | ||
1563 | if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) | |
1564 | return; | |
1565 | ||
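/*
 * Ordering matters here: cxsr is disabled before the new watermark
 * values are written, so we never run cxsr against stale watermarks,
 * and it is only re-enabled afterwards once the new values are in
 * place.
 */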
1566 | if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) | |
1567 | _intel_set_memory_cxsr(dev_priv, false); | |
1568 | ||
1569 | g4x_write_wm_values(dev_priv, &new_wm); | |
1570 | ||
1571 | if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) | |
1572 | _intel_set_memory_cxsr(dev_priv, true); | |
1573 | ||
1574 | *old_wm = new_wm; | |
1575 | } | |
1576 | ||
1577 | static void g4x_initial_watermarks(struct intel_atomic_state *state, | |
7a8fdb1f | 1578 | struct intel_crtc *crtc) |
04548cba | 1579 | { |
7a8fdb1f VS |
1580 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1581 | const struct intel_crtc_state *crtc_state = | |
1582 | intel_atomic_get_new_crtc_state(state, crtc); | |
04548cba VS |
1583 | |
1584 | mutex_lock(&dev_priv->wm.wm_mutex); | |
1585 | crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; | |
1586 | g4x_program_watermarks(dev_priv); | |
1587 | mutex_unlock(&dev_priv->wm.wm_mutex); | |
1588 | } | |
1589 | ||
1590 | static void g4x_optimize_watermarks(struct intel_atomic_state *state, | |
7a8fdb1f | 1591 | struct intel_crtc *crtc) |
04548cba | 1592 | { |
7a8fdb1f VS |
1593 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1594 | const struct intel_crtc_state *crtc_state = | |
1595 | intel_atomic_get_new_crtc_state(state, crtc); | |
04548cba VS |
1596 | |
1597 | if (!crtc_state->wm.need_postvbl_update) | |
1598 | return; | |
1599 | ||
1600 | mutex_lock(&dev_priv->wm.wm_mutex); | |
88016a9f | 1601 | crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; |
04548cba VS |
1602 | g4x_program_watermarks(dev_priv); |
1603 | mutex_unlock(&dev_priv->wm.wm_mutex); | |
1604 | } | |
1605 | ||
262cd2e1 VS |
1606 | /* latency must be in 0.1us units. */ |
1607 | static unsigned int vlv_wm_method2(unsigned int pixel_rate, | |
baf69ca8 VS |
1608 | unsigned int htotal, |
1609 | unsigned int width, | |
ac484963 | 1610 | unsigned int cpp, |
262cd2e1 VS |
1611 | unsigned int latency) |
1612 | { | |
1613 | unsigned int ret; | |
1614 | ||
baf69ca8 VS |
1615 | ret = intel_wm_method2(pixel_rate, htotal, |
1616 | width, cpp, latency); | |
262cd2e1 VS |
1617 | ret = DIV_ROUND_UP(ret, 64); |
1618 | ||
1619 | return ret; | |
1620 | } | |
1621 | ||
bb726519 | 1622 | static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) |
262cd2e1 | 1623 | { |
262cd2e1 VS |
1624 | /* all latencies in usec */ |
1625 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; | |
1626 | ||
58590c14 VS |
1627 | dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; |
1628 | ||
262cd2e1 VS |
1629 | if (IS_CHERRYVIEW(dev_priv)) { |
1630 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; | |
1631 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; | |
58590c14 VS |
1632 | |
1633 | dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS; | |
262cd2e1 VS |
1634 | } |
1635 | } | |
1636 | ||
5ce9a649 JN |
1637 | static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, |
1638 | const struct intel_plane_state *plane_state, | |
1639 | int level) | |
262cd2e1 | 1640 | { |
f90a85e7 | 1641 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); |
262cd2e1 | 1642 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
e339d67e | 1643 | const struct drm_display_mode *adjusted_mode = |
1326a92c | 1644 | &crtc_state->hw.adjusted_mode; |
1a1f1287 | 1645 | unsigned int clock, htotal, cpp, width, wm; |
262cd2e1 VS |
1646 | |
1647 | if (dev_priv->wm.pri_latency[level] == 0) | |
1648 | return USHRT_MAX; | |
1649 | ||
a07102f1 | 1650 | if (!intel_wm_plane_visible(crtc_state, plane_state)) |
262cd2e1 VS |
1651 | return 0; |
1652 | ||
7b3cb17a | 1653 | cpp = plane_state->hw.fb->format->cpp[0]; |
e339d67e VS |
1654 | clock = adjusted_mode->crtc_clock; |
1655 | htotal = adjusted_mode->crtc_htotal; | |
1656 | width = crtc_state->pipe_src_w; | |
262cd2e1 | 1657 | |
709f3fc9 | 1658 | if (plane->id == PLANE_CURSOR) { |
262cd2e1 VS |
1659 | /* |
1660 | * FIXME the formula gives values that are | |
1661 | * too big for the cursor FIFO, and hence we | |
1662 | * would never be able to use cursors. For | |
1663 | * now just hardcode the watermark. | |
1664 | */ | |
1665 | wm = 63; | |
1666 | } else { | |
ac484963 | 1667 | wm = vlv_wm_method2(clock, htotal, width, cpp, |
262cd2e1 VS |
1668 | dev_priv->wm.pri_latency[level] * 10); |
1669 | } | |
1670 | ||
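/*
 * Rough example, assuming the method2 helper returns the bytes
 * fetched during the latency window rounded up to whole lines: a
 * 1920 pixel wide plane at 4 bytes per pixel on a ~148.5 MHz,
 * htotal=2200 mode with the 3 us PM2 latency covers less than one
 * line, so this works out to roughly 1 line * 1920 * 4 = 7680 bytes,
 * i.e. about 120 cachelines after vlv_wm_method2()'s division by 64.
 */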
1a1f1287 | 1671 | return min_t(unsigned int, wm, USHRT_MAX); |
262cd2e1 VS |
1672 | } |
1673 | ||
1a10ae6b VS |
1674 | static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) |
1675 | { | |
1676 | return (active_planes & (BIT(PLANE_SPRITE0) | | |
1677 | BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1); | |
1678 | } | |
1679 | ||
5012e604 | 1680 | static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) |
54f1b6e1 | 1681 | { |
2225f3c6 | 1682 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
114d7dc0 | 1683 | const struct g4x_pipe_wm *raw = |
5012e604 | 1684 | &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; |
814e7f0b | 1685 | struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; |
5012e604 | 1686 | unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); |
0b14d968 | 1687 | int num_active_planes = hweight8(active_planes); |
5012e604 | 1688 | const int fifo_size = 511; |
54f1b6e1 | 1689 | int fifo_extra, fifo_left = fifo_size; |
1a10ae6b | 1690 | int sprite0_fifo_extra = 0; |
5012e604 VS |
1691 | unsigned int total_rate; |
1692 | enum plane_id plane_id; | |
54f1b6e1 | 1693 | |
1a10ae6b VS |
1694 | /* |
1695 | * When enabling sprite0 after sprite1 has already been enabled | |
1696 | * we tend to get an underrun unless sprite0 already has some | |
1697 | * FIFO space allocated. Hence we always allocate at least one | |
1698 | * cacheline for sprite0 whenever sprite1 is enabled. | |
1699 | * | |
1700 | * All other plane enable sequences appear immune to this problem. | |
1701 | */ | |
1702 | if (vlv_need_sprite0_fifo_workaround(active_planes)) | |
1703 | sprite0_fifo_extra = 1; | |
1704 | ||
5012e604 VS |
1705 | total_rate = raw->plane[PLANE_PRIMARY] + |
1706 | raw->plane[PLANE_SPRITE0] + | |
1a10ae6b VS |
1707 | raw->plane[PLANE_SPRITE1] + |
1708 | sprite0_fifo_extra; | |
54f1b6e1 | 1709 | |
5012e604 VS |
1710 | if (total_rate > fifo_size) |
1711 | return -EINVAL; | |
54f1b6e1 | 1712 | |
5012e604 VS |
1713 | if (total_rate == 0) |
1714 | total_rate = 1; | |
54f1b6e1 | 1715 | |
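/*
 * Example of the proportional split done below: with PM2 raw
 * watermarks of 12 (primary), 24 (sprite0) and 0 (sprite1),
 * total_rate is 36, so the primary gets 511 * 12 / 36 = 170
 * cachelines and sprite0 gets 511 * 24 / 36 = 340; the one leftover
 * cacheline is then handed out by the "spread the remainder evenly"
 * loop further down.
 */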
5012e604 | 1716 | for_each_plane_id_on_crtc(crtc, plane_id) { |
54f1b6e1 VS |
1717 | unsigned int rate; |
1718 | ||
5012e604 VS |
1719 | if ((active_planes & BIT(plane_id)) == 0) { |
1720 | fifo_state->plane[plane_id] = 0; | |
54f1b6e1 VS |
1721 | continue; |
1722 | } | |
1723 | ||
5012e604 VS |
1724 | rate = raw->plane[plane_id]; |
1725 | fifo_state->plane[plane_id] = fifo_size * rate / total_rate; | |
1726 | fifo_left -= fifo_state->plane[plane_id]; | |
54f1b6e1 VS |
1727 | } |
1728 | ||
1a10ae6b VS |
1729 | fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; |
1730 | fifo_left -= sprite0_fifo_extra; | |
1731 | ||
5012e604 VS |
1732 | fifo_state->plane[PLANE_CURSOR] = 63; |
1733 | ||
1734 | fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); | |
54f1b6e1 VS |
1735 | |
1736 | /* spread the remainder evenly */ | |
5012e604 | 1737 | for_each_plane_id_on_crtc(crtc, plane_id) { |
54f1b6e1 VS |
1738 | int plane_extra; |
1739 | ||
1740 | if (fifo_left == 0) | |
1741 | break; | |
1742 | ||
5012e604 | 1743 | if ((active_planes & BIT(plane_id)) == 0) |
54f1b6e1 VS |
1744 | continue; |
1745 | ||
1746 | plane_extra = min(fifo_extra, fifo_left); | |
5012e604 | 1747 | fifo_state->plane[plane_id] += plane_extra; |
54f1b6e1 VS |
1748 | fifo_left -= plane_extra; |
1749 | } | |
1750 | ||
5012e604 VS |
1751 | WARN_ON(active_planes != 0 && fifo_left != 0); |
1752 | ||
1753 | /* give it all to the first plane if none are active */ | |
1754 | if (active_planes == 0) { | |
1755 | WARN_ON(fifo_left != fifo_size); | |
1756 | fifo_state->plane[PLANE_PRIMARY] = fifo_left; | |
1757 | } | |
1758 | ||
1759 | return 0; | |
54f1b6e1 VS |
1760 | } |
1761 | ||
ff32c54e VS |
1762 | /* mark all levels starting from 'level' as invalid */ |
1763 | static void vlv_invalidate_wms(struct intel_crtc *crtc, | |
1764 | struct vlv_wm_state *wm_state, int level) | |
1765 | { | |
1766 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |
1767 | ||
6d5019b6 | 1768 | for (; level < intel_wm_num_levels(dev_priv); level++) { |
ff32c54e VS |
1769 | enum plane_id plane_id; |
1770 | ||
1771 | for_each_plane_id_on_crtc(crtc, plane_id) | |
1772 | wm_state->wm[level].plane[plane_id] = USHRT_MAX; | |
1773 | ||
1774 | wm_state->sr[level].cursor = USHRT_MAX; | |
1775 | wm_state->sr[level].plane = USHRT_MAX; | |
1776 | } | |
1777 | } | |
1778 | ||
26cca0e5 VS |
1779 | static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) |
1780 | { | |
1781 | if (wm > fifo_size) | |
1782 | return USHRT_MAX; | |
1783 | else | |
1784 | return fifo_size - wm; | |
1785 | } | |
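/*
 * The "raw" watermarks are the number of cachelines a plane needs,
 * while the value that ends up in the registers is expressed
 * relative to the FIFO size, hence the fifo_size - wm inversion
 * here; a raw value that does not fit in the FIFO becomes USHRT_MAX
 * so the callers treat that level as invalid.
 */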
1786 | ||
ff32c54e VS |
1787 | /* |
1788 | * Starting from 'level' set all higher | |
1789 | * levels to 'value' in the "raw" watermarks. | |
1790 | */ | |
236c48e6 | 1791 | static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, |
ff32c54e | 1792 | int level, enum plane_id plane_id, u16 value) |
262cd2e1 | 1793 | { |
2225f3c6 | 1794 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
6d5019b6 | 1795 | int num_levels = intel_wm_num_levels(dev_priv); |
236c48e6 | 1796 | bool dirty = false; |
262cd2e1 | 1797 | |
ff32c54e | 1798 | for (; level < num_levels; level++) { |
114d7dc0 | 1799 | struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; |
262cd2e1 | 1800 | |
236c48e6 | 1801 | dirty |= raw->plane[plane_id] != value; |
ff32c54e | 1802 | raw->plane[plane_id] = value; |
262cd2e1 | 1803 | } |
236c48e6 VS |
1804 | |
1805 | return dirty; | |
262cd2e1 VS |
1806 | } |
1807 | ||
77d14ee4 VS |
1808 | static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, |
1809 | const struct intel_plane_state *plane_state) | |
262cd2e1 | 1810 | { |
f90a85e7 | 1811 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); |
f8d18d5c | 1812 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
ff32c54e | 1813 | enum plane_id plane_id = plane->id; |
6d5019b6 | 1814 | int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); |
262cd2e1 | 1815 | int level; |
236c48e6 | 1816 | bool dirty = false; |
262cd2e1 | 1817 | |
a07102f1 | 1818 | if (!intel_wm_plane_visible(crtc_state, plane_state)) { |
236c48e6 VS |
1819 | dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); |
1820 | goto out; | |
ff32c54e | 1821 | } |
262cd2e1 | 1822 | |
ff32c54e | 1823 | for (level = 0; level < num_levels; level++) { |
114d7dc0 | 1824 | struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; |
ff32c54e VS |
1825 | int wm = vlv_compute_wm_level(crtc_state, plane_state, level); |
1826 | int max_wm = plane_id == PLANE_CURSOR ? 63 : 511; | |
262cd2e1 | 1827 | |
ff32c54e VS |
1828 | if (wm > max_wm) |
1829 | break; | |
262cd2e1 | 1830 | |
236c48e6 | 1831 | dirty |= raw->plane[plane_id] != wm; |
ff32c54e VS |
1832 | raw->plane[plane_id] = wm; |
1833 | } | |
262cd2e1 | 1834 | |
ff32c54e | 1835 | /* mark all higher levels as invalid */ |
236c48e6 | 1836 | dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); |
262cd2e1 | 1837 | |
236c48e6 VS |
1838 | out: |
1839 | if (dirty) | |
f8d18d5c WK |
1840 | drm_dbg_kms(&dev_priv->drm, |
1841 | "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", | |
1842 | plane->base.name, | |
1843 | crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], | |
1844 | crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], | |
1845 | crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); | |
236c48e6 VS |
1846 | |
1847 | return dirty; | |
ff32c54e | 1848 | } |
262cd2e1 | 1849 | |
77d14ee4 VS |
1850 | static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, |
1851 | enum plane_id plane_id, int level) | |
ff32c54e | 1852 | { |
114d7dc0 | 1853 | const struct g4x_pipe_wm *raw = |
ff32c54e VS |
1854 | &crtc_state->wm.vlv.raw[level]; |
1855 | const struct vlv_fifo_state *fifo_state = | |
1856 | &crtc_state->wm.vlv.fifo_state; | |
262cd2e1 | 1857 | |
ff32c54e VS |
1858 | return raw->plane[plane_id] <= fifo_state->plane[plane_id]; |
1859 | } | |
262cd2e1 | 1860 | |
77d14ee4 | 1861 | static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) |
ff32c54e | 1862 | { |
77d14ee4 VS |
1863 | return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && |
1864 | vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && | |
1865 | vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && | |
1866 | vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); | |
ff32c54e VS |
1867 | } |
1868 | ||
1869 | static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) | |
1870 | { | |
2225f3c6 | 1871 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
ff32c54e VS |
1872 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1873 | struct intel_atomic_state *state = | |
2225f3c6 | 1874 | to_intel_atomic_state(crtc_state->uapi.state); |
ff32c54e VS |
1875 | struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; |
1876 | const struct vlv_fifo_state *fifo_state = | |
1877 | &crtc_state->wm.vlv.fifo_state; | |
0b14d968 VS |
1878 | int num_active_planes = hweight8(crtc_state->active_planes & |
1879 | ~BIT(PLANE_CURSOR)); | |
2225f3c6 | 1880 | bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi); |
7b510451 VS |
1881 | const struct intel_plane_state *old_plane_state; |
1882 | const struct intel_plane_state *new_plane_state; | |
ff32c54e VS |
1883 | struct intel_plane *plane; |
1884 | enum plane_id plane_id; | |
1885 | int level, ret, i; | |
236c48e6 | 1886 | unsigned int dirty = 0; |
ff32c54e | 1887 | |
7b510451 VS |
1888 | for_each_oldnew_intel_plane_in_state(state, plane, |
1889 | old_plane_state, | |
1890 | new_plane_state, i) { | |
7b3cb17a ML |
1891 | if (new_plane_state->hw.crtc != &crtc->base && |
1892 | old_plane_state->hw.crtc != &crtc->base) | |
ff32c54e | 1893 | continue; |
262cd2e1 | 1894 | |
7b510451 | 1895 | if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) |
236c48e6 VS |
1896 | dirty |= BIT(plane->id); |
1897 | } | |
1898 | ||
1899 | /* | |
1900 | * DSPARB registers may have been reset due to the | |
1901 | * power well being turned off. Make sure we restore | |
1902 | * them to a consistent state even if no primary/sprite | |
1903 | * planes are initially active. | |
1904 | */ | |
1905 | if (needs_modeset) | |
1906 | crtc_state->fifo_changed = true; | |
1907 | ||
1908 | if (!dirty) | |
1909 | return 0; | |
1910 | ||
1911 | /* cursor changes don't warrant a FIFO recompute */ | |
1912 | if (dirty & ~BIT(PLANE_CURSOR)) { | |
1913 | const struct intel_crtc_state *old_crtc_state = | |
7b510451 | 1914 | intel_atomic_get_old_crtc_state(state, crtc); |
236c48e6 VS |
1915 | const struct vlv_fifo_state *old_fifo_state = |
1916 | &old_crtc_state->wm.vlv.fifo_state; | |
1917 | ||
1918 | ret = vlv_compute_fifo(crtc_state); | |
1919 | if (ret) | |
1920 | return ret; | |
1921 | ||
1922 | if (needs_modeset || | |
1923 | memcmp(old_fifo_state, fifo_state, | |
1924 | sizeof(*fifo_state)) != 0) | |
1925 | crtc_state->fifo_changed = true; | |
5012e604 | 1926 | } |
262cd2e1 | 1927 | |
ff32c54e | 1928 | /* initially allow all levels */ |
6d5019b6 | 1929 | wm_state->num_levels = intel_wm_num_levels(dev_priv); |
ff32c54e VS |
1930 | /* |
1931 | * Note that enabling cxsr with no primary/sprite planes | |
1932 | * enabled can wedge the pipe. Hence we only allow cxsr | |
1933 | * with exactly one enabled primary/sprite plane. | |
1934 | */ | |
5eeb798b | 1935 | wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; |
ff32c54e | 1936 | |
5012e604 | 1937 | for (level = 0; level < wm_state->num_levels; level++) { |
114d7dc0 | 1938 | const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; |
24977870 | 1939 | const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; |
5012e604 | 1940 | |
77d14ee4 | 1941 | if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) |
ff32c54e | 1942 | break; |
5012e604 | 1943 | |
ff32c54e VS |
1944 | for_each_plane_id_on_crtc(crtc, plane_id) { |
1945 | wm_state->wm[level].plane[plane_id] = | |
1946 | vlv_invert_wm_value(raw->plane[plane_id], | |
1947 | fifo_state->plane[plane_id]); | |
1948 | } | |
1949 | ||
1950 | wm_state->sr[level].plane = | |
1951 | vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], | |
5012e604 | 1952 | raw->plane[PLANE_SPRITE0], |
ff32c54e VS |
1953 | raw->plane[PLANE_SPRITE1]), |
1954 | sr_fifo_size); | |
262cd2e1 | 1955 | |
ff32c54e VS |
1956 | wm_state->sr[level].cursor = |
1957 | vlv_invert_wm_value(raw->plane[PLANE_CURSOR], | |
1958 | 63); | |
262cd2e1 VS |
1959 | } |
1960 | ||
ff32c54e VS |
1961 | if (level == 0) |
1962 | return -EINVAL; | |
1963 | ||
1964 | /* limit to only levels we can actually handle */ | |
1965 | wm_state->num_levels = level; | |
1966 | ||
1967 | /* invalidate the higher levels */ | |
1968 | vlv_invalidate_wms(crtc, wm_state, level); | |
1969 | ||
1970 | return 0; | |
262cd2e1 VS |
1971 | } |
1972 | ||
54f1b6e1 VS |
1973 | #define VLV_FIFO(plane, value) \ |
1974 | (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) | |
1975 | ||
ff32c54e | 1976 | static void vlv_atomic_update_fifo(struct intel_atomic_state *state, |
7a8fdb1f | 1977 | struct intel_crtc *crtc) |
54f1b6e1 | 1978 | { |
f07d43d2 | 1979 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
e33a4be8 | 1980 | struct intel_uncore *uncore = &dev_priv->uncore; |
7a8fdb1f VS |
1981 | const struct intel_crtc_state *crtc_state = |
1982 | intel_atomic_get_new_crtc_state(state, crtc); | |
814e7f0b VS |
1983 | const struct vlv_fifo_state *fifo_state = |
1984 | &crtc_state->wm.vlv.fifo_state; | |
f07d43d2 | 1985 | int sprite0_start, sprite1_start, fifo_size; |
2713eb41 | 1986 | u32 dsparb, dsparb2, dsparb3; |
54f1b6e1 | 1987 | |
236c48e6 VS |
1988 | if (!crtc_state->fifo_changed) |
1989 | return; | |
1990 | ||
f07d43d2 VS |
1991 | sprite0_start = fifo_state->plane[PLANE_PRIMARY]; |
1992 | sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; | |
1993 | fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; | |
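/*
 * Note that DSPARB holds the cumulative FIFO split points (start
 * offsets) rather than the per-plane sizes: the low 8 bits of each
 * boundary go into DSPARB/DSPARB3 and the 9th bit into DSPARB2,
 * which is what the >> 8 shifts in the switch below take care of.
 */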
54f1b6e1 | 1994 | |
48a1b8d4 PB |
1995 | drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); |
1996 | drm_WARN_ON(&dev_priv->drm, fifo_size != 511); | |
54f1b6e1 | 1997 | |
c137d660 VS |
1998 | trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); |
1999 | ||
44e921d4 VS |
2000 | /* |
2001 | * uncore.lock serves a double purpose here. It allows us to | |
2002 | * use the less expensive I915_{READ,WRITE}_FW() functions, and | |
2003 | * it protects the DSPARB registers from getting clobbered by | |
2004 | * parallel updates from multiple pipes. | |
2005 | * | |
2006 | * intel_pipe_update_start() has already disabled interrupts | |
2007 | * for us, so a plain spin_lock() is sufficient here. | |
2008 | */ | |
e33a4be8 | 2009 | spin_lock(&uncore->lock); |
467a14d9 | 2010 | |
54f1b6e1 | 2011 | switch (crtc->pipe) { |
54f1b6e1 | 2012 | case PIPE_A: |
e33a4be8 TU |
2013 | dsparb = intel_uncore_read_fw(uncore, DSPARB); |
2014 | dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); | |
54f1b6e1 VS |
2015 | |
2016 | dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | | |
2017 | VLV_FIFO(SPRITEB, 0xff)); | |
2018 | dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | | |
2019 | VLV_FIFO(SPRITEB, sprite1_start)); | |
2020 | ||
2021 | dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | | |
2022 | VLV_FIFO(SPRITEB_HI, 0x1)); | |
2023 | dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | | |
2024 | VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); | |
2025 | ||
e33a4be8 TU |
2026 | intel_uncore_write_fw(uncore, DSPARB, dsparb); |
2027 | intel_uncore_write_fw(uncore, DSPARB2, dsparb2); | |
54f1b6e1 VS |
2028 | break; |
2029 | case PIPE_B: | |
e33a4be8 TU |
2030 | dsparb = intel_uncore_read_fw(uncore, DSPARB); |
2031 | dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); | |
54f1b6e1 VS |
2032 | |
2033 | dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | | |
2034 | VLV_FIFO(SPRITED, 0xff)); | |
2035 | dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | | |
2036 | VLV_FIFO(SPRITED, sprite1_start)); | |
2037 | ||
2038 | dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | | |
2039 | VLV_FIFO(SPRITED_HI, 0xff)); | |
2040 | dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | | |
2041 | VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); | |
2042 | ||
e33a4be8 TU |
2043 | intel_uncore_write_fw(uncore, DSPARB, dsparb); |
2044 | intel_uncore_write_fw(uncore, DSPARB2, dsparb2); | |
54f1b6e1 VS |
2045 | break; |
2046 | case PIPE_C: | |
e33a4be8 TU |
2047 | dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); |
2048 | dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); | |
54f1b6e1 VS |
2049 | |
2050 | dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | | |
2051 | VLV_FIFO(SPRITEF, 0xff)); | |
2052 | dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | | |
2053 | VLV_FIFO(SPRITEF, sprite1_start)); | |
2054 | ||
2055 | dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | | |
2056 | VLV_FIFO(SPRITEF_HI, 0xff)); | |
2057 | dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | | |
2058 | VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); | |
2059 | ||
e33a4be8 TU |
2060 | intel_uncore_write_fw(uncore, DSPARB3, dsparb3); |
2061 | intel_uncore_write_fw(uncore, DSPARB2, dsparb2); | |
54f1b6e1 VS |
2062 | break; |
2063 | default: | |
2064 | break; | |
2065 | } | |
467a14d9 | 2066 | |
e33a4be8 | 2067 | intel_uncore_posting_read_fw(uncore, DSPARB); |
467a14d9 | 2068 | |
e33a4be8 | 2069 | spin_unlock(&uncore->lock); |
54f1b6e1 VS |
2070 | } |
2071 | ||
2072 | #undef VLV_FIFO | |
2073 | ||
cd1d3ee9 | 2074 | static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) |
4841da51 | 2075 | { |
2225f3c6 | 2076 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); |
5b9489cb ML |
2077 | struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; |
2078 | const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; | |
2079 | struct intel_atomic_state *intel_state = | |
2225f3c6 | 2080 | to_intel_atomic_state(new_crtc_state->uapi.state); |
5b9489cb ML |
2081 | const struct intel_crtc_state *old_crtc_state = |
2082 | intel_atomic_get_old_crtc_state(intel_state, crtc); | |
2083 | const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; | |
4841da51 VS |
2084 | int level; |
2085 | ||
2225f3c6 | 2086 | if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { |
5b9489cb ML |
2087 | *intermediate = *optimal; |
2088 | ||
2089 | intermediate->cxsr = false; | |
2090 | goto out; | |
2091 | } | |
2092 | ||
4841da51 | 2093 | intermediate->num_levels = min(optimal->num_levels, active->num_levels); |
5eeb798b | 2094 | intermediate->cxsr = optimal->cxsr && active->cxsr && |
5b9489cb | 2095 | !new_crtc_state->disable_cxsr; |
4841da51 VS |
2096 | |
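/*
 * Unlike the g4x path, the vlv values stored here are already
 * inverted (bigger value == more FIFO headroom), so min() rather
 * than max() is the conservative way to merge the old and new
 * watermarks for the window between the commit and the next vblank.
 */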
2097 | for (level = 0; level < intermediate->num_levels; level++) { | |
2098 | enum plane_id plane_id; | |
2099 | ||
2100 | for_each_plane_id_on_crtc(crtc, plane_id) { | |
2101 | intermediate->wm[level].plane[plane_id] = | |
2102 | min(optimal->wm[level].plane[plane_id], | |
2103 | active->wm[level].plane[plane_id]); | |
2104 | } | |
2105 | ||
2106 | intermediate->sr[level].plane = min(optimal->sr[level].plane, | |
2107 | active->sr[level].plane); | |
2108 | intermediate->sr[level].cursor = min(optimal->sr[level].cursor, | |
2109 | active->sr[level].cursor); | |
2110 | } | |
2111 | ||
2112 | vlv_invalidate_wms(crtc, intermediate, level); | |
2113 | ||
5b9489cb | 2114 | out: |
4841da51 VS |
2115 | /* |
2116 | * If our intermediate WM are identical to the final WM, then we can | |
2117 | * omit the post-vblank programming; only update if it's different. | |
2118 | */ | |
5eeb798b | 2119 | if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) |
5b9489cb | 2120 | new_crtc_state->wm.need_postvbl_update = true; |
4841da51 VS |
2121 | |
2122 | return 0; | |
2123 | } | |
2124 | ||
7c951c00 | 2125 | static void vlv_merge_wm(struct drm_i915_private *dev_priv, |
262cd2e1 VS |
2126 | struct vlv_wm_values *wm) |
2127 | { | |
2128 | struct intel_crtc *crtc; | |
c08e9132 | 2129 | int num_active_pipes = 0; |
262cd2e1 | 2130 | |
7c951c00 | 2131 | wm->level = dev_priv->wm.max_level; |
262cd2e1 VS |
2132 | wm->cxsr = true; |
2133 | ||
7c951c00 | 2134 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
7eb4941f | 2135 | const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; |
262cd2e1 VS |
2136 | |
2137 | if (!crtc->active) | |
2138 | continue; | |
2139 | ||
2140 | if (!wm_state->cxsr) | |
2141 | wm->cxsr = false; | |
2142 | ||
c08e9132 | 2143 | num_active_pipes++; |
262cd2e1 VS |
2144 | wm->level = min_t(int, wm->level, wm_state->num_levels - 1); |
2145 | } | |
2146 | ||
c08e9132 | 2147 | if (num_active_pipes != 1) |
262cd2e1 VS |
2148 | wm->cxsr = false; |
2149 | ||
c08e9132 | 2150 | if (num_active_pipes > 1) |
6f9c784b VS |
2151 | wm->level = VLV_WM_LEVEL_PM2; |
2152 | ||
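/*
 * With more than one active pipe only the PM2 watermarks are used;
 * the deeper PM5/DDR DVFS levels, like cxsr above, are presumably
 * only safe in the single-pipe maxfifo configuration.
 */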
7c951c00 | 2153 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
7eb4941f | 2154 | const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; |
262cd2e1 VS |
2155 | enum pipe pipe = crtc->pipe; |
2156 | ||
262cd2e1 | 2157 | wm->pipe[pipe] = wm_state->wm[wm->level]; |
ff32c54e | 2158 | if (crtc->active && wm->cxsr) |
262cd2e1 VS |
2159 | wm->sr = wm_state->sr[wm->level]; |
2160 | ||
1b31389c VS |
2161 | wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; |
2162 | wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; | |
2163 | wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; | |
2164 | wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; | |
262cd2e1 VS |
2165 | } |
2166 | } | |
2167 | ||
ff32c54e | 2168 | static void vlv_program_watermarks(struct drm_i915_private *dev_priv) |
262cd2e1 | 2169 | { |
fa292a4b VS |
2170 | struct vlv_wm_values *old_wm = &dev_priv->wm.vlv; |
2171 | struct vlv_wm_values new_wm = {}; | |
262cd2e1 | 2172 | |
fa292a4b | 2173 | vlv_merge_wm(dev_priv, &new_wm); |
262cd2e1 | 2174 | |
ff32c54e | 2175 | if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) |
262cd2e1 VS |
2176 | return; |
2177 | ||
fa292a4b | 2178 | if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) |
262cd2e1 VS |
2179 | chv_set_memory_dvfs(dev_priv, false); |
2180 | ||
fa292a4b | 2181 | if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) |
262cd2e1 VS |
2182 | chv_set_memory_pm5(dev_priv, false); |
2183 | ||
fa292a4b | 2184 | if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) |
3d90e649 | 2185 | _intel_set_memory_cxsr(dev_priv, false); |
262cd2e1 | 2186 | |
fa292a4b | 2187 | vlv_write_wm_values(dev_priv, &new_wm); |
262cd2e1 | 2188 | |
fa292a4b | 2189 | if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) |
3d90e649 | 2190 | _intel_set_memory_cxsr(dev_priv, true); |
262cd2e1 | 2191 | |
fa292a4b | 2192 | if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) |
262cd2e1 VS |
2193 | chv_set_memory_pm5(dev_priv, true); |
2194 | ||
fa292a4b | 2195 | if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) |
262cd2e1 VS |
2196 | chv_set_memory_dvfs(dev_priv, true); |
2197 | ||
fa292a4b | 2198 | *old_wm = new_wm; |
3c2777fd VS |
2199 | } |
2200 | ||
ff32c54e | 2201 | static void vlv_initial_watermarks(struct intel_atomic_state *state, |
7a8fdb1f | 2202 | struct intel_crtc *crtc) |
ff32c54e | 2203 | { |
7a8fdb1f VS |
2204 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
2205 | const struct intel_crtc_state *crtc_state = | |
2206 | intel_atomic_get_new_crtc_state(state, crtc); | |
ff32c54e VS |
2207 | |
2208 | mutex_lock(&dev_priv->wm.wm_mutex); | |
4841da51 VS |
2209 | crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; |
2210 | vlv_program_watermarks(dev_priv); | |
2211 | mutex_unlock(&dev_priv->wm.wm_mutex); | |
2212 | } | |
2213 | ||
2214 | static void vlv_optimize_watermarks(struct intel_atomic_state *state, | |
7a8fdb1f | 2215 | struct intel_crtc *crtc) |
4841da51 | 2216 | { |
7a8fdb1f VS |
2217 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
2218 | const struct intel_crtc_state *crtc_state = | |
2219 | intel_atomic_get_new_crtc_state(state, crtc); | |
4841da51 VS |
2220 | |
2221 | if (!crtc_state->wm.need_postvbl_update) | |
2222 | return; | |
2223 | ||
2224 | mutex_lock(&dev_priv->wm.wm_mutex); | |
88016a9f | 2225 | crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; |
ff32c54e VS |
2226 | vlv_program_watermarks(dev_priv); |
2227 | mutex_unlock(&dev_priv->wm.wm_mutex); | |
2228 | } | |
2229 | ||
432081bc | 2230 | static void i965_update_wm(struct intel_crtc *unused_crtc) |
b445e3b0 | 2231 | { |
ffc7a76b | 2232 | struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); |
efc2611e | 2233 | struct intel_crtc *crtc; |
b445e3b0 ED |
2234 | int srwm = 1; |
2235 | int cursor_sr = 16; | |
9858425c | 2236 | bool cxsr_enabled; |
b445e3b0 ED |
2237 | |
2238 | /* Calc sr entries for single-plane configs */ | |
ffc7a76b | 2239 | crtc = single_enabled_crtc(dev_priv); |
b445e3b0 ED |
2240 | if (crtc) { |
2241 | /* self-refresh has much higher latency */ | |
2242 | static const int sr_latency_ns = 12000; | |
efc2611e | 2243 | const struct drm_display_mode *adjusted_mode = |
1326a92c | 2244 | &crtc->config->hw.adjusted_mode; |
efc2611e VS |
2245 | const struct drm_framebuffer *fb = |
2246 | crtc->base.primary->state->fb; | |
241bfc38 | 2247 | int clock = adjusted_mode->crtc_clock; |
fec8cba3 | 2248 | int htotal = adjusted_mode->crtc_htotal; |
efc2611e | 2249 | int hdisplay = crtc->config->pipe_src_w; |
353c8598 | 2250 | int cpp = fb->format->cpp[0]; |
b445e3b0 ED |
2251 | int entries; |
2252 | ||
baf69ca8 VS |
2253 | entries = intel_wm_method2(clock, htotal, |
2254 | hdisplay, cpp, sr_latency_ns / 100); | |
b445e3b0 ED |
2255 | entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); |
2256 | srwm = I965_FIFO_SIZE - entries; | |
2257 | if (srwm < 0) | |
2258 | srwm = 1; | |
2259 | srwm &= 0x1ff; | |
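/*
 * Rough example, assuming I965_FIFO_SIZE is 512 entries and
 * I915_FIFO_LINE_SIZE is 64 bytes: a 1920 pixel wide plane at 4 bpp
 * on a ~148.5 MHz dotclock fetches less than one full line during
 * the 12 us self-refresh latency, which the formula rounds up to one
 * line, i.e. 1920 * 4 = 7680 bytes or 120 FIFO lines, giving
 * srwm = 512 - 120 = 392.
 */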
f8d18d5c WK |
2260 | drm_dbg_kms(&dev_priv->drm, |
2261 | "self-refresh entries: %d, wm: %d\n", | |
2262 | entries, srwm); | |
b445e3b0 | 2263 | |
baf69ca8 VS |
2264 | entries = intel_wm_method2(clock, htotal, |
2265 | crtc->base.cursor->state->crtc_w, 4, | |
2266 | sr_latency_ns / 100); | |
b445e3b0 | 2267 | entries = DIV_ROUND_UP(entries, |
baf69ca8 VS |
2268 | i965_cursor_wm_info.cacheline_size) + |
2269 | i965_cursor_wm_info.guard_size; | |
b445e3b0 | 2270 | |
baf69ca8 | 2271 | cursor_sr = i965_cursor_wm_info.fifo_size - entries; |
b445e3b0 ED |
2272 | if (cursor_sr > i965_cursor_wm_info.max_wm) |
2273 | cursor_sr = i965_cursor_wm_info.max_wm; | |
2274 | ||
f8d18d5c WK |
2275 | drm_dbg_kms(&dev_priv->drm, |
2276 | "self-refresh watermark: display plane %d " | |
2277 | "cursor %d\n", srwm, cursor_sr); | |
b445e3b0 | 2278 | |
9858425c | 2279 | cxsr_enabled = true; |
b445e3b0 | 2280 | } else { |
9858425c | 2281 | cxsr_enabled = false; |
b445e3b0 | 2282 | /* Turn off self refresh if both pipes are enabled */ |
5209b1f4 | 2283 | intel_set_memory_cxsr(dev_priv, false); |
b445e3b0 ED |
2284 | } |
2285 | ||
f8d18d5c WK |
2286 | drm_dbg_kms(&dev_priv->drm, |
2287 | "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | |
2288 | srwm); | |
b445e3b0 ED |
2289 | |
2290 | /* 965 has limitations... */ | |
f4998963 VS |
2291 | I915_WRITE(DSPFW1, FW_WM(srwm, SR) | |
2292 | FW_WM(8, CURSORB) | | |
2293 | FW_WM(8, PLANEB) | | |
2294 | FW_WM(8, PLANEA)); | |
2295 | I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | | |
2296 | FW_WM(8, PLANEC_OLD)); | |
b445e3b0 | 2297 | /* update cursor SR watermark */ |
f4998963 | 2298 | I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); |
9858425c ID |
2299 | |
2300 | if (cxsr_enabled) | |
2301 | intel_set_memory_cxsr(dev_priv, true); | |
b445e3b0 ED |
2302 | } |
2303 | ||
f4998963 VS |
2304 | #undef FW_WM |
2305 | ||
432081bc | 2306 | static void i9xx_update_wm(struct intel_crtc *unused_crtc) |
b445e3b0 | 2307 | { |
ffc7a76b | 2308 | struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); |
b445e3b0 | 2309 | const struct intel_watermark_params *wm_info; |
5ce9a649 JN |
2310 | u32 fwater_lo; |
2311 | u32 fwater_hi; | |
b445e3b0 ED |
2312 | int cwm, srwm = 1; |
2313 | int fifo_size; | |
2314 | int planea_wm, planeb_wm; | |
efc2611e | 2315 | struct intel_crtc *crtc, *enabled = NULL; |
b445e3b0 | 2316 | |
a9097be4 | 2317 | if (IS_I945GM(dev_priv)) |
b445e3b0 | 2318 | wm_info = &i945_wm_info; |
cf819eff | 2319 | else if (!IS_GEN(dev_priv, 2)) |
b445e3b0 ED |
2320 | wm_info = &i915_wm_info; |
2321 | else | |
9d539105 | 2322 | wm_info = &i830_a_wm_info; |
b445e3b0 | 2323 | |
bdaf8439 VS |
2324 | fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A); |
2325 | crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A); | |
efc2611e VS |
2326 | if (intel_crtc_active(crtc)) { |
2327 | const struct drm_display_mode *adjusted_mode = | |
1326a92c | 2328 | &crtc->config->hw.adjusted_mode; |
efc2611e VS |
2329 | const struct drm_framebuffer *fb = |
2330 | crtc->base.primary->state->fb; | |
2331 | int cpp; | |
2332 | ||
cf819eff | 2333 | if (IS_GEN(dev_priv, 2)) |
b9e0bda3 | 2334 | cpp = 4; |
efc2611e | 2335 | else |
353c8598 | 2336 | cpp = fb->format->cpp[0]; |
b9e0bda3 | 2337 | |
241bfc38 | 2338 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, |
b9e0bda3 | 2339 | wm_info, fifo_size, cpp, |
5aef6003 | 2340 | pessimal_latency_ns); |
b445e3b0 | 2341 | enabled = crtc; |
9d539105 | 2342 | } else { |
b445e3b0 | 2343 | planea_wm = fifo_size - wm_info->guard_size; |
9d539105 VS |
2344 | if (planea_wm > (long)wm_info->max_wm) |
2345 | planea_wm = wm_info->max_wm; | |
2346 | } | |
2347 | ||
cf819eff | 2348 | if (IS_GEN(dev_priv, 2)) |
9d539105 | 2349 | wm_info = &i830_bc_wm_info; |
b445e3b0 | 2350 | |
bdaf8439 VS |
2351 | fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B); |
2352 | crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B); | |
efc2611e VS |
2353 | if (intel_crtc_active(crtc)) { |
2354 | const struct drm_display_mode *adjusted_mode = | |
1326a92c | 2355 | &crtc->config->hw.adjusted_mode; |
efc2611e VS |
2356 | const struct drm_framebuffer *fb = |
2357 | crtc->base.primary->state->fb; | |
2358 | int cpp; | |
2359 | ||
cf819eff | 2360 | if (IS_GEN(dev_priv, 2)) |
b9e0bda3 | 2361 | cpp = 4; |
efc2611e | 2362 | else |
353c8598 | 2363 | cpp = fb->format->cpp[0]; |
b9e0bda3 | 2364 | |
241bfc38 | 2365 | planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, |
b9e0bda3 | 2366 | wm_info, fifo_size, cpp, |
5aef6003 | 2367 | pessimal_latency_ns); |
b445e3b0 ED |
2368 | if (enabled == NULL) |
2369 | enabled = crtc; | |
2370 | else | |
2371 | enabled = NULL; | |
9d539105 | 2372 | } else { |
b445e3b0 | 2373 | planeb_wm = fifo_size - wm_info->guard_size; |
9d539105 VS |
2374 | if (planeb_wm > (long)wm_info->max_wm) |
2375 | planeb_wm = wm_info->max_wm; | |
2376 | } | |
b445e3b0 | 2377 | |
f8d18d5c WK |
2378 | drm_dbg_kms(&dev_priv->drm, |
2379 | "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | |
b445e3b0 | 2380 | |
50a0bc90 | 2381 | if (IS_I915GM(dev_priv) && enabled) { |
2ff8fde1 | 2382 | struct drm_i915_gem_object *obj; |
2ab1bc9d | 2383 | |
efc2611e | 2384 | obj = intel_fb_obj(enabled->base.primary->state->fb); |
2ab1bc9d SV |
2385 | |
2386 | /* self-refresh seems busted with untiled */ | |
3e510a8e | 2387 | if (!i915_gem_object_is_tiled(obj)) |
2ab1bc9d SV |
2388 | enabled = NULL; |
2389 | } | |
2390 | ||
b445e3b0 ED |
2391 | /* |
2392 | * Overlay gets an aggressive default since video jitter is bad. | |
2393 | */ | |
2394 | cwm = 2; | |
2395 | ||
2396 | /* Play safe and disable self-refresh before adjusting watermarks. */ | |
5209b1f4 | 2397 | intel_set_memory_cxsr(dev_priv, false); |
b445e3b0 ED |
2398 | |
2399 | /* Calc sr entries for single-plane configs */ | |
03427fcb | 2400 | if (HAS_FW_BLC(dev_priv) && enabled) { |
b445e3b0 ED |
2401 | /* self-refresh has much higher latency */ |
2402 | static const int sr_latency_ns = 6000; | |
efc2611e | 2403 | const struct drm_display_mode *adjusted_mode = |
1326a92c | 2404 | &enabled->config->hw.adjusted_mode; |
efc2611e VS |
2405 | const struct drm_framebuffer *fb = |
2406 | enabled->base.primary->state->fb; | |
241bfc38 | 2407 | int clock = adjusted_mode->crtc_clock; |
fec8cba3 | 2408 | int htotal = adjusted_mode->crtc_htotal; |
efc2611e VS |
2409 | int hdisplay = enabled->config->pipe_src_w; |
2410 | int cpp; | |
b445e3b0 ED |
2411 | int entries; |
2412 | ||
50a0bc90 | 2413 | if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) |
2d1b5056 | 2414 | cpp = 4; |
efc2611e | 2415 | else |
353c8598 | 2416 | cpp = fb->format->cpp[0]; |
2d1b5056 | 2417 | |
baf69ca8 VS |
2418 | entries = intel_wm_method2(clock, htotal, hdisplay, cpp, |
2419 | sr_latency_ns / 100); | |
b445e3b0 | 2420 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); |
f8d18d5c WK |
2421 | drm_dbg_kms(&dev_priv->drm, |
2422 | "self-refresh entries: %d\n", entries); | |
b445e3b0 ED |
2423 | srwm = wm_info->fifo_size - entries; |
2424 | if (srwm < 0) | |
2425 | srwm = 1; | |
2426 | ||
50a0bc90 | 2427 | if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) |
b445e3b0 ED |
2428 | I915_WRITE(FW_BLC_SELF, |
2429 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | |
acb91359 | 2430 | else |
b445e3b0 ED |
2431 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); |
2432 | } | |
2433 | ||
f8d18d5c WK |
2434 | drm_dbg_kms(&dev_priv->drm, |
2435 | "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | |
2436 | planea_wm, planeb_wm, cwm, srwm); | |
b445e3b0 ED |
2437 | |
2438 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); | |
2439 | fwater_hi = (cwm & 0x1f); | |
2440 | ||
2441 | /* Set request length to 8 cachelines per fetch */ | |
2442 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); | |
2443 | fwater_hi = fwater_hi | (1 << 8); | |
2444 | ||
2445 | I915_WRITE(FW_BLC, fwater_lo); | |
2446 | I915_WRITE(FW_BLC2, fwater_hi); | |
2447 | ||
5209b1f4 ID |
2448 | if (enabled) |
2449 | intel_set_memory_cxsr(dev_priv, true); | |
b445e3b0 ED |
2450 | } |
2451 | ||
432081bc | 2452 | static void i845_update_wm(struct intel_crtc *unused_crtc) |
b445e3b0 | 2453 | { |
ffc7a76b | 2454 | struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); |
efc2611e | 2455 | struct intel_crtc *crtc; |
241bfc38 | 2456 | const struct drm_display_mode *adjusted_mode; |
5ce9a649 | 2457 | u32 fwater_lo; |
b445e3b0 ED |
2458 | int planea_wm; |
2459 | ||
ffc7a76b | 2460 | crtc = single_enabled_crtc(dev_priv); |
b445e3b0 ED |
2461 | if (crtc == NULL) |
2462 | return; | |
2463 | ||
1326a92c | 2464 | adjusted_mode = &crtc->config->hw.adjusted_mode; |
241bfc38 | 2465 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, |
feb56b93 | 2466 | &i845_wm_info, |
bdaf8439 | 2467 | dev_priv->display.get_fifo_size(dev_priv, PLANE_A), |
5aef6003 | 2468 | 4, pessimal_latency_ns); |
b445e3b0 ED |
2469 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
2470 | fwater_lo |= (3<<8) | planea_wm; | |
2471 | ||
f8d18d5c WK |
2472 | drm_dbg_kms(&dev_priv->drm, |
2473 | "Setting FIFO watermarks - A: %d\n", planea_wm); | |
b445e3b0 ED |
2474 | |
2475 | I915_WRITE(FW_BLC, fwater_lo); | |
2476 | } | |
2477 | ||
37126462 | 2478 | /* latency must be in 0.1us units. */ |
baf69ca8 VS |
2479 | static unsigned int ilk_wm_method1(unsigned int pixel_rate, |
2480 | unsigned int cpp, | |
2481 | unsigned int latency) | |
801bcfff | 2482 | { |
baf69ca8 | 2483 | unsigned int ret; |
3312ba65 | 2484 | |
baf69ca8 VS |
2485 | ret = intel_wm_method1(pixel_rate, cpp, latency); |
2486 | ret = DIV_ROUND_UP(ret, 64) + 2; | |
801bcfff PZ |
2487 | |
2488 | return ret; | |
2489 | } | |
2490 | ||
37126462 | 2491 | /* latency must be in 0.1us units. */ |
baf69ca8 VS |
2492 | static unsigned int ilk_wm_method2(unsigned int pixel_rate, |
2493 | unsigned int htotal, | |
2494 | unsigned int width, | |
2495 | unsigned int cpp, | |
2496 | unsigned int latency) | |
801bcfff | 2497 | { |
baf69ca8 | 2498 | unsigned int ret; |
3312ba65 | 2499 | |
baf69ca8 VS |
2500 | ret = intel_wm_method2(pixel_rate, htotal, |
2501 | width, cpp, latency); | |
801bcfff | 2502 | ret = DIV_ROUND_UP(ret, 64) + 2; |
baf69ca8 | 2503 | |
801bcfff PZ |
2504 | return ret; |
2505 | } | |
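/*
 * Method 1 sizes the watermark purely from the memory latency and the
 * pixel rate (bytes fetched while a request is outstanding), while
 * method 2 effectively rounds the latency up to whole scanlines and
 * therefore also depends on htotal and the plane width. The
 * primary/sprite helpers below use the smaller of the two for the LP
 * levels; the DIV_ROUND_UP(..., 64) + 2 turns bytes into 64-byte FIFO
 * entries with two entries of slack.
 */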
2506 | ||
5ce9a649 | 2507 | static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) |
cca32e9a | 2508 | { |
15126882 MR |
2509 | /* |
2510 | * Neither of these should be possible since this function shouldn't be | |
2511 | * called if the CRTC is off or the plane is invisible. But let's be | |
2512 | * extra paranoid to avoid a potential divide-by-zero if we screw up | |
2513 | * elsewhere in the driver. | |
2514 | */ | |
ac484963 | 2515 | if (WARN_ON(!cpp)) |
15126882 MR |
2516 | return 0; |
2517 | if (WARN_ON(!horiz_pixels)) | |
2518 | return 0; | |
2519 | ||
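/*
 * pri_val is the primary watermark in 64-byte FIFO entries, so
 * pri_val * 64 is bytes and horiz_pixels * cpp is bytes per line;
 * dividing the two expresses the FBC watermark in display lines,
 * with + 2 as slack.
 */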
ac484963 | 2520 | return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; |
cca32e9a PZ |
2521 | } |
2522 | ||
820c1980 | 2523 | struct ilk_wm_maximums { |
5ce9a649 JN |
2524 | u16 pri; |
2525 | u16 spr; | |
2526 | u16 cur; | |
2527 | u16 fbc; | |
cca32e9a PZ |
2528 | }; |
2529 | ||
37126462 VS |
2530 | /* |
2531 | * For both WM_PIPE and WM_LP. | |
2532 | * mem_value must be in 0.1us units. | |
2533 | */ | |
ec193640 ML |
2534 | static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, |
2535 | const struct intel_plane_state *plane_state, | |
5ce9a649 | 2536 | u32 mem_value, bool is_lp) |
801bcfff | 2537 | { |
5ce9a649 | 2538 | u32 method1, method2; |
8305494e | 2539 | int cpp; |
cca32e9a | 2540 | |
03981c6e VS |
2541 | if (mem_value == 0) |
2542 | return U32_MAX; | |
2543 | ||
ec193640 | 2544 | if (!intel_wm_plane_visible(crtc_state, plane_state)) |
801bcfff PZ |
2545 | return 0; |
2546 | ||
7b3cb17a | 2547 | cpp = plane_state->hw.fb->format->cpp[0]; |
8305494e | 2548 | |
ec193640 | 2549 | method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); |
cca32e9a PZ |
2550 | |
2551 | if (!is_lp) | |
2552 | return method1; | |
2553 | ||
ec193640 | 2554 | method2 = ilk_wm_method2(crtc_state->pixel_rate, |
1326a92c | 2555 | crtc_state->hw.adjusted_mode.crtc_htotal, |
f90a85e7 | 2556 | drm_rect_width(&plane_state->uapi.dst), |
ac484963 | 2557 | cpp, mem_value); |
cca32e9a PZ |
2558 | |
2559 | return min(method1, method2); | |
801bcfff PZ |
2560 | } |
2561 | ||
37126462 VS |
2562 | /* |
2563 | * For both WM_PIPE and WM_LP. | |
2564 | * mem_value must be in 0.1us units. | |
2565 | */ | |
ec193640 ML |
2566 | static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, |
2567 | const struct intel_plane_state *plane_state, | |
5ce9a649 | 2568 | u32 mem_value) |
801bcfff | 2569 | { |
5ce9a649 | 2570 | u32 method1, method2; |
8305494e | 2571 | int cpp; |
801bcfff | 2572 | |
03981c6e VS |
2573 | if (mem_value == 0) |
2574 | return U32_MAX; | |
2575 | ||
ec193640 | 2576 | if (!intel_wm_plane_visible(crtc_state, plane_state)) |
801bcfff PZ |
2577 | return 0; |
2578 | ||
7b3cb17a | 2579 | cpp = plane_state->hw.fb->format->cpp[0]; |
8305494e | 2580 | |
ec193640 ML |
2581 | method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); |
2582 | method2 = ilk_wm_method2(crtc_state->pixel_rate, | |
1326a92c | 2583 | crtc_state->hw.adjusted_mode.crtc_htotal, |
f90a85e7 | 2584 | drm_rect_width(&plane_state->uapi.dst), |
ac484963 | 2585 | cpp, mem_value); |
801bcfff PZ |
2586 | return min(method1, method2); |
2587 | } | |
2588 | ||
37126462 VS |
2589 | /* |
2590 | * For both WM_PIPE and WM_LP. | |
2591 | * mem_value must be in 0.1us units. | |
2592 | */ | |
ec193640 ML |
2593 | static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, |
2594 | const struct intel_plane_state *plane_state, | |
5ce9a649 | 2595 | u32 mem_value) |
801bcfff | 2596 | { |
a5509abd VS |
2597 | int cpp; |
2598 | ||
03981c6e VS |
2599 | if (mem_value == 0) |
2600 | return U32_MAX; | |
2601 | ||
ec193640 | 2602 | if (!intel_wm_plane_visible(crtc_state, plane_state)) |
801bcfff PZ |
2603 | return 0; |
2604 | ||
7b3cb17a | 2605 | cpp = plane_state->hw.fb->format->cpp[0]; |
a5509abd | 2606 | |
ec193640 | 2607 | return ilk_wm_method2(crtc_state->pixel_rate, |
1326a92c | 2608 | crtc_state->hw.adjusted_mode.crtc_htotal, |
f90a85e7 | 2609 | drm_rect_width(&plane_state->uapi.dst), |
3a612765 | 2610 | cpp, mem_value); |
801bcfff PZ |
2611 | } |
2612 | ||
cca32e9a | 2613 | /* Only for WM_LP. */ |
ec193640 ML |
2614 | static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, |
2615 | const struct intel_plane_state *plane_state, | |
5ce9a649 | 2616 | u32 pri_val) |
cca32e9a | 2617 | { |
8305494e | 2618 | int cpp; |
43d59eda | 2619 | |
ec193640 | 2620 | if (!intel_wm_plane_visible(crtc_state, plane_state)) |
cca32e9a PZ |
2621 | return 0; |
2622 | ||
7b3cb17a | 2623 | cpp = plane_state->hw.fb->format->cpp[0]; |
8305494e | 2624 | |
f90a85e7 ML |
2625 | return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst), |
2626 | cpp); | |
cca32e9a PZ |
2627 | } |
2628 | ||
175fded1 TU |
2629 | static unsigned int |
2630 | ilk_display_fifo_size(const struct drm_i915_private *dev_priv) | |
158ae64f | 2631 | { |
175fded1 | 2632 | if (INTEL_GEN(dev_priv) >= 8) |
416f4727 | 2633 | return 3072; |
175fded1 | 2634 | else if (INTEL_GEN(dev_priv) >= 7) |
158ae64f VS |
2635 | return 768; |
2636 | else | |
2637 | return 512; | |
2638 | } | |
2639 | ||
175fded1 TU |
2640 | static unsigned int |
2641 | ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, | |
2642 | int level, bool is_sprite) | |
4e975081 | 2643 | { |
175fded1 | 2644 | if (INTEL_GEN(dev_priv) >= 8) |
4e975081 VS |
2645 | /* BDW primary/sprite plane watermarks */ |
2646 | return level == 0 ? 255 : 2047; | |
175fded1 | 2647 | else if (INTEL_GEN(dev_priv) >= 7) |
4e975081 VS |
2648 | /* IVB/HSW primary/sprite plane watermarks */ |
2649 | return level == 0 ? 127 : 1023; | |
2650 | else if (!is_sprite) | |
2651 | /* ILK/SNB primary plane watermarks */ | |
2652 | return level == 0 ? 127 : 511; | |
2653 | else | |
2654 | /* ILK/SNB sprite plane watermarks */ | |
2655 | return level == 0 ? 63 : 255; | |
2656 | } | |
2657 | ||
175fded1 TU |
2658 | static unsigned int |
2659 | ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) | |
4e975081 | 2660 | { |
175fded1 | 2661 | if (INTEL_GEN(dev_priv) >= 7) |
4e975081 VS |
2662 | return level == 0 ? 63 : 255; |
2663 | else | |
2664 | return level == 0 ? 31 : 63; | |
2665 | } | |
2666 | ||
175fded1 | 2667 | static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) |
4e975081 | 2668 | { |
175fded1 | 2669 | if (INTEL_GEN(dev_priv) >= 8) |
4e975081 VS |
2670 | return 31; |
2671 | else | |
2672 | return 15; | |
2673 | } | |
2674 | ||
158ae64f | 2675 | /* Calculate the maximum primary/sprite plane watermark */ |
cd1d3ee9 | 2676 | static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, |
158ae64f | 2677 | int level, |
240264f4 | 2678 | const struct intel_wm_config *config, |
158ae64f VS |
2679 | enum intel_ddb_partitioning ddb_partitioning, |
2680 | bool is_sprite) | |
2681 | { | |
175fded1 | 2682 | unsigned int fifo_size = ilk_display_fifo_size(dev_priv); |
158ae64f VS |
2683 | |
2684 | /* if sprites aren't enabled, sprites get nothing */ | |
240264f4 | 2685 | if (is_sprite && !config->sprites_enabled) |
158ae64f VS |
2686 | return 0; |
2687 | ||
2688 | /* HSW allows LP1+ watermarks even with multiple pipes */ | |
240264f4 | 2689 | if (level == 0 || config->num_pipes_active > 1) { |
24977870 | 2690 | fifo_size /= INTEL_NUM_PIPES(dev_priv); |
158ae64f VS |
2691 | |
2692 | /* | |
2693 | * For some reason the non self refresh | |
2694 | * FIFO size is only half of the self | |
2695 | * refresh FIFO size on ILK/SNB. | |
2696 | */ | |
175fded1 | 2697 | if (INTEL_GEN(dev_priv) <= 6) |
158ae64f VS |
2698 | fifo_size /= 2; |
2699 | } | |
2700 | ||
240264f4 | 2701 | if (config->sprites_enabled) { |
158ae64f VS |
2702 | /* level 0 is always calculated with 1:1 split */ |
2703 | if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { | |
2704 | if (is_sprite) | |
2705 | fifo_size *= 5; | |
2706 | fifo_size /= 6; | |
2707 | } else { | |
2708 | fifo_size /= 2; | |
2709 | } | |
2710 | } | |
2711 | ||
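/*
 * Worked example of the split above: a single-pipe ILK/SNB LP level
 * starts with the full 512 entry FIFO. With sprites enabled, 5/6
 * partitioning gives the sprite 512 * 5 / 6 = 426 entries and the
 * primary 512 / 6 = 85, whereas the default 1:1 split gives each
 * 256; either result is still clamped to the register maximum below.
 */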
2712 | /* clamp to max that the registers can hold */ | |
175fded1 | 2713 | return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); |
158ae64f VS |
2714 | } |
2715 | ||
2716 | /* Calculate the maximum cursor plane watermark */ | |
cd1d3ee9 | 2717 | static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, |
240264f4 VS |
2718 | int level, |
2719 | const struct intel_wm_config *config) | |
158ae64f VS |
2720 | { |
2721 | /* HSW LP1+ watermarks w/ multiple pipes */ | |
240264f4 | 2722 | if (level > 0 && config->num_pipes_active > 1) |
158ae64f VS |
2723 | return 64; |
2724 | ||
2725 | /* otherwise just report max that registers can hold */ | |
cd1d3ee9 | 2726 | return ilk_cursor_wm_reg_max(dev_priv, level); |
158ae64f VS |
2727 | } |
2728 | ||
cd1d3ee9 | 2729 | static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, |
34982fe1 VS |
2730 | int level, |
2731 | const struct intel_wm_config *config, | |
2732 | enum intel_ddb_partitioning ddb_partitioning, | |
820c1980 | 2733 | struct ilk_wm_maximums *max) |
158ae64f | 2734 | { |
cd1d3ee9 MR |
2735 | max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); |
2736 | max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); | |
2737 | max->cur = ilk_cursor_wm_max(dev_priv, level, config); | |
2738 | max->fbc = ilk_fbc_wm_reg_max(dev_priv); | |
158ae64f VS |
2739 | } |
2740 | ||
175fded1 | 2741 | static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, |
a3cb4048 VS |
2742 | int level, |
2743 | struct ilk_wm_maximums *max) | |
2744 | { | |
175fded1 TU |
2745 | max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); |
2746 | max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); | |
2747 | max->cur = ilk_cursor_wm_reg_max(dev_priv, level); | |
2748 | max->fbc = ilk_fbc_wm_reg_max(dev_priv); | |
a3cb4048 VS |
2749 | } |
2750 | ||
d9395655 | 2751 | static bool ilk_validate_wm_level(int level, |
820c1980 | 2752 | const struct ilk_wm_maximums *max, |
d9395655 | 2753 | struct intel_wm_level *result) |
a9786a11 VS |
2754 | { |
2755 | bool ret; | |
2756 | ||
2757 | /* already determined to be invalid? */ | |
2758 | if (!result->enable) | |
2759 | return false; | |
2760 | ||
2761 | result->enable = result->pri_val <= max->pri && | |
2762 | result->spr_val <= max->spr && | |
2763 | result->cur_val <= max->cur; | |
2764 | ||
2765 | ret = result->enable; | |
2766 | ||
2767 | /* | |
2768 | * HACK until we can pre-compute everything, | |
2769 | * and thus fail gracefully if LP0 watermarks | |
2770 | * are exceeded... | |
2771 | */ | |
2772 | if (level == 0 && !result->enable) { | |
2773 | if (result->pri_val > max->pri) | |
2774 | DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", | |
2775 | level, result->pri_val, max->pri); | |
2776 | if (result->spr_val > max->spr) | |
2777 | DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", | |
2778 | level, result->spr_val, max->spr); | |
2779 | if (result->cur_val > max->cur) | |
2780 | DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", | |
2781 | level, result->cur_val, max->cur); | |
2782 | ||
5ce9a649 JN |
2783 | result->pri_val = min_t(u32, result->pri_val, max->pri); |
2784 | result->spr_val = min_t(u32, result->spr_val, max->spr); | |
2785 | result->cur_val = min_t(u32, result->cur_val, max->cur); | |
a9786a11 VS |
2786 | result->enable = true; |
2787 | } | |
2788 | ||
a9786a11 VS |
2789 | return ret; |
2790 | } | |
2791 | ||
d34ff9c6 | 2792 | static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, |
2a67054b | 2793 | const struct intel_crtc *crtc, |
6f5ddd17 | 2794 | int level, |
ec193640 | 2795 | struct intel_crtc_state *crtc_state, |
28283f4f ML |
2796 | const struct intel_plane_state *pristate, |
2797 | const struct intel_plane_state *sprstate, | |
2798 | const struct intel_plane_state *curstate, | |
1fd527cc | 2799 | struct intel_wm_level *result) |
6f5ddd17 | 2800 | { |
5ce9a649 JN |
2801 | u16 pri_latency = dev_priv->wm.pri_latency[level]; |
2802 | u16 spr_latency = dev_priv->wm.spr_latency[level]; | |
2803 | u16 cur_latency = dev_priv->wm.cur_latency[level]; | |
6f5ddd17 VS |
2804 | |
2805 | /* WM1+ latency values stored in 0.5us units */ | |
2806 | if (level > 0) { | |
2807 | pri_latency *= 5; | |
2808 | spr_latency *= 5; | |
2809 | cur_latency *= 5; | |
2810 | } | |
2811 | ||
e3bddded | 2812 | if (pristate) { |
ec193640 | 2813 | result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, |
e3bddded | 2814 | pri_latency, level); |
ec193640 | 2815 | result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); |
e3bddded ML |
2816 | } |
2817 | ||
2818 | if (sprstate) | |
ec193640 | 2819 | result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); |
e3bddded ML |
2820 | |
2821 | if (curstate) | |
ec193640 | 2822 | result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); |
e3bddded | 2823 | |
6f5ddd17 VS |
2824 | result->enable = true; |
2825 | } | |
2826 | ||
bb726519 | 2827 | static void intel_read_wm_latency(struct drm_i915_private *dev_priv, |
5ce9a649 | 2828 | u16 wm[8]) |
12b134df | 2829 | { |
1cea02db TU |
2830 | struct intel_uncore *uncore = &dev_priv->uncore; |
2831 | ||
50682ee6 | 2832 | if (INTEL_GEN(dev_priv) >= 9) { |
5ce9a649 | 2833 | u32 val; |
4f947386 | 2834 | int ret, i; |
5db94019 | 2835 | int level, max_level = ilk_wm_max_level(dev_priv); |
2af30a5c PB |
2836 | |
2837 | /* read the first set of memory latencies[0:3] */ | |
2838 | val = 0; /* data0 to be programmed to 0 for first set */ | |
2af30a5c PB |
2839 | ret = sandybridge_pcode_read(dev_priv, |
2840 | GEN9_PCODE_READ_MEM_LATENCY, | |
d284d514 | 2841 | &val, NULL); |
2af30a5c PB |
2842 | |
2843 | if (ret) { | |
f8d18d5c WK |
2844 | drm_err(&dev_priv->drm, |
2845 | "SKL Mailbox read error = %d\n", ret); | |
2af30a5c PB |
2846 | return; |
2847 | } | |
2848 | ||
2849 | wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; | |
2850 | wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & | |
2851 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2852 | wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & | |
2853 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2854 | wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & | |
2855 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2856 | ||
2857 | /* read the second set of memory latencies[4:7] */ | |
2858 | val = 1; /* data0 to be programmed to 1 for second set */ | |
2af30a5c PB |
2859 | ret = sandybridge_pcode_read(dev_priv, |
2860 | GEN9_PCODE_READ_MEM_LATENCY, | |
d284d514 | 2861 | &val, NULL); |
2af30a5c | 2862 | if (ret) { |
f8d18d5c WK |
2863 | drm_err(&dev_priv->drm, |
2864 | "SKL Mailbox read error = %d\n", ret); | |
2af30a5c PB |
2865 | return; |
2866 | } | |
2867 | ||
2868 | wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; | |
2869 | wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & | |
2870 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2871 | wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & | |
2872 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2873 | wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & | |
2874 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2875 | ||
0727e40a PZ |
2876 | /* |
2877 | * If a level n (n > 1) has a 0us latency, all levels m (m >= n) | |
2878 | * need to be disabled. We make sure to sanitize the values out | |
2879 | * of the punit to satisfy this requirement. | |
2880 | */ | |
2881 | for (level = 1; level <= max_level; level++) { | |
2882 | if (wm[level] == 0) { | |
2883 | for (i = level + 1; i <= max_level; i++) | |
2884 | wm[i] = 0; | |
2885 | break; | |
2886 | } | |
2887 | } | |
2888 | ||
367294be | 2889 | /* |
50682ee6 | 2890 | * WaWmMemoryReadLatency:skl+,glk |
6f97235b | 2891 | * |
367294be | 2892 | * punit doesn't take into account the read latency so we need |
0727e40a PZ |
2893 | * to add 2us to the various latency levels we retrieve from the |
2894 | * punit when level 0 response data is 0us. |
367294be | 2895 | */ |
0727e40a PZ |
2896 | if (wm[0] == 0) { |
2897 | wm[0] += 2; | |
2898 | for (level = 1; level <= max_level; level++) { | |
2899 | if (wm[level] == 0) | |
2900 | break; | |
367294be | 2901 | wm[level] += 2; |
4f947386 | 2902 | } |
0727e40a PZ |
2903 | } |
2904 | ||
86b59287 MK |
2905 | /* |
2906 | * WA Level-0 adjustment for 16GB DIMMs: SKL+ | |
2907 | * Apply this adjustment to prevent underruns on 16GB DIMMs. If |
2908 | * the DIMM info could not be read, assume 16GB DIMMs so the |
2909 | * adjustment is still applied and underruns are avoided. |
2910 | */ | |
5d6f36b2 | 2911 | if (dev_priv->dram_info.is_16gb_dimm) |
86b59287 MK |
2912 | wm[0] += 1; |
2913 | ||
8652744b | 2914 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
1cea02db | 2915 | u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD); |
12b134df VS |
2916 | |
2917 | wm[0] = (sskpd >> 56) & 0xFF; | |
2918 | if (wm[0] == 0) | |
2919 | wm[0] = sskpd & 0xF; | |
e5d5019e VS |
2920 | wm[1] = (sskpd >> 4) & 0xFF; |
2921 | wm[2] = (sskpd >> 12) & 0xFF; | |
2922 | wm[3] = (sskpd >> 20) & 0x1FF; | |
2923 | wm[4] = (sskpd >> 32) & 0x1FF; | |
bb726519 | 2924 | } else if (INTEL_GEN(dev_priv) >= 6) { |
1cea02db | 2925 | u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD); |
63cf9a13 VS |
2926 | |
2927 | wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; | |
2928 | wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; | |
2929 | wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; | |
2930 | wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; | |
bb726519 | 2931 | } else if (INTEL_GEN(dev_priv) >= 5) { |
1cea02db | 2932 | u32 mltr = intel_uncore_read(uncore, MLTR_ILK); |
3a88d0ac VS |
2933 | |
2934 | /* ILK primary LP0 latency is 700 ns */ | |
2935 | wm[0] = 7; | |
2936 | wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; | |
2937 | wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; | |
50682ee6 PZ |
2938 | } else { |
2939 | MISSING_CASE(INTEL_DEVID(dev_priv)); | |
12b134df VS |
2940 | } |
2941 | } | |
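/*
 * Self-contained sketch of the GEN9 path above (the sketch_* helper and
 * its "val0"/"val1" parameters are illustrative, not part of i915):
 * each pcode reply packs four 8-bit latencies, the first reply (data0
 * == 0) holding levels 0-3 and the second (data0 == 1) levels 4-7. The
 * sanitize rule and the WaWmMemoryReadLatency +2us adjustment from the
 * code above are reproduced as well.
 */
static void sketch_decode_skl_latency(unsigned int val0, unsigned int val1,
				      unsigned int wm[8])
{
	int level, i;

	for (i = 0; i < 4; i++) {
		wm[i] = (val0 >> (8 * i)) & 0xff;
		wm[i + 4] = (val1 >> (8 * i)) & 0xff;
	}

	/* a zero latency at level n invalidates every level above it */
	for (level = 1; level < 8; level++) {
		if (wm[level] == 0) {
			for (i = level + 1; i < 8; i++)
				wm[i] = 0;
			break;
		}
	}

	/* add 2us to all populated levels when level 0 reads back as 0us */
	if (wm[0] == 0) {
		wm[0] += 2;
		for (level = 1; level < 8 && wm[level]; level++)
			wm[level] += 2;
	}
}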
2942 | ||
5db94019 | 2943 | static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, |
5ce9a649 | 2944 | u16 wm[5]) |
53615a5e VS |
2945 | { |
2946 | /* ILK sprite LP0 latency is 1300 ns */ | |
cf819eff | 2947 | if (IS_GEN(dev_priv, 5)) |
53615a5e VS |
2948 | wm[0] = 13; |
2949 | } | |
2950 | ||
fd6b8f43 | 2951 | static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, |
5ce9a649 | 2952 | u16 wm[5]) |
53615a5e VS |
2953 | { |
2954 | /* ILK cursor LP0 latency is 1300 ns */ | |
cf819eff | 2955 | if (IS_GEN(dev_priv, 5)) |
53615a5e | 2956 | wm[0] = 13; |
53615a5e VS |
2957 | } |
2958 | ||
5db94019 | 2959 | int ilk_wm_max_level(const struct drm_i915_private *dev_priv) |
26ec971e | 2960 | { |
26ec971e | 2961 | /* how many WM levels are we expecting */ |
8652744b | 2962 | if (INTEL_GEN(dev_priv) >= 9) |
2af30a5c | 2963 | return 7; |
8652744b | 2964 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
ad0d6dc4 | 2965 | return 4; |
8652744b | 2966 | else if (INTEL_GEN(dev_priv) >= 6) |
ad0d6dc4 | 2967 | return 3; |
26ec971e | 2968 | else |
ad0d6dc4 VS |
2969 | return 2; |
2970 | } | |
7526ed79 | 2971 | |
5db94019 | 2972 | static void intel_print_wm_latency(struct drm_i915_private *dev_priv, |
ad0d6dc4 | 2973 | const char *name, |
5ce9a649 | 2974 | const u16 wm[8]) |
ad0d6dc4 | 2975 | { |
5db94019 | 2976 | int level, max_level = ilk_wm_max_level(dev_priv); |
26ec971e VS |
2977 | |
2978 | for (level = 0; level <= max_level; level++) { | |
2979 | unsigned int latency = wm[level]; | |
2980 | ||
2981 | if (latency == 0) { | |
f8d18d5c WK |
2982 | drm_dbg_kms(&dev_priv->drm, |
2983 | "%s WM%d latency not provided\n", | |
2984 | name, level); | |
26ec971e VS |
2985 | continue; |
2986 | } | |
2987 | ||
2af30a5c PB |
2988 | /* |
2989 | * - latencies are in us on gen9. | |
2990 | * - before then, WM1+ latency values are in 0.5us units | |
2991 | */ | |
dfc267ab | 2992 | if (INTEL_GEN(dev_priv) >= 9) |
2af30a5c PB |
2993 | latency *= 10; |
2994 | else if (level > 0) | |
26ec971e VS |
2995 | latency *= 5; |
2996 | ||
f8d18d5c WK |
2997 | drm_dbg_kms(&dev_priv->drm, |
2998 | "%s WM%d latency %u (%u.%u usec)\n", name, level, | |
2999 | wm[level], latency / 10, latency % 10); | |
26ec971e VS |
3000 | } |
3001 | } | |
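/*
 * The unit handling above, as a standalone helper (sketch only): gen9+
 * stores latencies in 1us units, earlier platforms store WM1+ values in
 * 0.5us units and WM0 in 0.1us units, and the printout works in tenths
 * of a microsecond throughout.
 */
static unsigned int sketch_latency_in_0_1us(unsigned int raw, int gen, int level)
{
	if (gen >= 9)
		return raw * 10;	/* 1us units -> 0.1us units */
	if (level > 0)
		return raw * 5;		/* 0.5us units -> 0.1us units */
	return raw;			/* WM0 already in 0.1us units */
}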
3002 | ||
e95a2f75 | 3003 | static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, |
5ce9a649 | 3004 | u16 wm[5], u16 min) |
e95a2f75 | 3005 | { |
5db94019 | 3006 | int level, max_level = ilk_wm_max_level(dev_priv); |
e95a2f75 VS |
3007 | |
3008 | if (wm[0] >= min) | |
3009 | return false; | |
3010 | ||
3011 | wm[0] = max(wm[0], min); | |
3012 | for (level = 1; level <= max_level; level++) | |
5ce9a649 | 3013 | wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); |
e95a2f75 VS |
3014 | |
3015 | return true; | |
3016 | } | |
3017 | ||
bb726519 | 3018 | static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) |
e95a2f75 | 3019 | { |
e95a2f75 VS |
3020 | bool changed; |
3021 | ||
3022 | /* | |
3023 | * The BIOS provided WM memory latency values are often | |
3024 | * inadequate for high resolution displays. Adjust them. | |
3025 | */ | |
3026 | changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | | |
3027 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | | |
3028 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); | |
3029 | ||
3030 | if (!changed) | |
3031 | return; | |
3032 | ||
f8d18d5c WK |
3033 | drm_dbg_kms(&dev_priv->drm, |
3034 | "WM latency values increased to avoid potential underruns\n"); | |
5db94019 TU |
3035 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); |
3036 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | |
3037 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | |
e95a2f75 VS |
3038 | } |
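/*
 * Minimal sketch of the raise performed above (sketch_* is illustrative
 * only): WM0 is lifted to 12, i.e. 1.2us in its 0.1us units, and every
 * WM1+ level to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5us in its
 * 0.5us units; values already above the minimum are left untouched.
 */
static void sketch_raise_wm_latency(unsigned int wm[5], int max_level)
{
	const unsigned int min = 12;
	int level;

	if (wm[0] >= min)
		return;

	wm[0] = min;
	for (level = 1; level <= max_level; level++)
		if (wm[level] < (min + 4) / 5)
			wm[level] = (min + 4) / 5;
}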
3039 | ||
03981c6e VS |
3040 | static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) |
3041 | { | |
3042 | /* | |
3043 | * On some SNB machines (Thinkpad X220 Tablet at least) | |
3044 | * LP3 usage can cause vblank interrupts to be lost. | |
3045 | * The DEIIR bit will go high but it looks like the CPU | |
3046 | * never gets interrupted. | |
3047 | * | |
3048 | * It's not clear whether other interrupt sources could |
3049 | * be affected or if this is somehow limited to vblank | |
3050 | * interrupts only. To play it safe we disable LP3 | |
3051 | * watermarks entirely. | |
3052 | */ | |
3053 | if (dev_priv->wm.pri_latency[3] == 0 && | |
3054 | dev_priv->wm.spr_latency[3] == 0 && | |
3055 | dev_priv->wm.cur_latency[3] == 0) | |
3056 | return; | |
3057 | ||
3058 | dev_priv->wm.pri_latency[3] = 0; | |
3059 | dev_priv->wm.spr_latency[3] = 0; | |
3060 | dev_priv->wm.cur_latency[3] = 0; | |
3061 | ||
f8d18d5c WK |
3062 | drm_dbg_kms(&dev_priv->drm, |
3063 | "LP3 watermarks disabled due to potential for lost interrupts\n"); | |
03981c6e VS |
3064 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); |
3065 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | |
3066 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | |
3067 | } | |
3068 | ||
bb726519 | 3069 | static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) |
53615a5e | 3070 | { |
bb726519 | 3071 | intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); |
53615a5e VS |
3072 | |
3073 | memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, | |
3074 | sizeof(dev_priv->wm.pri_latency)); | |
3075 | memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, | |
3076 | sizeof(dev_priv->wm.pri_latency)); | |
3077 | ||
5db94019 | 3078 | intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency); |
fd6b8f43 | 3079 | intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency); |
26ec971e | 3080 | |
5db94019 TU |
3081 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); |
3082 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | |
3083 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | |
e95a2f75 | 3084 | |
cf819eff | 3085 | if (IS_GEN(dev_priv, 6)) { |
bb726519 | 3086 | snb_wm_latency_quirk(dev_priv); |
03981c6e VS |
3087 | snb_wm_lp3_irq_quirk(dev_priv); |
3088 | } | |
53615a5e VS |
3089 | } |
3090 | ||
bb726519 | 3091 | static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) |
2af30a5c | 3092 | { |
bb726519 | 3093 | intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency); |
5db94019 | 3094 | intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); |
2af30a5c PB |
3095 | } |
3096 | ||
cd1d3ee9 | 3097 | static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, |
ed4a6a7c MR |
3098 | struct intel_pipe_wm *pipe_wm) |
3099 | { | |
3100 | /* LP0 watermark maximums depend on this pipe alone */ | |
3101 | const struct intel_wm_config config = { | |
3102 | .num_pipes_active = 1, | |
3103 | .sprites_enabled = pipe_wm->sprites_enabled, | |
3104 | .sprites_scaled = pipe_wm->sprites_scaled, | |
3105 | }; | |
3106 | struct ilk_wm_maximums max; | |
3107 | ||
3108 | /* LP0 watermarks always use 1/2 DDB partitioning */ | |
cd1d3ee9 | 3109 | ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); |
ed4a6a7c MR |
3110 | |
3111 | /* At least LP0 must be valid */ | |
3112 | if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { | |
f8d18d5c | 3113 | drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); |
ed4a6a7c MR |
3114 | return false; |
3115 | } | |
3116 | ||
3117 | return true; | |
3118 | } | |
3119 | ||
0b2ae6d7 | 3120 | /* Compute new watermarks for the pipe */ |
ec193640 | 3121 | static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) |
0b2ae6d7 | 3122 | { |
2225f3c6 | 3123 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
2a67054b | 3124 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
86c8bbbe | 3125 | struct intel_pipe_wm *pipe_wm; |
af9fbfa6 ML |
3126 | struct intel_plane *plane; |
3127 | const struct intel_plane_state *plane_state; | |
28283f4f ML |
3128 | const struct intel_plane_state *pristate = NULL; |
3129 | const struct intel_plane_state *sprstate = NULL; | |
3130 | const struct intel_plane_state *curstate = NULL; | |
5db94019 | 3131 | int level, max_level = ilk_wm_max_level(dev_priv), usable_level; |
820c1980 | 3132 | struct ilk_wm_maximums max; |
0b2ae6d7 | 3133 | |
ec193640 | 3134 | pipe_wm = &crtc_state->wm.ilk.optimal; |
86c8bbbe | 3135 | |
af9fbfa6 ML |
3136 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { |
3137 | if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) | |
3138 | pristate = plane_state; | |
3139 | else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) | |
3140 | sprstate = plane_state; | |
3141 | else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) | |
3142 | curstate = plane_state; | |
43d59eda MR |
3143 | } |
3144 | ||
1326a92c | 3145 | pipe_wm->pipe_enabled = crtc_state->hw.active; |
e3bddded | 3146 | if (sprstate) { |
f90a85e7 ML |
3147 | pipe_wm->sprites_enabled = sprstate->uapi.visible; |
3148 | pipe_wm->sprites_scaled = sprstate->uapi.visible && | |
3149 | (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 || | |
3150 | drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16); | |
e3bddded ML |
3151 | } |
3152 | ||
d81f04c5 ML |
3153 | usable_level = max_level; |
3154 | ||
7b39a0b7 | 3155 | /* ILK/SNB: LP2+ watermarks only w/o sprites */ |
175fded1 | 3156 | if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled) |
d81f04c5 | 3157 | usable_level = 1; |
7b39a0b7 VS |
3158 | |
3159 | /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ | |
ed4a6a7c | 3160 | if (pipe_wm->sprites_scaled) |
d81f04c5 | 3161 | usable_level = 0; |
7b39a0b7 | 3162 | |
71f0a626 | 3163 | memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); |
2a67054b | 3164 | ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, |
28283f4f | 3165 | pristate, sprstate, curstate, &pipe_wm->wm[0]); |
0b2ae6d7 | 3166 | |
cd1d3ee9 | 3167 | if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) |
1a426d61 | 3168 | return -EINVAL; |
a3cb4048 | 3169 | |
175fded1 | 3170 | ilk_compute_wm_reg_maximums(dev_priv, 1, &max); |
a3cb4048 | 3171 | |
28283f4f ML |
3172 | for (level = 1; level <= usable_level; level++) { |
3173 | struct intel_wm_level *wm = &pipe_wm->wm[level]; | |
a3cb4048 | 3174 | |
2a67054b | 3175 | ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, |
d81f04c5 | 3176 | pristate, sprstate, curstate, wm); |
a3cb4048 VS |
3177 | |
3178 | /* | |
3179 | * Disable any watermark level that exceeds the | |
3180 | * register maximums since such watermarks are | |
3181 | * always invalid. | |
3182 | */ | |
28283f4f ML |
3183 | if (!ilk_validate_wm_level(level, &max, wm)) { |
3184 | memset(wm, 0, sizeof(*wm)); | |
3185 | break; | |
3186 | } | |
a3cb4048 VS |
3187 | } |
3188 | ||
86c8bbbe | 3189 | return 0; |
0b2ae6d7 VS |
3190 | } |
3191 | ||
ed4a6a7c MR |
3192 | /* |
3193 | * Build a set of 'intermediate' watermark values that satisfy both the old | |
3194 | * state and the new state. These can be programmed to the hardware | |
3195 | * immediately. | |
3196 | */ | |
cd1d3ee9 | 3197 | static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) |
ed4a6a7c | 3198 | { |
2225f3c6 | 3199 | struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc); |
cd1d3ee9 | 3200 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); |
e8f1f02e | 3201 | struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; |
b6b178a7 | 3202 | struct intel_atomic_state *intel_state = |
2225f3c6 | 3203 | to_intel_atomic_state(newstate->uapi.state); |
b6b178a7 ML |
3204 | const struct intel_crtc_state *oldstate = |
3205 | intel_atomic_get_old_crtc_state(intel_state, intel_crtc); | |
3206 | const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; | |
cd1d3ee9 | 3207 | int level, max_level = ilk_wm_max_level(dev_priv); |
ed4a6a7c MR |
3208 | |
3209 | /* | |
3210 | * Start with the final, target watermarks, then combine with the | |
3211 | * currently active watermarks to get values that are safe both before | |
3212 | * and after the vblank. | |
3213 | */ | |
e8f1f02e | 3214 | *a = newstate->wm.ilk.optimal; |
2225f3c6 | 3215 | if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) || |
f255c624 | 3216 | intel_state->skip_intermediate_wm) |
b6b178a7 ML |
3217 | return 0; |
3218 | ||
ed4a6a7c MR |
3219 | a->pipe_enabled |= b->pipe_enabled; |
3220 | a->sprites_enabled |= b->sprites_enabled; | |
3221 | a->sprites_scaled |= b->sprites_scaled; | |
3222 | ||
3223 | for (level = 0; level <= max_level; level++) { | |
3224 | struct intel_wm_level *a_wm = &a->wm[level]; | |
3225 | const struct intel_wm_level *b_wm = &b->wm[level]; | |
3226 | ||
3227 | a_wm->enable &= b_wm->enable; | |
3228 | a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); | |
3229 | a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); | |
3230 | a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); | |
3231 | a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); | |
3232 | } | |
3233 | ||
3234 | /* | |
3235 | * We need to make sure that these merged watermark values are | |
3236 | * actually a valid configuration themselves. If they're not, | |
3237 | * there's no safe way to transition from the old state to | |
3238 | * the new state, so we need to fail the atomic transaction. | |
3239 | */ | |
cd1d3ee9 | 3240 | if (!ilk_validate_pipe_wm(dev_priv, a)) |
ed4a6a7c MR |
3241 | return -EINVAL; |
3242 | ||
3243 | /* | |
3244 | * If our intermediate WM are identical to the final WM, then we can | |
3245 | * omit the post-vblank programming; only update if it's different. | |
3246 | */ | |
5eeb798b VS |
3247 | if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0) |
3248 | newstate->wm.need_postvbl_update = true; | |
ed4a6a7c MR |
3249 | |
3250 | return 0; | |
3251 | } | |
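/*
 * The merge rule above for a single level, reduced to a standalone
 * sketch (types and helper are illustrative): enables are AND-ed and
 * every watermark value takes the maximum of the old and new state, so
 * the intermediate value is safe on both sides of the vblank.
 */
struct sketch_wm_level { int enable; unsigned int pri, spr, cur, fbc; };

static struct sketch_wm_level
sketch_intermediate_level(struct sketch_wm_level o, struct sketch_wm_level n)
{
	struct sketch_wm_level m;

	m.enable = o.enable && n.enable;
	m.pri = n.pri > o.pri ? n.pri : o.pri;
	m.spr = n.spr > o.spr ? n.spr : o.spr;
	m.cur = n.cur > o.cur ? n.cur : o.cur;
	m.fbc = n.fbc > o.fbc ? n.fbc : o.fbc;
	return m;
}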
3252 | ||
0b2ae6d7 VS |
3253 | /* |
3254 | * Merge the watermarks from all active pipes for a specific level. | |
3255 | */ | |
cd1d3ee9 | 3256 | static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, |
0b2ae6d7 VS |
3257 | int level, |
3258 | struct intel_wm_level *ret_wm) | |
3259 | { | |
3260 | const struct intel_crtc *intel_crtc; | |
3261 | ||
d52fea5b VS |
3262 | ret_wm->enable = true; |
3263 | ||
cd1d3ee9 | 3264 | for_each_intel_crtc(&dev_priv->drm, intel_crtc) { |
ed4a6a7c | 3265 | const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; |
fe392efd VS |
3266 | const struct intel_wm_level *wm = &active->wm[level]; |
3267 | ||
3268 | if (!active->pipe_enabled) | |
3269 | continue; | |
0b2ae6d7 | 3270 | |
d52fea5b VS |
3271 | /* |
3272 | * The watermark values may have been used in the past, | |
3273 | * so we must maintain them in the registers for some | |
3274 | * time even if the level is now disabled. | |
3275 | */ | |
0b2ae6d7 | 3276 | if (!wm->enable) |
d52fea5b | 3277 | ret_wm->enable = false; |
0b2ae6d7 VS |
3278 | |
3279 | ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); | |
3280 | ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); | |
3281 | ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); | |
3282 | ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); | |
3283 | } | |
0b2ae6d7 VS |
3284 | } |
3285 | ||
3286 | /* | |
3287 | * Merge all low power watermarks for all active pipes. | |
3288 | */ | |
cd1d3ee9 | 3289 | static void ilk_wm_merge(struct drm_i915_private *dev_priv, |
0ba22e26 | 3290 | const struct intel_wm_config *config, |
820c1980 | 3291 | const struct ilk_wm_maximums *max, |
0b2ae6d7 VS |
3292 | struct intel_pipe_wm *merged) |
3293 | { | |
5db94019 | 3294 | int level, max_level = ilk_wm_max_level(dev_priv); |
d52fea5b | 3295 | int last_enabled_level = max_level; |
0b2ae6d7 | 3296 | |
0ba22e26 | 3297 | /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ |
fd6b8f43 | 3298 | if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && |
0ba22e26 | 3299 | config->num_pipes_active > 1) |
1204d5ba | 3300 | last_enabled_level = 0; |
0ba22e26 | 3301 | |
6c8b6c28 | 3302 | /* ILK: FBC WM must be disabled always */ |
175fded1 | 3303 | merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6; |
0b2ae6d7 VS |
3304 | |
3305 | /* merge each WM1+ level */ | |
3306 | for (level = 1; level <= max_level; level++) { | |
3307 | struct intel_wm_level *wm = &merged->wm[level]; | |
3308 | ||
cd1d3ee9 | 3309 | ilk_merge_wm_level(dev_priv, level, wm); |
0b2ae6d7 | 3310 | |
d52fea5b VS |
3311 | if (level > last_enabled_level) |
3312 | wm->enable = false; | |
3313 | else if (!ilk_validate_wm_level(level, max, wm)) | |
3314 | /* make sure all following levels get disabled */ | |
3315 | last_enabled_level = level - 1; | |
0b2ae6d7 VS |
3316 | |
3317 | /* | |
3318 | * The spec says it is preferred to disable | |
3319 | * FBC WMs instead of disabling a WM level. | |
3320 | */ | |
3321 | if (wm->fbc_val > max->fbc) { | |
d52fea5b VS |
3322 | if (wm->enable) |
3323 | merged->fbc_wm_enabled = false; | |
0b2ae6d7 VS |
3324 | wm->fbc_val = 0; |
3325 | } | |
3326 | } | |
6c8b6c28 VS |
3327 | |
3328 | /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ | |
3329 | /* | |
3330 | * FIXME this is racy. FBC might get enabled later. | |
3331 | * What we should check here is whether FBC can be | |
3332 | * enabled sometime later. | |
3333 | */ | |
cf819eff | 3334 | if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled && |
0e631adc | 3335 | intel_fbc_is_active(dev_priv)) { |
6c8b6c28 VS |
3336 | for (level = 2; level <= max_level; level++) { |
3337 | struct intel_wm_level *wm = &merged->wm[level]; | |
3338 | ||
3339 | wm->enable = false; | |
3340 | } | |
3341 | } | |
0b2ae6d7 VS |
3342 | } |
3343 | ||
b380ca3c VS |
3344 | static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) |
3345 | { | |
3346 | /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ | |
3347 | return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); | |
3348 | } | |
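/*
 * The mapping above, spelled out (sketch only): with four levels
 * (wm[4] disabled) LP1/LP2/LP3 map to levels 1/2/3; when a fifth level
 * exists, as on HSW/BDW, LP2 and LP3 shift up to levels 3 and 4, e.g.
 * sketch_wm_lp_to_level(2, 0) == 2 but sketch_wm_lp_to_level(2, 1) == 3.
 */
static int sketch_wm_lp_to_level(int wm_lp, int has_wm4)
{
	return wm_lp + (wm_lp >= 2 && has_wm4);
}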
3349 | ||
a68d68ee | 3350 | /* The value we need to program into the WM_LPx latency field */ |
cd1d3ee9 MR |
3351 | static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, |
3352 | int level) | |
a68d68ee | 3353 | { |
8652744b | 3354 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
a68d68ee VS |
3355 | return 2 * level; |
3356 | else | |
3357 | return dev_priv->wm.pri_latency[level]; | |
3358 | } | |
3359 | ||
cd1d3ee9 | 3360 | static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, |
0362c781 | 3361 | const struct intel_pipe_wm *merged, |
609cedef | 3362 | enum intel_ddb_partitioning partitioning, |
820c1980 | 3363 | struct ilk_wm_values *results) |
801bcfff | 3364 | { |
0b2ae6d7 VS |
3365 | struct intel_crtc *intel_crtc; |
3366 | int level, wm_lp; | |
cca32e9a | 3367 | |
0362c781 | 3368 | results->enable_fbc_wm = merged->fbc_wm_enabled; |
609cedef | 3369 | results->partitioning = partitioning; |
cca32e9a | 3370 | |
0b2ae6d7 | 3371 | /* LP1+ register values */ |
cca32e9a | 3372 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { |
1fd527cc | 3373 | const struct intel_wm_level *r; |
801bcfff | 3374 | |
b380ca3c | 3375 | level = ilk_wm_lp_to_level(wm_lp, merged); |
0b2ae6d7 | 3376 | |
0362c781 | 3377 | r = &merged->wm[level]; |
cca32e9a | 3378 | |
d52fea5b VS |
3379 | /* |
3380 | * Maintain the watermark values even if the level is | |
3381 | * disabled. Doing otherwise could cause underruns. | |
3382 | */ | |
3383 | results->wm_lp[wm_lp - 1] = | |
cd1d3ee9 | 3384 | (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) | |
416f4727 VS |
3385 | (r->pri_val << WM1_LP_SR_SHIFT) | |
3386 | r->cur_val; | |
3387 | ||
d52fea5b VS |
3388 | if (r->enable) |
3389 | results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; | |
3390 | ||
175fded1 | 3391 | if (INTEL_GEN(dev_priv) >= 8) |
416f4727 VS |
3392 | results->wm_lp[wm_lp - 1] |= |
3393 | r->fbc_val << WM1_LP_FBC_SHIFT_BDW; | |
3394 | else | |
3395 | results->wm_lp[wm_lp - 1] |= | |
3396 | r->fbc_val << WM1_LP_FBC_SHIFT; | |
3397 | ||
d52fea5b VS |
3398 | /* |
3399 | * Always set WM1S_LP_EN when spr_val != 0, even if the | |
3400 | * level is disabled. Doing otherwise could cause underruns. | |
3401 | */ | |
175fded1 | 3402 | if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) { |
48a1b8d4 | 3403 | drm_WARN_ON(&dev_priv->drm, wm_lp != 1); |
6cef2b8a VS |
3404 | results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; |
3405 | } else | |
3406 | results->wm_lp_spr[wm_lp - 1] = r->spr_val; | |
cca32e9a | 3407 | } |
801bcfff | 3408 | |
0b2ae6d7 | 3409 | /* LP0 register values */ |
cd1d3ee9 | 3410 | for_each_intel_crtc(&dev_priv->drm, intel_crtc) { |
0b2ae6d7 | 3411 | enum pipe pipe = intel_crtc->pipe; |
0560b0c6 VS |
3412 | const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk; |
3413 | const struct intel_wm_level *r = &pipe_wm->wm[0]; | |
0b2ae6d7 | 3414 | |
48a1b8d4 | 3415 | if (drm_WARN_ON(&dev_priv->drm, !r->enable)) |
0b2ae6d7 | 3416 | continue; |
1011d8c4 | 3417 | |
0b2ae6d7 VS |
3418 | results->wm_pipe[pipe] = |
3419 | (r->pri_val << WM0_PIPE_PLANE_SHIFT) | | |
3420 | (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | | |
3421 | r->cur_val; | |
801bcfff PZ |
3422 | } |
3423 | } | |
3424 | ||
861f3389 PZ |
3425 | /* Find the result with the highest level enabled. Check for enable_fbc_wm in |
3426 | * case both are at the same level. Prefer r1 in case they're the same. */ | |
cd1d3ee9 MR |
3427 | static struct intel_pipe_wm * |
3428 | ilk_find_best_result(struct drm_i915_private *dev_priv, | |
3429 | struct intel_pipe_wm *r1, | |
3430 | struct intel_pipe_wm *r2) | |
861f3389 | 3431 | { |
cd1d3ee9 | 3432 | int level, max_level = ilk_wm_max_level(dev_priv); |
198a1e9b | 3433 | int level1 = 0, level2 = 0; |
861f3389 | 3434 | |
198a1e9b VS |
3435 | for (level = 1; level <= max_level; level++) { |
3436 | if (r1->wm[level].enable) | |
3437 | level1 = level; | |
3438 | if (r2->wm[level].enable) | |
3439 | level2 = level; | |
861f3389 PZ |
3440 | } |
3441 | ||
198a1e9b VS |
3442 | if (level1 == level2) { |
3443 | if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) | |
861f3389 PZ |
3444 | return r2; |
3445 | else | |
3446 | return r1; | |
198a1e9b | 3447 | } else if (level1 > level2) { |
861f3389 PZ |
3448 | return r1; |
3449 | } else { | |
3450 | return r2; | |
3451 | } | |
3452 | } | |
3453 | ||
49a687c4 VS |
3454 | /* dirty bits used to track which watermarks need changes */ |
3455 | #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) | |
49a687c4 VS |
3456 | #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) |
3457 | #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) | |
3458 | #define WM_DIRTY_FBC (1 << 24) | |
3459 | #define WM_DIRTY_DDB (1 << 25) | |
3460 | ||
055e393f | 3461 | static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, |
820c1980 ID |
3462 | const struct ilk_wm_values *old, |
3463 | const struct ilk_wm_values *new) | |
49a687c4 VS |
3464 | { |
3465 | unsigned int dirty = 0; | |
3466 | enum pipe pipe; | |
3467 | int wm_lp; | |
3468 | ||
055e393f | 3469 | for_each_pipe(dev_priv, pipe) { |
49a687c4 VS |
3470 | if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { |
3471 | dirty |= WM_DIRTY_PIPE(pipe); | |
3472 | /* Must disable LP1+ watermarks too */ | |
3473 | dirty |= WM_DIRTY_LP_ALL; | |
3474 | } | |
3475 | } | |
3476 | ||
3477 | if (old->enable_fbc_wm != new->enable_fbc_wm) { | |
3478 | dirty |= WM_DIRTY_FBC; | |
3479 | /* Must disable LP1+ watermarks too */ | |
3480 | dirty |= WM_DIRTY_LP_ALL; | |
3481 | } | |
3482 | ||
3483 | if (old->partitioning != new->partitioning) { | |
3484 | dirty |= WM_DIRTY_DDB; | |
3485 | /* Must disable LP1+ watermarks too */ | |
3486 | dirty |= WM_DIRTY_LP_ALL; | |
3487 | } | |
3488 | ||
3489 | /* LP1+ watermarks already deemed dirty, no need to continue */ | |
3490 | if (dirty & WM_DIRTY_LP_ALL) | |
3491 | return dirty; | |
3492 | ||
3493 | /* Find the lowest numbered LP1+ watermark in need of an update... */ | |
3494 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { | |
3495 | if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || | |
3496 | old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) | |
3497 | break; | |
3498 | } | |
3499 | ||
3500 | /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ | |
3501 | for (; wm_lp <= 3; wm_lp++) | |
3502 | dirty |= WM_DIRTY_LP(wm_lp); | |
3503 | ||
3504 | return dirty; | |
3505 | } | |
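/*
 * Standalone sketch of the LP cascade above (helper and bit layout
 * mirror WM_DIRTY_LP() but are illustrative): once the lowest changed
 * LP register is found, it and every higher numbered LP register are
 * flagged dirty.
 */
static unsigned int sketch_lp_dirty_mask(const unsigned int old_lp[3],
					 const unsigned int new_lp[3])
{
	unsigned int dirty = 0;
	int wm_lp;

	for (wm_lp = 1; wm_lp <= 3; wm_lp++)
		if (old_lp[wm_lp - 1] != new_lp[wm_lp - 1])
			break;

	for (; wm_lp <= 3; wm_lp++)
		dirty |= 1u << (15 + wm_lp);

	return dirty;
}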
3506 | ||
8553c18e VS |
3507 | static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, |
3508 | unsigned int dirty) | |
801bcfff | 3509 | { |
820c1980 | 3510 | struct ilk_wm_values *previous = &dev_priv->wm.hw; |
8553c18e | 3511 | bool changed = false; |
801bcfff | 3512 | |
facd619b VS |
3513 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { |
3514 | previous->wm_lp[2] &= ~WM1_LP_SR_EN; | |
3515 | I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); | |
8553c18e | 3516 | changed = true; |
facd619b VS |
3517 | } |
3518 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { | |
3519 | previous->wm_lp[1] &= ~WM1_LP_SR_EN; | |
3520 | I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); | |
8553c18e | 3521 | changed = true; |
facd619b VS |
3522 | } |
3523 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { | |
3524 | previous->wm_lp[0] &= ~WM1_LP_SR_EN; | |
3525 | I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); | |
8553c18e | 3526 | changed = true; |
facd619b | 3527 | } |
801bcfff | 3528 | |
facd619b VS |
3529 | /* |
3530 | * Don't touch WM1S_LP_EN here. | |
3531 | * Doing so could cause underruns. | |
3532 | */ | |
6cef2b8a | 3533 | |
8553c18e VS |
3534 | return changed; |
3535 | } | |
3536 | ||
3537 | /* | |
3538 | * The spec says we shouldn't write when we don't need, because every write | |
3539 | * causes WMs to be re-evaluated, expending some power. | |
3540 | */ | |
820c1980 ID |
3541 | static void ilk_write_wm_values(struct drm_i915_private *dev_priv, |
3542 | struct ilk_wm_values *results) | |
8553c18e | 3543 | { |
820c1980 | 3544 | struct ilk_wm_values *previous = &dev_priv->wm.hw; |
8553c18e | 3545 | unsigned int dirty; |
5ce9a649 | 3546 | u32 val; |
8553c18e | 3547 | |
055e393f | 3548 | dirty = ilk_compute_wm_dirty(dev_priv, previous, results); |
8553c18e VS |
3549 | if (!dirty) |
3550 | return; | |
3551 | ||
3552 | _ilk_disable_lp_wm(dev_priv, dirty); | |
3553 | ||
49a687c4 | 3554 | if (dirty & WM_DIRTY_PIPE(PIPE_A)) |
801bcfff | 3555 | I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); |
49a687c4 | 3556 | if (dirty & WM_DIRTY_PIPE(PIPE_B)) |
801bcfff | 3557 | I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); |
49a687c4 | 3558 | if (dirty & WM_DIRTY_PIPE(PIPE_C)) |
801bcfff PZ |
3559 | I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); |
3560 | ||
49a687c4 | 3561 | if (dirty & WM_DIRTY_DDB) { |
8652744b | 3562 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
ac9545fd VS |
3563 | val = I915_READ(WM_MISC); |
3564 | if (results->partitioning == INTEL_DDB_PART_1_2) | |
3565 | val &= ~WM_MISC_DATA_PARTITION_5_6; | |
3566 | else | |
3567 | val |= WM_MISC_DATA_PARTITION_5_6; | |
3568 | I915_WRITE(WM_MISC, val); | |
3569 | } else { | |
3570 | val = I915_READ(DISP_ARB_CTL2); | |
3571 | if (results->partitioning == INTEL_DDB_PART_1_2) | |
3572 | val &= ~DISP_DATA_PARTITION_5_6; | |
3573 | else | |
3574 | val |= DISP_DATA_PARTITION_5_6; | |
3575 | I915_WRITE(DISP_ARB_CTL2, val); | |
3576 | } | |
1011d8c4 PZ |
3577 | } |
3578 | ||
49a687c4 | 3579 | if (dirty & WM_DIRTY_FBC) { |
cca32e9a PZ |
3580 | val = I915_READ(DISP_ARB_CTL); |
3581 | if (results->enable_fbc_wm) | |
3582 | val &= ~DISP_FBC_WM_DIS; | |
3583 | else | |
3584 | val |= DISP_FBC_WM_DIS; | |
3585 | I915_WRITE(DISP_ARB_CTL, val); | |
3586 | } | |
3587 | ||
954911eb ID |
3588 | if (dirty & WM_DIRTY_LP(1) && |
3589 | previous->wm_lp_spr[0] != results->wm_lp_spr[0]) | |
3590 | I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); | |
3591 | ||
175fded1 | 3592 | if (INTEL_GEN(dev_priv) >= 7) { |
6cef2b8a VS |
3593 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) |
3594 | I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); | |
3595 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) | |
3596 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); | |
3597 | } | |
801bcfff | 3598 | |
facd619b | 3599 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) |
801bcfff | 3600 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); |
facd619b | 3601 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) |
801bcfff | 3602 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); |
facd619b | 3603 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) |
801bcfff | 3604 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); |
609cedef VS |
3605 | |
3606 | dev_priv->wm.hw = *results; | |
801bcfff PZ |
3607 | } |
3608 | ||
60aca574 | 3609 | bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) |
8553c18e | 3610 | { |
8553c18e VS |
3611 | return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); |
3612 | } | |
3613 | ||
0f0f9aee | 3614 | u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv) |
74bd8004 | 3615 | { |
0f0f9aee SL |
3616 | int i; |
3617 | int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; | |
3618 | u8 enabled_slices_mask = 0; | |
74bd8004 | 3619 | |
0f0f9aee SL |
3620 | for (i = 0; i < max_slices; i++) { |
3621 | if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE) | |
3622 | enabled_slices_mask |= BIT(i); | |
3623 | } | |
74bd8004 | 3624 | |
0f0f9aee | 3625 | return enabled_slices_mask; |
74bd8004 MK |
3626 | } |
3627 | ||
ee3d532f PZ |
3628 | /* |
3629 | * FIXME: We still don't have the proper code to detect if we need to apply the WA, |
3630 | * so assume we'll always need it in order to avoid underruns. | |
3631 | */ | |
60e983ff | 3632 | static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv) |
ee3d532f | 3633 | { |
60e983ff | 3634 | return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv); |
ee3d532f PZ |
3635 | } |
3636 | ||
56feca91 PZ |
3637 | static bool |
3638 | intel_has_sagv(struct drm_i915_private *dev_priv) | |
3639 | { | |
8ffa4392 LDM |
3640 | /* HACK! */ |
3641 | if (IS_GEN(dev_priv, 12)) | |
3642 | return false; | |
3643 | ||
1ca2b067 RV |
3644 | return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) && |
3645 | dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; | |
56feca91 PZ |
3646 | } |
3647 | ||
b068a860 JA |
3648 | static void |
3649 | skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) | |
3650 | { | |
da80f047 JA |
3651 | if (INTEL_GEN(dev_priv) >= 12) { |
3652 | u32 val = 0; | |
3653 | int ret; | |
3654 | ||
3655 | ret = sandybridge_pcode_read(dev_priv, | |
3656 | GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, | |
3657 | &val, NULL); | |
3658 | if (!ret) { | |
3659 | dev_priv->sagv_block_time_us = val; | |
3660 | return; | |
3661 | } | |
3662 | ||
f8d18d5c | 3663 | drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n"); |
da80f047 | 3664 | } else if (IS_GEN(dev_priv, 11)) { |
b068a860 JA |
3665 | dev_priv->sagv_block_time_us = 10; |
3666 | return; | |
3667 | } else if (IS_GEN(dev_priv, 10)) { | |
3668 | dev_priv->sagv_block_time_us = 20; | |
3669 | return; | |
3670 | } else if (IS_GEN(dev_priv, 9)) { | |
3671 | dev_priv->sagv_block_time_us = 30; | |
3672 | return; | |
3673 | } else { | |
3674 | MISSING_CASE(INTEL_GEN(dev_priv)); | |
3675 | } | |
3676 | ||
3677 | /* Default to an unusable block time */ | |
3678 | dev_priv->sagv_block_time_us = -1; | |
3679 | } | |
3680 | ||
656d1b89 L |
3681 | /* |
3682 | * SAGV dynamically adjusts the system agent voltage and clock frequencies | |
3683 | * depending on power and performance requirements. The display engine access | |
3684 | * to system memory is blocked during the adjustment time. Because of the | |
3685 | * blocking time, having this enabled can cause full system hangs and/or pipe | |
3686 | * underruns if we don't meet all of the following requirements: | |
3687 | * | |
3688 | * - <= 1 pipe enabled | |
3689 | * - All planes can enable watermarks for latencies >= SAGV engine block time | |
3690 | * - We're not using an interlaced display configuration | |
3691 | */ | |
3692 | int | |
16dcdc4e | 3693 | intel_enable_sagv(struct drm_i915_private *dev_priv) |
656d1b89 L |
3694 | { |
3695 | int ret; | |
3696 | ||
56feca91 PZ |
3697 | if (!intel_has_sagv(dev_priv)) |
3698 | return 0; | |
3699 | ||
3700 | if (dev_priv->sagv_status == I915_SAGV_ENABLED) | |
656d1b89 L |
3701 | return 0; |
3702 | ||
f8d18d5c | 3703 | drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n"); |
656d1b89 L |
3704 | ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, |
3705 | GEN9_SAGV_ENABLE); | |
3706 | ||
ff61a974 | 3707 | /* We don't need to wait for SAGV when enabling */ |
656d1b89 L |
3708 | |
3709 | /* | |
3710 | * Some skl systems, pre-release machines in particular, | |
ff61a974 | 3711 | * don't actually have SAGV. |
656d1b89 | 3712 | */ |
6e3100ec | 3713 | if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { |
f8d18d5c | 3714 | drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); |
16dcdc4e | 3715 | dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; |
656d1b89 L |
3716 | return 0; |
3717 | } else if (ret < 0) { | |
f8d18d5c | 3718 | drm_err(&dev_priv->drm, "Failed to enable SAGV\n"); |
656d1b89 L |
3719 | return ret; |
3720 | } | |
3721 | ||
16dcdc4e | 3722 | dev_priv->sagv_status = I915_SAGV_ENABLED; |
656d1b89 L |
3723 | return 0; |
3724 | } | |
3725 | ||
656d1b89 | 3726 | int |
16dcdc4e | 3727 | intel_disable_sagv(struct drm_i915_private *dev_priv) |
656d1b89 | 3728 | { |
b3b8e999 | 3729 | int ret; |
656d1b89 | 3730 | |
56feca91 PZ |
3731 | if (!intel_has_sagv(dev_priv)) |
3732 | return 0; | |
3733 | ||
3734 | if (dev_priv->sagv_status == I915_SAGV_DISABLED) | |
656d1b89 L |
3735 | return 0; |
3736 | ||
f8d18d5c | 3737 | drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n"); |
656d1b89 | 3738 | /* bspec says to keep retrying for at least 1 ms */ |
b3b8e999 ID |
3739 | ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, |
3740 | GEN9_SAGV_DISABLE, | |
3741 | GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, | |
3742 | 1); | |
656d1b89 L |
3743 | /* |
3744 | * Some skl systems, pre-release machines in particular, | |
ff61a974 | 3745 | * don't actually have SAGV. |
656d1b89 | 3746 | */ |
b3b8e999 | 3747 | if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { |
f8d18d5c | 3748 | drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); |
16dcdc4e | 3749 | dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; |
656d1b89 | 3750 | return 0; |
b3b8e999 | 3751 | } else if (ret < 0) { |
f8d18d5c | 3752 | drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret); |
b3b8e999 | 3753 | return ret; |
656d1b89 L |
3754 | } |
3755 | ||
16dcdc4e | 3756 | dev_priv->sagv_status = I915_SAGV_DISABLED; |
656d1b89 L |
3757 | return 0; |
3758 | } | |
3759 | ||
855e0d68 | 3760 | bool intel_can_enable_sagv(struct intel_atomic_state *state) |
656d1b89 | 3761 | { |
855e0d68 | 3762 | struct drm_device *dev = state->base.dev; |
656d1b89 | 3763 | struct drm_i915_private *dev_priv = to_i915(dev); |
ee3d532f PZ |
3764 | struct intel_crtc *crtc; |
3765 | struct intel_plane *plane; | |
ec193640 | 3766 | struct intel_crtc_state *crtc_state; |
656d1b89 | 3767 | enum pipe pipe; |
d8c0fafc | 3768 | int level, latency; |
656d1b89 | 3769 | |
56feca91 PZ |
3770 | if (!intel_has_sagv(dev_priv)) |
3771 | return false; | |
3772 | ||
656d1b89 | 3773 | /* |
656d1b89 L |
3774 | * If there are no active CRTCs, no additional checks need be performed |
3775 | */ | |
0b14d968 | 3776 | if (hweight8(state->active_pipes) == 0) |
656d1b89 | 3777 | return true; |
da17223e LDM |
3778 | |
3779 | /* | |
3780 | * SKL+ workaround: bspec recommends we disable SAGV when we have | |
3781 | * more than one pipe enabled |
3782 | */ | |
0b14d968 | 3783 | if (hweight8(state->active_pipes) > 1) |
656d1b89 L |
3784 | return false; |
3785 | ||
3786 | /* Since we're now guaranteed to only have one active CRTC... */ | |
d06a79d3 | 3787 | pipe = ffs(state->active_pipes) - 1; |
98187836 | 3788 | crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
ec193640 | 3789 | crtc_state = to_intel_crtc_state(crtc->base.state); |
656d1b89 | 3790 | |
1326a92c | 3791 | if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) |
656d1b89 L |
3792 | return false; |
3793 | ||
ee3d532f | 3794 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
d5cdfdf5 | 3795 | struct skl_plane_wm *wm = |
ec193640 | 3796 | &crtc_state->wm.skl.optimal.planes[plane->id]; |
ee3d532f | 3797 | |
656d1b89 | 3798 | /* Skip this plane if it's not enabled */ |
d8c0fafc | 3799 | if (!wm->wm[0].plane_en) |
656d1b89 L |
3800 | continue; |
3801 | ||
3802 | /* Find the highest enabled wm level for this plane */ | |
5db94019 | 3803 | for (level = ilk_wm_max_level(dev_priv); |
d8c0fafc | 3804 | !wm->wm[level].plane_en; --level) |
656d1b89 L |
3805 | { } |
3806 | ||
ee3d532f PZ |
3807 | latency = dev_priv->wm.skl_latency[level]; |
3808 | ||
60e983ff | 3809 | if (skl_needs_memory_bw_wa(dev_priv) && |
bae781b2 | 3810 | plane->base.state->fb->modifier == |
ee3d532f PZ |
3811 | I915_FORMAT_MOD_X_TILED) |
3812 | latency += 15; | |
3813 | ||
656d1b89 | 3814 | /* |
fdd11c2b PZ |
3815 | * If any of the planes on this pipe don't enable wm levels that |
3816 | * incur memory latencies higher than sagv_block_time_us, we |
ff61a974 | 3817 | * can't enable SAGV. |
656d1b89 | 3818 | */ |
b068a860 | 3819 | if (latency < dev_priv->sagv_block_time_us) |
656d1b89 L |
3820 | return false; |
3821 | } | |
3822 | ||
3823 | return true; | |
3824 | } | |
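/*
 * The per-plane test above, reduced to a sketch (parameters are
 * illustrative): a plane tolerates SAGV only if the latency of its
 * highest enabled watermark level, plus the 15us X-tiled adjustment
 * where the memory bandwidth WA applies, covers the SAGV block time.
 */
static int sketch_plane_tolerates_sagv(unsigned int highest_level_latency_us,
				       int x_tiled_wa,
				       unsigned int sagv_block_time_us)
{
	if (x_tiled_wa)
		highest_level_latency_us += 15;

	return highest_level_latency_us >= sagv_block_time_us;
}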
3825 | ||
ff2cd863 SL |
3826 | /* |
3827 | * Calculate initial DBuf slice offset, based on slice size | |
3828 | * and mask (i.e. if slice size is 1024 and only the second slice is |
3829 | * enabled, the offset would be 1024) |
3830 | */ | |
3831 | static unsigned int | |
3832 | icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask, | |
3833 | u32 slice_size, | |
3834 | u32 ddb_size) | |
3835 | { | |
3836 | unsigned int offset = 0; | |
3837 | ||
3838 | if (!dbuf_slice_mask) | |
3839 | return 0; | |
3840 | ||
3841 | offset = (ffs(dbuf_slice_mask) - 1) * slice_size; | |
3842 | ||
3843 | WARN_ON(offset >= ddb_size); | |
3844 | return offset; | |
3845 | } | |
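/*
 * Worked example of the rule above (open-coded ffs() for a standalone
 * sketch): with a 1024 block slice size, a mask of BIT(1) - only the
 * second slice allowed - starts the pipe's allocation at block 1024,
 * while any mask including BIT(0) starts it at block 0.
 */
static unsigned int sketch_first_dbuf_offset(unsigned int dbuf_slice_mask,
					     unsigned int slice_size)
{
	unsigned int first = 0;

	if (!dbuf_slice_mask)
		return 0;

	while (!(dbuf_slice_mask & (1u << first)))
		first++;

	return first * slice_size;
}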
3846 | ||
3847 | static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv) | |
aa9664ff | 3848 | { |
aa9664ff MK |
3849 | u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size; |
3850 | ||
48a1b8d4 | 3851 | drm_WARN_ON(&dev_priv->drm, ddb_size == 0); |
aa9664ff MK |
3852 | |
3853 | if (INTEL_GEN(dev_priv) < 11) | |
3854 | return ddb_size - 4; /* 4 blocks for bypass path allocation */ | |
3855 | ||
aa9664ff MK |
3856 | return ddb_size; |
3857 | } | |
3858 | ||
ff2cd863 | 3859 | static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state, |
05e8155a | 3860 | u8 active_pipes); |
ff2cd863 | 3861 | |
b9cec075 | 3862 | static void |
b048a00b | 3863 | skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, |
ec193640 | 3864 | const struct intel_crtc_state *crtc_state, |
24719e94 | 3865 | const u64 total_data_rate, |
c107acfe MR |
3866 | struct skl_ddb_entry *alloc, /* out */ |
3867 | int *num_active /* out */) | |
b9cec075 | 3868 | { |
2225f3c6 | 3869 | struct drm_atomic_state *state = crtc_state->uapi.state; |
c107acfe | 3870 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
2225f3c6 | 3871 | struct drm_crtc *for_crtc = crtc_state->uapi.crtc; |
ec193640 | 3872 | const struct intel_crtc *crtc; |
ff2cd863 | 3873 | u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0; |
cf1f697a MK |
3874 | enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe; |
3875 | u16 ddb_size; | |
ff2cd863 | 3876 | u32 ddb_range_size; |
cf1f697a | 3877 | u32 i; |
ff2cd863 SL |
3878 | u32 dbuf_slice_mask; |
3879 | u32 active_pipes; | |
3880 | u32 offset; | |
3881 | u32 slice_size; | |
3882 | u32 total_slice_mask; | |
3883 | u32 start, end; | |
c107acfe | 3884 | |
48a1b8d4 | 3885 | if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) { |
b9cec075 DL |
3886 | alloc->start = 0; |
3887 | alloc->end = 0; | |
0b14d968 | 3888 | *num_active = hweight8(dev_priv->active_pipes); |
b9cec075 DL |
3889 | return; |
3890 | } | |
3891 | ||
a6d3460e | 3892 | if (intel_state->active_pipe_changes) |
ff2cd863 | 3893 | active_pipes = intel_state->active_pipes; |
a6d3460e | 3894 | else |
ff2cd863 SL |
3895 | active_pipes = dev_priv->active_pipes; |
3896 | ||
3897 | *num_active = hweight8(active_pipes); | |
3898 | ||
3899 | ddb_size = intel_get_ddb_size(dev_priv); | |
a6d3460e | 3900 | |
ff2cd863 | 3901 | slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices; |
b9cec075 | 3902 | |
c107acfe | 3903 | /* |
cf1f697a MK |
3904 | * If the state doesn't change the active CRTCs and there is no |
3905 | * modeset request, then there's no need to recalculate; | |
3906 | * the existing pipe allocation limits should remain unchanged. | |
3907 | * Note that we're safe from racing commits since any racing commit | |
3908 | * that changes the active CRTC list or does a modeset would need to |
3909 | * grab _all_ crtc locks, including the one we currently hold. | |
c107acfe | 3910 | */ |
cf1f697a | 3911 | if (!intel_state->active_pipe_changes && !intel_state->modeset) { |
512b5527 ML |
3912 | /* |
3913 | * alloc may be cleared by clear_intel_crtc_state, | |
3914 | * copy from old state to be sure | |
3915 | */ | |
3916 | *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb; | |
a6d3460e | 3917 | return; |
c107acfe | 3918 | } |
a6d3460e | 3919 | |
ff2cd863 SL |
3920 | /* |
3921 | * Get the allowed DBuf slices for the corresponding pipe and platform. |
3922 | */ | |
3923 | dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes); | |
3924 | ||
3925 | DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n", | |
3926 | dbuf_slice_mask, | |
3927 | pipe_name(for_pipe), active_pipes); | |
3928 | ||
3929 | /* | |
3930 | * Figure out at which DBuf slice we start, i.e. if we start at DBuf S2 |
3931 | * and slice size is 1024, the offset would be 1024 | |
3932 | */ | |
3933 | offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask, | |
3934 | slice_size, ddb_size); | |
3935 | ||
3936 | /* | |
3937 | * Figure out total size of allowed DBuf slices, which is basically | |
3938 | * a number of allowed slices for that pipe multiplied by slice size. | |
3939 | * Inside of this range, ddb entries are still allocated in |
3940 | * proportion to the display width. |
3941 | */ | |
3942 | ddb_range_size = hweight8(dbuf_slice_mask) * slice_size; | |
3943 | ||
cf1f697a MK |
3944 | /* |
3945 | * Watermark/ddb requirement highly depends upon width of the | |
3946 | * framebuffer, so instead of allocating DDB equally among pipes, |
3947 | * distribute DDB based on resolution/width of the display. | |
3948 | */ | |
ff2cd863 | 3949 | total_slice_mask = dbuf_slice_mask; |
ec193640 ML |
3950 | for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { |
3951 | const struct drm_display_mode *adjusted_mode = | |
1326a92c | 3952 | &crtc_state->hw.adjusted_mode; |
ec193640 | 3953 | enum pipe pipe = crtc->pipe; |
cf1f697a | 3954 | int hdisplay, vdisplay; |
ff2cd863 | 3955 | u32 pipe_dbuf_slice_mask; |
cf1f697a | 3956 | |
ff2cd863 SL |
3957 | if (!crtc_state->hw.active) |
3958 | continue; | |
3959 | ||
3960 | pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, | |
3961 | active_pipes); | |
3962 | ||
3963 | /* | |
3964 | * According to BSpec, a pipe can share one DBuf slice with other |
3965 | * pipes, or a pipe can use multiple slices; in both cases we |
3966 | * account for other pipes only if they have exactly the same mask. |
3967 | * However, we still need to account for how many slices should be |
3968 | * enabled in total. |
3969 | */ | |
3970 | total_slice_mask |= pipe_dbuf_slice_mask; | |
3971 | ||
3972 | /* | |
3973 | * Do not account for pipes using other slice sets. Luckily, as of |
3974 | * the current BSpec, slice sets do not partially intersect (pipes |
3975 | * share either the exact same slice or the same slice set, i.e. no |
3976 | * partial intersection), so it is enough to check for |
3977 | * equality for now. |
3978 | */ | |
3979 | if (dbuf_slice_mask != pipe_dbuf_slice_mask) | |
cf1f697a MK |
3980 | continue; |
3981 | ||
cf1f697a | 3982 | drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); |
ff2cd863 SL |
3983 | |
3984 | total_width_in_range += hdisplay; | |
cf1f697a MK |
3985 | |
3986 | if (pipe < for_pipe) | |
ff2cd863 | 3987 | width_before_pipe_in_range += hdisplay; |
cf1f697a MK |
3988 | else if (pipe == for_pipe) |
3989 | pipe_width = hdisplay; | |
3990 | } | |
3991 | ||
ff2cd863 SL |
3992 | /* |
3993 | * FIXME: For now we always enable slice S1 as per | |
3994 | * the Bspec display initialization sequence. | |
3995 | */ | |
3996 | intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1); | |
3997 | ||
3998 | start = ddb_range_size * width_before_pipe_in_range / total_width_in_range; | |
3999 | end = ddb_range_size * | |
4000 | (width_before_pipe_in_range + pipe_width) / total_width_in_range; | |
4001 | ||
4002 | alloc->start = offset + start; | |
4003 | alloc->end = offset + end; | |
4004 | ||
4005 | DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe, | |
4006 | alloc->start, alloc->end); | |
4007 | DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n", | |
4008 | intel_state->enabled_dbuf_slices_mask, | |
4009 | INTEL_INFO(dev_priv)->num_supported_dbuf_slices); | |
b9cec075 DL |
4010 | } |
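/*
 * The width-proportional split above as a standalone sketch, with a
 * hypothetical 2048 block range shared by a 1920 pixel and a 1080 pixel
 * wide pipe: the first pipe gets blocks [0, 1310) and the second gets
 * [1310, 2048), both relative to the start offset of the DBuf range.
 */
static void sketch_ddb_split(unsigned int ddb_range_size,
			     unsigned int width_before_pipe,
			     unsigned int pipe_width,
			     unsigned int total_width,
			     unsigned int *start, unsigned int *end)
{
	*start = ddb_range_size * width_before_pipe / total_width;
	*end = ddb_range_size * (width_before_pipe + pipe_width) / total_width;
}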
4011 | ||
df331de3 VS |
4012 | static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, |
4013 | int width, const struct drm_format_info *format, | |
4014 | u64 modifier, unsigned int rotation, | |
4015 | u32 plane_pixel_rate, struct skl_wm_params *wp, | |
4016 | int color_plane); | |
ec193640 | 4017 | static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, |
df331de3 VS |
4018 | int level, |
4019 | const struct skl_wm_params *wp, | |
4020 | const struct skl_wm_level *result_prev, | |
4021 | struct skl_wm_level *result /* out */); | |
4022 | ||
4023 | static unsigned int | |
4024 | skl_cursor_allocation(const struct intel_crtc_state *crtc_state, | |
4025 | int num_active) | |
b9cec075 | 4026 | { |
2225f3c6 | 4027 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
df331de3 VS |
4028 | int level, max_level = ilk_wm_max_level(dev_priv); |
4029 | struct skl_wm_level wm = {}; | |
4030 | int ret, min_ddb_alloc = 0; | |
4031 | struct skl_wm_params wp; | |
4032 | ||
4033 | ret = skl_compute_wm_params(crtc_state, 256, | |
4034 | drm_format_info(DRM_FORMAT_ARGB8888), | |
4035 | DRM_FORMAT_MOD_LINEAR, | |
4036 | DRM_MODE_ROTATE_0, | |
4037 | crtc_state->pixel_rate, &wp, 0); | |
48a1b8d4 | 4038 | drm_WARN_ON(&dev_priv->drm, ret); |
df331de3 VS |
4039 | |
4040 | for (level = 0; level <= max_level; level++) { | |
6086e47b | 4041 | skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm); |
df331de3 VS |
4042 | if (wm.min_ddb_alloc == U16_MAX) |
4043 | break; | |
4044 | ||
4045 | min_ddb_alloc = wm.min_ddb_alloc; | |
4046 | } | |
b9cec075 | 4047 | |
df331de3 | 4048 | return max(num_active == 1 ? 32 : 8, min_ddb_alloc); |
b9cec075 DL |
4049 | } |
4050 | ||
37cde11b MK |
4051 | static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv, |
4052 | struct skl_ddb_entry *entry, u32 reg) | |
a269c583 | 4053 | { |
37cde11b | 4054 | |
d7e449a8 VS |
4055 | entry->start = reg & DDB_ENTRY_MASK; |
4056 | entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK; | |
37cde11b | 4057 | |
16160e3d DL |
4058 | if (entry->end) |
4059 | entry->end += 1; | |
a269c583 DL |
4060 | } |
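A minimal standalone sketch of the end+1 convention above, paired with the (end - 1) << 16 | start packing used by skl_ddb_entry_write() further down. The 0x7ff field mask and the sample values are illustrative assumptions, not taken from the register definitions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t start = 0, end = 160;				/* software view: end is exclusive */
	uint32_t reg = ((uint32_t)(end - 1) << 16) | start;	/* hardware view: end is inclusive */
	uint16_t rd_start = reg & 0x7ff;			/* assumed field mask, for illustration */
	uint16_t rd_end = ((reg >> 16) & 0x7ff) + 1;		/* +1 restores the exclusive end */

	printf("reg=%#x -> start=%u end=%u\n", reg, rd_start, rd_end);	/* reg=0x9f0000 -> start=0 end=160 */
	return 0;
}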
4061 | ||
ddf34319 MK |
4062 | static void |
4063 | skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, | |
4064 | const enum pipe pipe, | |
4065 | const enum plane_id plane_id, | |
ff43bc37 VS |
4066 | struct skl_ddb_entry *ddb_y, |
4067 | struct skl_ddb_entry *ddb_uv) | |
ddf34319 | 4068 | { |
ff43bc37 VS |
4069 | u32 val, val2; |
4070 | u32 fourcc = 0; | |
ddf34319 MK |
4071 | |
4072 | /* Cursor doesn't support NV12/planar, so no extra calculation needed */ | |
4073 | if (plane_id == PLANE_CURSOR) { | |
4074 | val = I915_READ(CUR_BUF_CFG(pipe)); | |
ff43bc37 | 4075 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); |
ddf34319 MK |
4076 | return; |
4077 | } | |
4078 | ||
4079 | val = I915_READ(PLANE_CTL(pipe, plane_id)); | |
4080 | ||
4081 | /* No DDB allocated for disabled planes */ | |
ff43bc37 VS |
4082 | if (val & PLANE_CTL_ENABLE) |
4083 | fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK, | |
4084 | val & PLANE_CTL_ORDER_RGBX, | |
4085 | val & PLANE_CTL_ALPHA_MASK); | |
ddf34319 | 4086 | |
ff43bc37 VS |
4087 | if (INTEL_GEN(dev_priv) >= 11) { |
4088 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); | |
4089 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); | |
4090 | } else { | |
4091 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); | |
12a6c931 | 4092 | val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); |
ddf34319 | 4093 | |
d1d23d7f VS |
4094 | if (fourcc && |
4095 | drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc))) | |
ff43bc37 VS |
4096 | swap(val, val2); |
4097 | ||
4098 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); | |
4099 | skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2); | |
ddf34319 MK |
4100 | } |
4101 | } | |
4102 | ||
ff43bc37 VS |
4103 | void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, |
4104 | struct skl_ddb_entry *ddb_y, | |
4105 | struct skl_ddb_entry *ddb_uv) | |
a269c583 | 4106 | { |
ff43bc37 VS |
4107 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
4108 | enum intel_display_power_domain power_domain; | |
4109 | enum pipe pipe = crtc->pipe; | |
0e6e0be4 | 4110 | intel_wakeref_t wakeref; |
ff43bc37 | 4111 | enum plane_id plane_id; |
74bd8004 | 4112 | |
ff43bc37 | 4113 | power_domain = POWER_DOMAIN_PIPE(pipe); |
0e6e0be4 CW |
4114 | wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); |
4115 | if (!wakeref) | |
ff43bc37 | 4116 | return; |
4d800030 | 4117 | |
ff43bc37 VS |
4118 | for_each_plane_id_on_crtc(crtc, plane_id) |
4119 | skl_ddb_get_hw_plane_state(dev_priv, pipe, | |
4120 | plane_id, | |
4121 | &ddb_y[plane_id], | |
4122 | &ddb_uv[plane_id]); | |
b10f1b20 | 4123 | |
0e6e0be4 | 4124 | intel_display_power_put(dev_priv, power_domain, wakeref); |
ff43bc37 | 4125 | } |
4d800030 | 4126 | |
072fcc30 | 4127 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv) |
ff43bc37 | 4128 | { |
0f0f9aee SL |
4129 | dev_priv->enabled_dbuf_slices_mask = |
4130 | intel_enabled_dbuf_slices_mask(dev_priv); | |
a269c583 DL |
4131 | } |
4132 | ||
9c2f7a9d KM |
4133 | /* |
4134 | * Determines the downscale amount of a plane for the purposes of watermark calculations. | |
4135 | * The bspec defines downscale amount as: | |
4136 | * | |
4137 | * """ | |
4138 | * Horizontal down scale amount = maximum[1, Horizontal source size / | |
4139 | * Horizontal destination size] | |
4140 | * Vertical down scale amount = maximum[1, Vertical source size / | |
4141 | * Vertical destination size] | |
4142 | * Total down scale amount = Horizontal down scale amount * | |
4143 | * Vertical down scale amount | |
4144 | * """ | |
4145 | * | |
4146 | * Return value is provided in 16.16 fixed point form to retain the fractional part. | |
4147 | * Caller should take care of dividing & rounding off the value. | |
4148 | */ | |
7084b50b | 4149 | static uint_fixed_16_16_t |
ec193640 ML |
4150 | skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, |
4151 | const struct intel_plane_state *plane_state) | |
9c2f7a9d | 4152 | { |
5ce9a649 | 4153 | u32 src_w, src_h, dst_w, dst_h; |
7084b50b KM |
4154 | uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; |
4155 | uint_fixed_16_16_t downscale_h, downscale_w; | |
9c2f7a9d | 4156 | |
ec193640 | 4157 | if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) |
eac2cb81 | 4158 | return u32_to_fixed16(0); |
9c2f7a9d | 4159 | |
3a612765 ML |
4160 | /* |
4161 | * Src coordinates are already rotated by 270 degrees for | |
4162 | * the 90/270 degree plane rotation cases (to match the | |
4163 | * GTT mapping), hence no need to account for rotation here. | |
4164 | * | |
4165 | * n.b., src is 16.16 fixed point, dst is whole integer. | |
4166 | */ | |
f90a85e7 ML |
4167 | src_w = drm_rect_width(&plane_state->uapi.src) >> 16; |
4168 | src_h = drm_rect_height(&plane_state->uapi.src) >> 16; | |
4169 | dst_w = drm_rect_width(&plane_state->uapi.dst); | |
4170 | dst_h = drm_rect_height(&plane_state->uapi.dst); | |
93aa2a1c | 4171 | |
eac2cb81 KM |
4172 | fp_w_ratio = div_fixed16(src_w, dst_w); |
4173 | fp_h_ratio = div_fixed16(src_h, dst_h); | |
4174 | downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1)); | |
4175 | downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1)); | |
9c2f7a9d | 4176 | |
7084b50b | 4177 | return mul_fixed16(downscale_w, downscale_h); |
9c2f7a9d KM |
4178 | } |
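As a worked illustration of the 16.16 result above, a minimal standalone sketch (the helpers below are local stand-ins for the i915_fixed.h API, and the 3840x2160 source scaled to a 1920x1080 destination is a made-up example):

#include <stdint.h>
#include <stdio.h>

static uint32_t fp(uint32_t x)                { return x << 16; }                           /* int -> 16.16 */
static uint32_t div16(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a << 16) / b); }
static uint32_t mul16(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) >> 16); }
static uint32_t max16(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	/* 3840x2160 source downscaled to a 1920x1080 destination */
	uint32_t down_w = max16(div16(3840, 1920), fp(1));	/* 2.0 -> 0x20000 */
	uint32_t down_h = max16(div16(2160, 1080), fp(1));	/* 2.0 -> 0x20000 */
	uint32_t total  = mul16(down_w, down_h);		/* 4.0 -> 0x40000 */

	printf("total downscale = %#x (%.2f)\n", total, total / 65536.0);
	return 0;
}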
4179 | ||
ff2cd863 SL |
4180 | struct dbuf_slice_conf_entry { |
4181 | u8 active_pipes; | |
4182 | u8 dbuf_mask[I915_MAX_PIPES]; | |
4183 | }; | |
4184 | ||
4185 | /* | |
4186 | * Table taken from Bspec 12716 | |
4187 | * Pipes have some preferred DBuf slice affinity, plus there | |
4188 | * are some hardcoded requirements on how those should be | |
4189 | * distributed for multi-pipe scenarios. With more DBuf slices | |
4190 | * the algorithm would get even messier and less readable, so | |
4191 | * it was decided to use a table almost as-is from the BSpec | |
4192 | * itself - that way it is at least easier to compare, change | |
4193 | * and check. | |
4194 | */ | |
f8226d02 | 4195 | static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = |
ff2cd863 SL |
4196 | /* Autogenerated with igt/tools/intel_dbuf_map tool: */ |
4197 | { | |
4198 | { | |
4199 | .active_pipes = BIT(PIPE_A), | |
4200 | .dbuf_mask = { | |
06812bd9 VS |
4201 | [PIPE_A] = BIT(DBUF_S1), |
4202 | }, | |
ff2cd863 SL |
4203 | }, |
4204 | { | |
4205 | .active_pipes = BIT(PIPE_B), | |
4206 | .dbuf_mask = { | |
06812bd9 VS |
4207 | [PIPE_B] = BIT(DBUF_S1), |
4208 | }, | |
ff2cd863 SL |
4209 | }, |
4210 | { | |
4211 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), | |
4212 | .dbuf_mask = { | |
4213 | [PIPE_A] = BIT(DBUF_S1), | |
06812bd9 VS |
4214 | [PIPE_B] = BIT(DBUF_S2), |
4215 | }, | |
ff2cd863 SL |
4216 | }, |
4217 | { | |
4218 | .active_pipes = BIT(PIPE_C), | |
4219 | .dbuf_mask = { | |
06812bd9 VS |
4220 | [PIPE_C] = BIT(DBUF_S2), |
4221 | }, | |
ff2cd863 SL |
4222 | }, |
4223 | { | |
4224 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), | |
4225 | .dbuf_mask = { | |
4226 | [PIPE_A] = BIT(DBUF_S1), | |
06812bd9 VS |
4227 | [PIPE_C] = BIT(DBUF_S2), |
4228 | }, | |
ff2cd863 SL |
4229 | }, |
4230 | { | |
4231 | .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), | |
4232 | .dbuf_mask = { | |
4233 | [PIPE_B] = BIT(DBUF_S1), | |
06812bd9 VS |
4234 | [PIPE_C] = BIT(DBUF_S2), |
4235 | }, | |
ff2cd863 SL |
4236 | }, |
4237 | { | |
4238 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), | |
4239 | .dbuf_mask = { | |
4240 | [PIPE_A] = BIT(DBUF_S1), | |
4241 | [PIPE_B] = BIT(DBUF_S1), | |
06812bd9 VS |
4242 | [PIPE_C] = BIT(DBUF_S2), |
4243 | }, | |
ff2cd863 | 4244 | }, |
05e8155a | 4245 | {} |
ff2cd863 SL |
4246 | }; |
4247 | ||
4248 | /* | |
4249 | * Table taken from Bspec 49255 | |
4250 | * Pipes have some preferred DBuf slice affinity, plus there | |
4251 | * are some hardcoded requirements on how those should be | |
4252 | * distributed for multi-pipe scenarios. With more DBuf slices | |
4253 | * the algorithm would get even messier and less readable, so | |
4254 | * it was decided to use a table almost as-is from the BSpec | |
4255 | * itself - that way it is at least easier to compare, change | |
4256 | * and check. | |
4257 | */ | |
f8226d02 | 4258 | static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = |
ff2cd863 SL |
4259 | /* Autogenerated with igt/tools/intel_dbuf_map tool: */ |
4260 | { | |
4261 | { | |
4262 | .active_pipes = BIT(PIPE_A), | |
4263 | .dbuf_mask = { | |
06812bd9 VS |
4264 | [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), |
4265 | }, | |
ff2cd863 SL |
4266 | }, |
4267 | { | |
4268 | .active_pipes = BIT(PIPE_B), | |
4269 | .dbuf_mask = { | |
06812bd9 VS |
4270 | [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), |
4271 | }, | |
ff2cd863 SL |
4272 | }, |
4273 | { | |
4274 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), | |
4275 | .dbuf_mask = { | |
4276 | [PIPE_A] = BIT(DBUF_S2), | |
06812bd9 VS |
4277 | [PIPE_B] = BIT(DBUF_S1), |
4278 | }, | |
ff2cd863 SL |
4279 | }, |
4280 | { | |
4281 | .active_pipes = BIT(PIPE_C), | |
4282 | .dbuf_mask = { | |
06812bd9 VS |
4283 | [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1), |
4284 | }, | |
ff2cd863 SL |
4285 | }, |
4286 | { | |
4287 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), | |
4288 | .dbuf_mask = { | |
4289 | [PIPE_A] = BIT(DBUF_S1), | |
06812bd9 VS |
4290 | [PIPE_C] = BIT(DBUF_S2), |
4291 | }, | |
ff2cd863 SL |
4292 | }, |
4293 | { | |
4294 | .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), | |
4295 | .dbuf_mask = { | |
4296 | [PIPE_B] = BIT(DBUF_S1), | |
06812bd9 VS |
4297 | [PIPE_C] = BIT(DBUF_S2), |
4298 | }, | |
ff2cd863 SL |
4299 | }, |
4300 | { | |
4301 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), | |
4302 | .dbuf_mask = { | |
4303 | [PIPE_A] = BIT(DBUF_S1), | |
4304 | [PIPE_B] = BIT(DBUF_S1), | |
06812bd9 VS |
4305 | [PIPE_C] = BIT(DBUF_S2), |
4306 | }, | |
ff2cd863 SL |
4307 | }, |
4308 | { | |
4309 | .active_pipes = BIT(PIPE_D), | |
4310 | .dbuf_mask = { | |
06812bd9 VS |
4311 | [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1), |
4312 | }, | |
ff2cd863 SL |
4313 | }, |
4314 | { | |
4315 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), | |
4316 | .dbuf_mask = { | |
4317 | [PIPE_A] = BIT(DBUF_S1), | |
06812bd9 VS |
4318 | [PIPE_D] = BIT(DBUF_S2), |
4319 | }, | |
ff2cd863 SL |
4320 | }, |
4321 | { | |
4322 | .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), | |
4323 | .dbuf_mask = { | |
4324 | [PIPE_B] = BIT(DBUF_S1), | |
06812bd9 VS |
4325 | [PIPE_D] = BIT(DBUF_S2), |
4326 | }, | |
ff2cd863 SL |
4327 | }, |
4328 | { | |
4329 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), | |
4330 | .dbuf_mask = { | |
4331 | [PIPE_A] = BIT(DBUF_S1), | |
4332 | [PIPE_B] = BIT(DBUF_S1), | |
06812bd9 VS |
4333 | [PIPE_D] = BIT(DBUF_S2), |
4334 | }, | |
ff2cd863 SL |
4335 | }, |
4336 | { | |
4337 | .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), | |
4338 | .dbuf_mask = { | |
4339 | [PIPE_C] = BIT(DBUF_S1), | |
06812bd9 VS |
4340 | [PIPE_D] = BIT(DBUF_S2), |
4341 | }, | |
ff2cd863 SL |
4342 | }, |
4343 | { | |
4344 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), | |
4345 | .dbuf_mask = { | |
4346 | [PIPE_A] = BIT(DBUF_S1), | |
4347 | [PIPE_C] = BIT(DBUF_S2), | |
06812bd9 VS |
4348 | [PIPE_D] = BIT(DBUF_S2), |
4349 | }, | |
ff2cd863 SL |
4350 | }, |
4351 | { | |
4352 | .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), | |
4353 | .dbuf_mask = { | |
4354 | [PIPE_B] = BIT(DBUF_S1), | |
4355 | [PIPE_C] = BIT(DBUF_S2), | |
06812bd9 VS |
4356 | [PIPE_D] = BIT(DBUF_S2), |
4357 | }, | |
ff2cd863 SL |
4358 | }, |
4359 | { | |
4360 | .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), | |
4361 | .dbuf_mask = { | |
4362 | [PIPE_A] = BIT(DBUF_S1), | |
4363 | [PIPE_B] = BIT(DBUF_S1), | |
4364 | [PIPE_C] = BIT(DBUF_S2), | |
06812bd9 VS |
4365 | [PIPE_D] = BIT(DBUF_S2), |
4366 | }, | |
ff2cd863 | 4367 | }, |
05e8155a | 4368 | {} |
ff2cd863 SL |
4369 | }; |
4370 | ||
05e8155a VS |
4371 | static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, |
4372 | const struct dbuf_slice_conf_entry *dbuf_slices) | |
ff2cd863 SL |
4373 | { |
4374 | int i; | |
4375 | ||
05e8155a | 4376 | for (i = 0; i < dbuf_slices[i].active_pipes; i++) { |
ff2cd863 SL |
4377 | if (dbuf_slices[i].active_pipes == active_pipes) |
4378 | return dbuf_slices[i].dbuf_mask[pipe]; | |
4379 | } | |
4380 | return 0; | |
4381 | } | |
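A minimal standalone sketch of the lookup above (the enum values and the single table entry below are local stand-ins for illustration; the real tables are the icl/tgl ones above): with pipes A and B active on ICL, pipe A resolves to slice S1 and pipe B to slice S2, matching the table.

#include <stdio.h>

enum { PIPE_A, PIPE_B, PIPE_C, MAX_PIPES };
enum { DBUF_S1, DBUF_S2 };

struct entry {
	unsigned char active_pipes;
	unsigned char dbuf_mask[MAX_PIPES];
};

static const struct entry icl_demo[] = {
	{ .active_pipes = 1 << PIPE_A | 1 << PIPE_B,
	  .dbuf_mask = { [PIPE_A] = 1 << DBUF_S1, [PIPE_B] = 1 << DBUF_S2 } },
	{}	/* empty sentinel terminates the walk */
};

static unsigned char lookup(int pipe, unsigned char active_pipes,
			    const struct entry *e)
{
	for (; e->active_pipes; e++)		/* stop on the sentinel */
		if (e->active_pipes == active_pipes)
			return e->dbuf_mask[pipe];
	return 0;				/* no match: no slice */
}

int main(void)
{
	unsigned char active = 1 << PIPE_A | 1 << PIPE_B;

	printf("pipe A mask %#x, pipe B mask %#x\n",
	       lookup(PIPE_A, active, icl_demo),
	       lookup(PIPE_B, active, icl_demo));	/* 0x1 and 0x2 */
	return 0;
}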
4382 | ||
4383 | /* | |
4384 | * This function finds an entry with the same enabled pipe configuration | |
4385 | * and returns the corresponding DBuf slice mask as stated in the BSpec | |
4386 | * for the particular platform. | |
4387 | */ | |
05e8155a | 4388 | static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) |
ff2cd863 SL |
4389 | { |
4390 | /* | |
4391 | * FIXME: For ICL this is still a bit unclear, as a previous BSpec revision | |
4392 | * required calculating a "pipe ratio" in order to determine | |
4393 | * whether one or two slices can be used for single-pipe configurations, | |
4394 | * as an additional constraint to the existing table. | |
4395 | * However, based on recent info, it should not be a "pipe ratio" | |
4396 | * but rather a ratio between pixel_rate and cdclk with additional | |
4397 | * constants, so for now we only use the table until this is | |
4398 | * clarified. This is also why a crtc_state parameter used to be | |
4399 | * passed here - we will need it again once those additional | |
4400 | * constraints pop up. | |
4401 | */ | |
05e8155a | 4402 | return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs); |
ff2cd863 SL |
4403 | } |
4404 | ||
05e8155a | 4405 | static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) |
ff2cd863 | 4406 | { |
05e8155a | 4407 | return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs); |
ff2cd863 SL |
4408 | } |
4409 | ||
4410 | static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state, | |
05e8155a | 4411 | u8 active_pipes) |
ff2cd863 SL |
4412 | { |
4413 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); | |
4414 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |
4415 | enum pipe pipe = crtc->pipe; | |
4416 | ||
4417 | if (IS_GEN(dev_priv, 12)) | |
05e8155a | 4418 | return tgl_compute_dbuf_slices(pipe, active_pipes); |
ff2cd863 | 4419 | else if (IS_GEN(dev_priv, 11)) |
05e8155a | 4420 | return icl_compute_dbuf_slices(pipe, active_pipes); |
ff2cd863 SL |
4421 | /* |
4422 | * For anything else just return one slice for now. | |
4423 | * Should be extended for other platforms. | |
4424 | */ | |
4425 | return BIT(DBUF_S1); | |
4426 | } | |
4427 | ||
24719e94 | 4428 | static u64 |
ec193640 ML |
4429 | skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, |
4430 | const struct intel_plane_state *plane_state, | |
d1d23d7f | 4431 | int color_plane) |
b9cec075 | 4432 | { |
f90a85e7 | 4433 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); |
7b3cb17a | 4434 | const struct drm_framebuffer *fb = plane_state->hw.fb; |
5ce9a649 JN |
4435 | u32 data_rate; |
4436 | u32 width = 0, height = 0; | |
7084b50b | 4437 | uint_fixed_16_16_t down_scale_amount; |
24719e94 | 4438 | u64 rate; |
a1de91e5 | 4439 | |
f90a85e7 | 4440 | if (!plane_state->uapi.visible) |
a1de91e5 | 4441 | return 0; |
8305494e | 4442 | |
d1d23d7f | 4443 | if (plane->id == PLANE_CURSOR) |
a1de91e5 | 4444 | return 0; |
d1d23d7f VS |
4445 | |
4446 | if (color_plane == 1 && | |
4941f35b | 4447 | !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) |
a1de91e5 | 4448 | return 0; |
a280f7dd | 4449 | |
fce5adf5 VS |
4450 | /* |
4451 | * Src coordinates are already rotated by 270 degrees for | |
4452 | * the 90/270 degree plane rotation cases (to match the | |
4453 | * GTT mapping), hence no need to account for rotation here. | |
4454 | */ | |
f90a85e7 ML |
4455 | width = drm_rect_width(&plane_state->uapi.src) >> 16; |
4456 | height = drm_rect_height(&plane_state->uapi.src) >> 16; | |
a280f7dd | 4457 | |
b879d58f | 4458 | /* UV plane does 1/2 pixel sub-sampling */ |
d1d23d7f | 4459 | if (color_plane == 1) { |
b879d58f MK |
4460 | width /= 2; |
4461 | height /= 2; | |
2cd601c6 CK |
4462 | } |
4463 | ||
24719e94 | 4464 | data_rate = width * height; |
b879d58f | 4465 | |
ec193640 | 4466 | down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state); |
8d19d7d9 | 4467 | |
24719e94 ML |
4468 | rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); |
4469 | ||
d1d23d7f | 4470 | rate *= fb->format->cpp[color_plane]; |
24719e94 | 4471 | return rate; |
b9cec075 DL |
4472 | } |
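A worked example of the relative data rate above, as a minimal standalone sketch (the 1920x1080 NV12 plane with no downscaling is a made-up case; NV12 has cpp 1 for the Y plane and cpp 2 for the 2x2 subsampled UV plane):

#include <stdio.h>

int main(void)
{
	unsigned int w = 1920, h = 1080;	/* visible source size, downscale amount == 1.0 */
	unsigned long long y_rate  = (unsigned long long)w * h * 1;		  /* Y plane:  cpp = 1 */
	unsigned long long uv_rate = (unsigned long long)(w / 2) * (h / 2) * 2;  /* UV plane: cpp = 2 */

	printf("y=%llu uv=%llu total=%llu\n", y_rate, uv_rate, y_rate + uv_rate);
	return 0;
}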
4473 | ||
24719e94 | 4474 | static u64 |
ec193640 | 4475 | skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, |
24719e94 ML |
4476 | u64 *plane_data_rate, |
4477 | u64 *uv_plane_data_rate) | |
b9cec075 | 4478 | { |
af9fbfa6 ML |
4479 | struct intel_plane *plane; |
4480 | const struct intel_plane_state *plane_state; | |
24719e94 | 4481 | u64 total_data_rate = 0; |
a6d3460e | 4482 | |
a1de91e5 | 4483 | /* Calculate and cache data rate for each plane */ |
af9fbfa6 ML |
4484 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { |
4485 | enum plane_id plane_id = plane->id; | |
24719e94 | 4486 | u64 rate; |
a6d3460e | 4487 | |
b879d58f | 4488 | /* packed/y */ |
ec193640 | 4489 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); |
d5cdfdf5 | 4490 | plane_data_rate[plane_id] = rate; |
1e6ee542 | 4491 | total_data_rate += rate; |
a6d3460e | 4492 | |
b879d58f | 4493 | /* uv-plane */ |
ec193640 | 4494 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); |
b879d58f | 4495 | uv_plane_data_rate[plane_id] = rate; |
1e6ee542 | 4496 | total_data_rate += rate; |
b9cec075 DL |
4497 | } |
4498 | ||
4499 | return total_data_rate; | |
4500 | } | |
4501 | ||
b048a00b | 4502 | static u64 |
ec193640 | 4503 | icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, |
b048a00b ML |
4504 | u64 *plane_data_rate) |
4505 | { | |
af9fbfa6 ML |
4506 | struct intel_plane *plane; |
4507 | const struct intel_plane_state *plane_state; | |
b048a00b ML |
4508 | u64 total_data_rate = 0; |
4509 | ||
b048a00b | 4510 | /* Calculate and cache data rate for each plane */ |
af9fbfa6 ML |
4511 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { |
4512 | enum plane_id plane_id = plane->id; | |
b048a00b ML |
4513 | u64 rate; |
4514 | ||
c47b7ddb | 4515 | if (!plane_state->planar_linked_plane) { |
ec193640 | 4516 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); |
b048a00b ML |
4517 | plane_data_rate[plane_id] = rate; |
4518 | total_data_rate += rate; | |
4519 | } else { | |
4520 | enum plane_id y_plane_id; | |
4521 | ||
4522 | /* | |
4523 | * The slave plane might not be iterated in | |
af9fbfa6 | 4524 | * intel_atomic_crtc_state_for_each_plane_state(), |
b048a00b ML |
4525 | * and needs the master plane state which may be |
4526 | * NULL if we try get_new_plane_state(), so we | |
4527 | * always calculate from the master. | |
4528 | */ | |
c47b7ddb | 4529 | if (plane_state->planar_slave) |
b048a00b ML |
4530 | continue; |
4531 | ||
4532 | /* Y plane rate is calculated on the slave */ | |
ec193640 | 4533 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); |
c47b7ddb | 4534 | y_plane_id = plane_state->planar_linked_plane->id; |
b048a00b ML |
4535 | plane_data_rate[y_plane_id] = rate; |
4536 | total_data_rate += rate; | |
4537 | ||
ec193640 | 4538 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); |
b048a00b ML |
4539 | plane_data_rate[plane_id] = rate; |
4540 | total_data_rate += rate; | |
4541 | } | |
4542 | } | |
4543 | ||
4544 | return total_data_rate; | |
4545 | } | |
4546 | ||
c107acfe | 4547 | static int |
072fcc30 | 4548 | skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) |
b9cec075 | 4549 | { |
2a67054b VS |
4550 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
4551 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |
ec193640 | 4552 | struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb; |
5ce9a649 JN |
4553 | u16 alloc_size, start = 0; |
4554 | u16 total[I915_MAX_PLANES] = {}; | |
4555 | u16 uv_total[I915_MAX_PLANES] = {}; | |
24719e94 | 4556 | u64 total_data_rate; |
d5cdfdf5 | 4557 | enum plane_id plane_id; |
c107acfe | 4558 | int num_active; |
24719e94 ML |
4559 | u64 plane_data_rate[I915_MAX_PLANES] = {}; |
4560 | u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; | |
0aded171 | 4561 | u32 blocks; |
d8e87498 | 4562 | int level; |
b9cec075 | 4563 | |
5a920b85 | 4564 | /* Clear the partitioning for disabled planes. */ |
ec193640 ML |
4565 | memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); |
4566 | memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv)); | |
5a920b85 | 4567 | |
1326a92c | 4568 | if (!crtc_state->hw.active) { |
ce0ba283 | 4569 | alloc->start = alloc->end = 0; |
c107acfe MR |
4570 | return 0; |
4571 | } | |
4572 | ||
323b0a82 LDM |
4573 | if (INTEL_GEN(dev_priv) >= 11) |
4574 | total_data_rate = | |
ec193640 | 4575 | icl_get_total_relative_data_rate(crtc_state, |
323b0a82 LDM |
4576 | plane_data_rate); |
4577 | else | |
b048a00b | 4578 | total_data_rate = |
ec193640 | 4579 | skl_get_total_relative_data_rate(crtc_state, |
b048a00b ML |
4580 | plane_data_rate, |
4581 | uv_plane_data_rate); | |
323b0a82 | 4582 | |
b048a00b | 4583 | |
ec193640 | 4584 | skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate, |
072fcc30 | 4585 | alloc, &num_active); |
34bb56af | 4586 | alloc_size = skl_ddb_entry_size(alloc); |
336031ea | 4587 | if (alloc_size == 0) |
c107acfe | 4588 | return 0; |
b9cec075 | 4589 | |
d8e87498 | 4590 | /* Allocate fixed number of blocks for cursor. */ |
ec193640 | 4591 | total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active); |
d8e87498 | 4592 | alloc_size -= total[PLANE_CURSOR]; |
ec193640 | 4593 | crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start = |
d8e87498 | 4594 | alloc->end - total[PLANE_CURSOR]; |
ec193640 | 4595 | crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; |
d8e87498 MR |
4596 | |
4597 | if (total_data_rate == 0) | |
4598 | return 0; | |
a6d3460e | 4599 | |
49845a7a | 4600 | /* |
d8e87498 MR |
4601 | * Find the highest watermark level for which we can satisfy the block |
4602 | * requirement of active planes. | |
49845a7a | 4603 | */ |
d8e87498 | 4604 | for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) { |
25db2eaf | 4605 | blocks = 0; |
2a67054b | 4606 | for_each_plane_id_on_crtc(crtc, plane_id) { |
5e6037c8 | 4607 | const struct skl_plane_wm *wm = |
ec193640 | 4608 | &crtc_state->wm.skl.optimal.planes[plane_id]; |
10a7e07b VS |
4609 | |
4610 | if (plane_id == PLANE_CURSOR) { | |
4ba48701 | 4611 | if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) { |
48a1b8d4 PB |
4612 | drm_WARN_ON(&dev_priv->drm, |
4613 | wm->wm[level].min_ddb_alloc != U16_MAX); | |
10a7e07b VS |
4614 | blocks = U32_MAX; |
4615 | break; | |
4616 | } | |
d8e87498 | 4617 | continue; |
10a7e07b | 4618 | } |
80958155 | 4619 | |
961d95e0 VS |
4620 | blocks += wm->wm[level].min_ddb_alloc; |
4621 | blocks += wm->uv_wm[level].min_ddb_alloc; | |
d8e87498 MR |
4622 | } |
4623 | ||
3cf963cf | 4624 | if (blocks <= alloc_size) { |
d8e87498 MR |
4625 | alloc_size -= blocks; |
4626 | break; | |
4627 | } | |
80958155 DL |
4628 | } |
4629 | ||
d8e87498 | 4630 | if (level < 0) { |
f8d18d5c WK |
4631 | drm_dbg_kms(&dev_priv->drm, |
4632 | "Requested display configuration exceeds system DDB limitations"); | |
4633 | drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n", | |
4634 | blocks, alloc_size); | |
5ba6faaf KM |
4635 | return -EINVAL; |
4636 | } | |
4637 | ||
b9cec075 | 4638 | /* |
d8e87498 MR |
4639 | * Grant each plane the blocks it requires at the highest achievable |
4640 | * watermark level, plus an extra share of the leftover blocks | |
4641 | * proportional to its relative data rate. | |
b9cec075 | 4642 | */ |
2a67054b | 4643 | for_each_plane_id_on_crtc(crtc, plane_id) { |
5e6037c8 | 4644 | const struct skl_plane_wm *wm = |
ec193640 | 4645 | &crtc_state->wm.skl.optimal.planes[plane_id]; |
d8e87498 MR |
4646 | u64 rate; |
4647 | u16 extra; | |
b9cec075 | 4648 | |
d5cdfdf5 | 4649 | if (plane_id == PLANE_CURSOR) |
49845a7a ML |
4650 | continue; |
4651 | ||
b9cec075 | 4652 | /* |
d8e87498 MR |
4653 | * We've accounted for all active planes; remaining planes are |
4654 | * all disabled. | |
b9cec075 | 4655 | */ |
d8e87498 MR |
4656 | if (total_data_rate == 0) |
4657 | break; | |
b9cec075 | 4658 | |
d8e87498 MR |
4659 | rate = plane_data_rate[plane_id]; |
4660 | extra = min_t(u16, alloc_size, | |
4661 | DIV64_U64_ROUND_UP(alloc_size * rate, | |
4662 | total_data_rate)); | |
961d95e0 | 4663 | total[plane_id] = wm->wm[level].min_ddb_alloc + extra; |
d8e87498 MR |
4664 | alloc_size -= extra; |
4665 | total_data_rate -= rate; | |
9a30a261 | 4666 | |
d8e87498 MR |
4667 | if (total_data_rate == 0) |
4668 | break; | |
a1de91e5 | 4669 | |
d8e87498 MR |
4670 | rate = uv_plane_data_rate[plane_id]; |
4671 | extra = min_t(u16, alloc_size, | |
4672 | DIV64_U64_ROUND_UP(alloc_size * rate, | |
4673 | total_data_rate)); | |
961d95e0 | 4674 | uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra; |
d8e87498 MR |
4675 | alloc_size -= extra; |
4676 | total_data_rate -= rate; | |
4677 | } | |
48a1b8d4 | 4678 | drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0); |
d8e87498 MR |
4679 | |
4680 | /* Set the actual DDB start/end points for each plane */ | |
4681 | start = alloc->start; | |
2a67054b | 4682 | for_each_plane_id_on_crtc(crtc, plane_id) { |
5e6037c8 | 4683 | struct skl_ddb_entry *plane_alloc = |
ec193640 | 4684 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; |
5e6037c8 | 4685 | struct skl_ddb_entry *uv_plane_alloc = |
ec193640 | 4686 | &crtc_state->wm.skl.plane_ddb_uv[plane_id]; |
d8e87498 MR |
4687 | |
4688 | if (plane_id == PLANE_CURSOR) | |
4689 | continue; | |
4690 | ||
b048a00b | 4691 | /* Gen11+ uses a separate plane for UV watermarks */ |
48a1b8d4 PB |
4692 | drm_WARN_ON(&dev_priv->drm, |
4693 | INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]); | |
d8e87498 MR |
4694 | |
4695 | /* Leave disabled planes at (0,0) */ | |
4696 | if (total[plane_id]) { | |
4697 | plane_alloc->start = start; | |
4698 | start += total[plane_id]; | |
4699 | plane_alloc->end = start; | |
4700 | } | |
b048a00b | 4701 | |
d8e87498 MR |
4702 | if (uv_total[plane_id]) { |
4703 | uv_plane_alloc->start = start; | |
4704 | start += uv_total[plane_id]; | |
4705 | uv_plane_alloc->end = start; | |
c107acfe | 4706 | } |
d8e87498 | 4707 | } |
9a30a261 | 4708 | |
d8e87498 MR |
4709 | /* |
4710 | * When we calculated watermark values we didn't know how high | |
4711 | * of a level we'd actually be able to hit, so we just marked | |
4712 | * all levels as "enabled." Go back now and disable the ones | |
4713 | * that aren't actually possible. | |
4714 | */ | |
4715 | for (level++; level <= ilk_wm_max_level(dev_priv); level++) { | |
2a67054b | 4716 | for_each_plane_id_on_crtc(crtc, plane_id) { |
5e6037c8 | 4717 | struct skl_plane_wm *wm = |
ec193640 | 4718 | &crtc_state->wm.skl.optimal.planes[plane_id]; |
a301cb0f VS |
4719 | |
4720 | /* | |
4721 | * We only disable the watermarks for each plane if | |
4722 | * they exceed the ddb allocation of said plane. This | |
4723 | * is done so that we don't end up touching cursor | |
4724 | * watermarks needlessly when some other plane reduces | |
4725 | * our max possible watermark level. | |
4726 | * | |
4727 | * Bspec has this to say about the PLANE_WM enable bit: | |
4728 | * "All the watermarks at this level for all enabled | |
4729 | * planes must be enabled before the level will be used." | |
4730 | * So this is actually safe to do. | |
4731 | */ | |
4732 | if (wm->wm[level].min_ddb_alloc > total[plane_id] || | |
4733 | wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id]) | |
4734 | memset(&wm->wm[level], 0, sizeof(wm->wm[level])); | |
290248c2 | 4735 | |
c384afe3 | 4736 | /* |
39564ae8 | 4737 | * Wa_1408961008:icl, ehl |
c384afe3 VS |
4738 | * Underruns with WM1+ disabled |
4739 | */ | |
39564ae8 | 4740 | if (IS_GEN(dev_priv, 11) && |
290248c2 VS |
4741 | level == 1 && wm->wm[0].plane_en) { |
4742 | wm->wm[level].plane_res_b = wm->wm[0].plane_res_b; | |
c384afe3 VS |
4743 | wm->wm[level].plane_res_l = wm->wm[0].plane_res_l; |
4744 | wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; | |
290248c2 | 4745 | } |
d8e87498 MR |
4746 | } |
4747 | } | |
4748 | ||
4749 | /* | |
4750 | * Go back and disable the transition watermark if it turns out we | |
4751 | * don't have enough DDB blocks for it. | |
4752 | */ | |
2a67054b | 4753 | for_each_plane_id_on_crtc(crtc, plane_id) { |
5e6037c8 | 4754 | struct skl_plane_wm *wm = |
ec193640 | 4755 | &crtc_state->wm.skl.optimal.planes[plane_id]; |
5e6037c8 | 4756 | |
b19c9bca | 4757 | if (wm->trans_wm.plane_res_b >= total[plane_id]) |
d8e87498 | 4758 | memset(&wm->trans_wm, 0, sizeof(wm->trans_wm)); |
b9cec075 DL |
4759 | } |
4760 | ||
c107acfe | 4761 | return 0; |
b9cec075 DL |
4762 | } |
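A minimal standalone sketch of the "minimum plus proportional share of the leftover" step above (two planes with made-up rates and block counts; the driver uses DIV64_U64_ROUND_UP, which the divisions below mimic):

#include <stdio.h>

int main(void)
{
	unsigned int  min_alloc[2] = { 40, 20 };	/* min_ddb_alloc at the chosen level */
	unsigned long long rate[2] = { 300, 100 };	/* relative data rates */
	unsigned long long total_rate = 400;
	unsigned int alloc_size = 100;			/* leftover blocks after the minimums */
	int i;

	for (i = 0; i < 2; i++) {
		/* round-up division, clamped to what is still available */
		unsigned int extra = (alloc_size * rate[i] + total_rate - 1) / total_rate;

		if (extra > alloc_size)
			extra = alloc_size;
		printf("plane %d: %u blocks\n", i, min_alloc[i] + extra);	/* 115, then 45 */
		alloc_size -= extra;
		total_rate -= rate[i];
	}
	return 0;
}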
4763 | ||
2d41c0b5 PB |
4764 | /* |
4765 | * The max latency should be 257 (max the punit can code is 255 and we add 2us | |
ac484963 | 4766 | * for the read latency) and cpp should always be <= 8, so that |
2d41c0b5 PB |
4767 | * should allow pixel_rate up to ~2 GHz, which seems sufficient since the | |
4768 | * max 2xcdclk is 1350 MHz and the pixel rate should never exceed that. | |
4769 | */ | |
6c64dd37 | 4770 | static uint_fixed_16_16_t |
5ce9a649 JN |
4771 | skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate, |
4772 | u8 cpp, u32 latency, u32 dbuf_block_size) | |
2d41c0b5 | 4773 | { |
5ce9a649 | 4774 | u32 wm_intermediate_val; |
b95320bd | 4775 | uint_fixed_16_16_t ret; |
2d41c0b5 PB |
4776 | |
4777 | if (latency == 0) | |
b95320bd | 4778 | return FP_16_16_MAX; |
2d41c0b5 | 4779 | |
b95320bd | 4780 | wm_intermediate_val = latency * pixel_rate * cpp; |
df8ee190 | 4781 | ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); |
6c64dd37 PZ |
4782 | |
4783 | if (INTEL_GEN(dev_priv) >= 10) | |
4784 | ret = add_fixed16_u32(ret, 1); | |
4785 | ||
2d41c0b5 PB |
4786 | return ret; |
4787 | } | |
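A back-of-the-envelope check of the comment above, as a standalone sketch (pixel_rate is in kHz; the numbers just probe the worst case the comment describes, they are not driver values): the intermediate product still fits in a u32, and the resulting ~8031 blocks fits comfortably in the 16 integer bits of the 16.16 fixed-point result.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t latency = 257;			/* us: 255 from the punit + 2 for read latency */
	uint32_t pixel_rate = 2000000;		/* kHz, i.e. ~2 GHz */
	uint32_t cpp = 8;
	uint32_t val = latency * pixel_rate * cpp;			/* ~4.11e9, still fits in a u32 */
	uint64_t blocks_fp = ((uint64_t)val << 16) / (1000 * 512);	/* 16.16, 512-byte blocks */

	printf("intermediate=%u, blocks=%llu.%02llu\n", val,
	       (unsigned long long)(blocks_fp >> 16),
	       (unsigned long long)((blocks_fp & 0xffff) * 100 >> 16));	/* 8031.25 */
	return 0;
}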
4788 | ||
5ce9a649 JN |
4789 | static uint_fixed_16_16_t |
4790 | skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, | |
4791 | uint_fixed_16_16_t plane_blocks_per_line) | |
2d41c0b5 | 4792 | { |
5ce9a649 | 4793 | u32 wm_intermediate_val; |
b95320bd | 4794 | uint_fixed_16_16_t ret; |
2d41c0b5 PB |
4795 | |
4796 | if (latency == 0) | |
b95320bd | 4797 | return FP_16_16_MAX; |
2d41c0b5 | 4798 | |
2d41c0b5 | 4799 | wm_intermediate_val = latency * pixel_rate; |
b95320bd MK |
4800 | wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, |
4801 | pipe_htotal * 1000); | |
eac2cb81 | 4802 | ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line); |
2d41c0b5 PB |
4803 | return ret; |
4804 | } | |
4805 | ||
d555cb58 | 4806 | static uint_fixed_16_16_t |
ec193640 | 4807 | intel_get_linetime_us(const struct intel_crtc_state *crtc_state) |
d555cb58 | 4808 | { |
5ce9a649 JN |
4809 | u32 pixel_rate; |
4810 | u32 crtc_htotal; | |
d555cb58 KM |
4811 | uint_fixed_16_16_t linetime_us; |
4812 | ||
1326a92c | 4813 | if (!crtc_state->hw.active) |
eac2cb81 | 4814 | return u32_to_fixed16(0); |
d555cb58 | 4815 | |
ec193640 | 4816 | pixel_rate = crtc_state->pixel_rate; |
d555cb58 KM |
4817 | |
4818 | if (WARN_ON(pixel_rate == 0)) | |
eac2cb81 | 4819 | return u32_to_fixed16(0); |
d555cb58 | 4820 | |
1326a92c | 4821 | crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal; |
eac2cb81 | 4822 | linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); |
d555cb58 KM |
4823 | |
4824 | return linetime_us; | |
4825 | } | |
4826 | ||
5ce9a649 | 4827 | static u32 |
ec193640 ML |
4828 | skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state, |
4829 | const struct intel_plane_state *plane_state) | |
9c2f7a9d | 4830 | { |
5ce9a649 | 4831 | u64 adjusted_pixel_rate; |
7084b50b | 4832 | uint_fixed_16_16_t downscale_amount; |
9c2f7a9d KM |
4833 | |
4834 | /* Shouldn't reach here on disabled planes... */ | |
ec193640 | 4835 | if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) |
9c2f7a9d KM |
4836 | return 0; |
4837 | ||
4838 | /* | |
4839 | * Adjusted plane pixel rate is just the pipe's adjusted pixel rate | |
4840 | * with additional adjustments for plane-specific scaling. | |
4841 | */ | |
ec193640 ML |
4842 | adjusted_pixel_rate = crtc_state->pixel_rate; |
4843 | downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state); | |
9c2f7a9d | 4844 | |
7084b50b KM |
4845 | return mul_round_up_u32_fixed16(adjusted_pixel_rate, |
4846 | downscale_amount); | |
9c2f7a9d KM |
4847 | } |
4848 | ||
7e452fdb | 4849 | static int |
c92558aa VS |
4850 | skl_compute_wm_params(const struct intel_crtc_state *crtc_state, |
4851 | int width, const struct drm_format_info *format, | |
4852 | u64 modifier, unsigned int rotation, | |
4853 | u32 plane_pixel_rate, struct skl_wm_params *wp, | |
4854 | int color_plane) | |
2d41c0b5 | 4855 | { |
2225f3c6 | 4856 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
c92558aa | 4857 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5ce9a649 | 4858 | u32 interm_pbpl; |
2d41c0b5 | 4859 | |
df7d4156 | 4860 | /* only planar formats have two planes */ |
4941f35b ID |
4861 | if (color_plane == 1 && |
4862 | !intel_format_info_is_yuv_semiplanar(format, modifier)) { | |
f8d18d5c WK |
4863 | drm_dbg_kms(&dev_priv->drm, |
4864 | "Non planar format have single plane\n"); | |
942aa2d0 MK |
4865 | return -EINVAL; |
4866 | } | |
4867 | ||
c92558aa VS |
4868 | wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || |
4869 | modifier == I915_FORMAT_MOD_Yf_TILED || | |
4870 | modifier == I915_FORMAT_MOD_Y_TILED_CCS || | |
4871 | modifier == I915_FORMAT_MOD_Yf_TILED_CCS; | |
4872 | wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; | |
4873 | wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || | |
4874 | modifier == I915_FORMAT_MOD_Yf_TILED_CCS; | |
4941f35b | 4875 | wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); |
a280f7dd | 4876 | |
c92558aa | 4877 | wp->width = width; |
45bee430 | 4878 | if (color_plane == 1 && wp->is_planar) |
942aa2d0 MK |
4879 | wp->width /= 2; |
4880 | ||
c92558aa VS |
4881 | wp->cpp = format->cpp[color_plane]; |
4882 | wp->plane_pixel_rate = plane_pixel_rate; | |
9c2f7a9d | 4883 | |
df8ee190 | 4884 | if (INTEL_GEN(dev_priv) >= 11 && |
c92558aa | 4885 | modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) |
df8ee190 MK |
4886 | wp->dbuf_block_size = 256; |
4887 | else | |
4888 | wp->dbuf_block_size = 512; | |
4889 | ||
c92558aa | 4890 | if (drm_rotation_90_or_270(rotation)) { |
7e452fdb | 4891 | switch (wp->cpp) { |
1186fa85 | 4892 | case 1: |
7e452fdb | 4893 | wp->y_min_scanlines = 16; |
1186fa85 PZ |
4894 | break; |
4895 | case 2: | |
7e452fdb | 4896 | wp->y_min_scanlines = 8; |
1186fa85 | 4897 | break; |
1186fa85 | 4898 | case 4: |
7e452fdb | 4899 | wp->y_min_scanlines = 4; |
1186fa85 | 4900 | break; |
86a462bc | 4901 | default: |
7e452fdb | 4902 | MISSING_CASE(wp->cpp); |
86a462bc | 4903 | return -EINVAL; |
1186fa85 PZ |
4904 | } |
4905 | } else { | |
7e452fdb | 4906 | wp->y_min_scanlines = 4; |
1186fa85 PZ |
4907 | } |
4908 | ||
60e983ff | 4909 | if (skl_needs_memory_bw_wa(dev_priv)) |
7e452fdb | 4910 | wp->y_min_scanlines *= 2; |
2ef32dee | 4911 | |
7e452fdb KM |
4912 | wp->plane_bytes_per_line = wp->width * wp->cpp; |
4913 | if (wp->y_tiled) { | |
4914 | interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line * | |
df8ee190 MK |
4915 | wp->y_min_scanlines, |
4916 | wp->dbuf_block_size); | |
6c64dd37 PZ |
4917 | |
4918 | if (INTEL_GEN(dev_priv) >= 10) | |
4919 | interm_pbpl++; | |
4920 | ||
7e452fdb KM |
4921 | wp->plane_blocks_per_line = div_fixed16(interm_pbpl, |
4922 | wp->y_min_scanlines); | |
cf819eff | 4923 | } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) { |
df8ee190 MK |
4924 | interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, |
4925 | wp->dbuf_block_size); | |
7e452fdb | 4926 | wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); |
ef8a4fb4 | 4927 | } else { |
df8ee190 MK |
4928 | interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, |
4929 | wp->dbuf_block_size) + 1; | |
7e452fdb | 4930 | wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); |
7a1a8aed PZ |
4931 | } |
4932 | ||
7e452fdb KM |
4933 | wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, |
4934 | wp->plane_blocks_per_line); | |
c92558aa | 4935 | |
7e452fdb | 4936 | wp->linetime_us = fixed16_to_u32_round_up( |
c92558aa | 4937 | intel_get_linetime_us(crtc_state)); |
7e452fdb KM |
4938 | |
4939 | return 0; | |
4940 | } | |
4941 | ||
c92558aa VS |
4942 | static int |
4943 | skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, | |
4944 | const struct intel_plane_state *plane_state, | |
4945 | struct skl_wm_params *wp, int color_plane) | |
4946 | { | |
7b3cb17a | 4947 | const struct drm_framebuffer *fb = plane_state->hw.fb; |
c92558aa VS |
4948 | int width; |
4949 | ||
3a612765 ML |
4950 | /* |
4951 | * Src coordinates are already rotated by 270 degrees for | |
4952 | * the 90/270 degree plane rotation cases (to match the | |
4953 | * GTT mapping), hence no need to account for rotation here. | |
4954 | */ | |
f90a85e7 | 4955 | width = drm_rect_width(&plane_state->uapi.src) >> 16; |
c92558aa VS |
4956 | |
4957 | return skl_compute_wm_params(crtc_state, width, | |
4958 | fb->format, fb->modifier, | |
7b3cb17a | 4959 | plane_state->hw.rotation, |
c92558aa VS |
4960 | skl_adjusted_plane_pixel_rate(crtc_state, plane_state), |
4961 | wp, color_plane); | |
4962 | } | |
4963 | ||
b52c273b VS |
4964 | static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) |
4965 | { | |
4966 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) | |
4967 | return true; | |
4968 | ||
4969 | /* The number of lines is ignored for the level 0 watermark. */ | |
4970 | return level > 0; | |
4971 | } | |
4972 | ||
ec193640 | 4973 | static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, |
d8e87498 MR |
4974 | int level, |
4975 | const struct skl_wm_params *wp, | |
4976 | const struct skl_wm_level *result_prev, | |
4977 | struct skl_wm_level *result /* out */) | |
7e452fdb | 4978 | { |
2225f3c6 | 4979 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
5ce9a649 | 4980 | u32 latency = dev_priv->wm.skl_latency[level]; |
7e452fdb KM |
4981 | uint_fixed_16_16_t method1, method2; |
4982 | uint_fixed_16_16_t selected_result; | |
961d95e0 | 4983 | u32 res_blocks, res_lines, min_ddb_alloc = 0; |
ce110ec3 | 4984 | |
0aded171 VS |
4985 | if (latency == 0) { |
4986 | /* reject it */ | |
4987 | result->min_ddb_alloc = U16_MAX; | |
692927f4 | 4988 | return; |
0aded171 | 4989 | } |
692927f4 | 4990 | |
25312ef1 VS |
4991 | /* |
4992 | * WaIncreaseLatencyIPCEnabled: kbl,cfl | |
4993 | * Display WA #1141: kbl,cfl | |
4994 | */ | |
421abe20 | 4995 | if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && |
7e452fdb KM |
4996 | dev_priv->ipc_enabled) |
4997 | latency += 4; | |
4998 | ||
60e983ff | 4999 | if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled) |
7e452fdb KM |
5000 | latency += 15; |
5001 | ||
5002 | method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, | |
df8ee190 | 5003 | wp->cpp, latency, wp->dbuf_block_size); |
7e452fdb | 5004 | method2 = skl_wm_method2(wp->plane_pixel_rate, |
1326a92c | 5005 | crtc_state->hw.adjusted_mode.crtc_htotal, |
1186fa85 | 5006 | latency, |
7e452fdb | 5007 | wp->plane_blocks_per_line); |
75676ed4 | 5008 | |
7e452fdb KM |
5009 | if (wp->y_tiled) { |
5010 | selected_result = max_fixed16(method2, wp->y_tile_minimum); | |
0fda6568 | 5011 | } else { |
1326a92c | 5012 | if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal / |
df8ee190 | 5013 | wp->dbuf_block_size < 1) && |
077b5820 | 5014 | (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { |
f1db3eaf | 5015 | selected_result = method2; |
077b5820 | 5016 | } else if (latency >= wp->linetime_us) { |
cf819eff | 5017 | if (IS_GEN(dev_priv, 9) && |
077b5820 PZ |
5018 | !IS_GEMINILAKE(dev_priv)) |
5019 | selected_result = min_fixed16(method1, method2); | |
5020 | else | |
5021 | selected_result = method2; | |
5022 | } else { | |
0fda6568 | 5023 | selected_result = method1; |
077b5820 | 5024 | } |
0fda6568 | 5025 | } |
2d41c0b5 | 5026 | |
eac2cb81 | 5027 | res_blocks = fixed16_to_u32_round_up(selected_result) + 1; |
d273ecce | 5028 | res_lines = div_round_up_fixed16(selected_result, |
7e452fdb | 5029 | wp->plane_blocks_per_line); |
e6d66171 | 5030 | |
a5b79d34 PZ |
5031 | if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { |
5032 | /* Display WA #1125: skl,bxt,kbl */ | |
5033 | if (level == 0 && wp->rc_surface) | |
5034 | res_blocks += | |
5035 | fixed16_to_u32_round_up(wp->y_tile_minimum); | |
5036 | ||
5037 | /* Display WA #1126: skl,bxt,kbl */ | |
5038 | if (level >= 1 && level <= 7) { | |
5039 | if (wp->y_tiled) { | |
5040 | res_blocks += | |
5041 | fixed16_to_u32_round_up(wp->y_tile_minimum); | |
5042 | res_lines += wp->y_min_scanlines; | |
5043 | } else { | |
5044 | res_blocks++; | |
5045 | } | |
8b2b53ce | 5046 | |
a5b79d34 PZ |
5047 | /* |
5048 | * Make sure result blocks for higher latency levels are | |
5049 | * at least as high as the level below the current level. | |
5050 | * Assumption in DDB algorithm optimization for special | |
5051 | * cases. Also covers Display WA #1125 for RC. | |
5052 | */ | |
5053 | if (result_prev->plane_res_b > res_blocks) | |
5054 | res_blocks = result_prev->plane_res_b; | |
5055 | } | |
0fda6568 | 5056 | } |
e6d66171 | 5057 | |
961d95e0 VS |
5058 | if (INTEL_GEN(dev_priv) >= 11) { |
5059 | if (wp->y_tiled) { | |
5060 | int extra_lines; | |
5061 | ||
5062 | if (res_lines % wp->y_min_scanlines == 0) | |
5063 | extra_lines = wp->y_min_scanlines; | |
5064 | else | |
5065 | extra_lines = wp->y_min_scanlines * 2 - | |
5066 | res_lines % wp->y_min_scanlines; | |
5067 | ||
5068 | min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines, | |
5069 | wp->plane_blocks_per_line); | |
5070 | } else { | |
5071 | min_ddb_alloc = res_blocks + | |
5072 | DIV_ROUND_UP(res_blocks, 10); | |
5073 | } | |
5074 | } | |
5075 | ||
b52c273b VS |
5076 | if (!skl_wm_has_lines(dev_priv, level)) |
5077 | res_lines = 0; | |
5078 | ||
0aded171 VS |
5079 | if (res_lines > 31) { |
5080 | /* reject it */ | |
5081 | result->min_ddb_alloc = U16_MAX; | |
d8e87498 | 5082 | return; |
0aded171 | 5083 | } |
d8e87498 MR |
5084 | |
5085 | /* | |
5086 | * If res_lines is valid, assume we can use this watermark level | |
5087 | * for now. We'll come back and disable it after we calculate the | |
5088 | * DDB allocation if it turns out we don't actually have enough | |
5089 | * blocks to satisfy it. | |
5090 | */ | |
62027b77 MK |
5091 | result->plane_res_b = res_blocks; |
5092 | result->plane_res_l = res_lines; | |
961d95e0 VS |
5093 | /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ |
5094 | result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1; | |
62027b77 | 5095 | result->plane_en = true; |
2d41c0b5 PB |
5096 | } |
5097 | ||
d8e87498 | 5098 | static void |
ec193640 | 5099 | skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, |
7e452fdb | 5100 | const struct skl_wm_params *wm_params, |
b048a00b | 5101 | struct skl_wm_level *levels) |
2d41c0b5 | 5102 | { |
2225f3c6 | 5103 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
d2f5e36d | 5104 | int level, max_level = ilk_wm_max_level(dev_priv); |
b048a00b | 5105 | struct skl_wm_level *result_prev = &levels[0]; |
a62163e9 | 5106 | |
d2f5e36d | 5107 | for (level = 0; level <= max_level; level++) { |
b048a00b | 5108 | struct skl_wm_level *result = &levels[level]; |
d2f5e36d | 5109 | |
ec193640 | 5110 | skl_compute_plane_wm(crtc_state, level, wm_params, |
d8e87498 | 5111 | result_prev, result); |
b048a00b ML |
5112 | |
5113 | result_prev = result; | |
d2f5e36d | 5114 | } |
2d41c0b5 PB |
5115 | } |
5116 | ||
ec193640 | 5117 | static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, |
6a3c910b | 5118 | const struct skl_wm_params *wp, |
d8e87498 | 5119 | struct skl_plane_wm *wm) |
407b50f3 | 5120 | { |
2225f3c6 | 5121 | struct drm_device *dev = crtc_state->uapi.crtc->dev; |
ca47667f | 5122 | const struct drm_i915_private *dev_priv = to_i915(dev); |
c834d03c | 5123 | u16 trans_min, trans_amount, trans_y_tile_min; |
5ce9a649 | 5124 | u16 wm0_sel_res_b, trans_offset_b, res_blocks; |
ca47667f | 5125 | |
ca47667f KM |
5126 | /* Transition WMs don't make any sense if IPC is disabled */ | |
5127 | if (!dev_priv->ipc_enabled) | |
14a43062 | 5128 | return; |
ca47667f | 5129 | |
a7f1e8e4 VS |
5130 | /* |
5131 | * WaDisableTWM:skl,kbl,cfl,bxt | |
5132 | * Transition WMs are not recommended by the HW team for GEN9 | |
5133 | */ | |
5134 | if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) | |
5135 | return; | |
5136 | ||
91961a85 | 5137 | if (INTEL_GEN(dev_priv) >= 11) |
ca47667f | 5138 | trans_min = 4; |
c834d03c VS |
5139 | else |
5140 | trans_min = 14; | |
5141 | ||
5142 | /* Display WA #1140: glk,cnl */ | |
5143 | if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) | |
5144 | trans_amount = 0; | |
5145 | else | |
5146 | trans_amount = 10; /* This is a configurable amount */ | |
ca47667f KM |
5147 | |
5148 | trans_offset_b = trans_min + trans_amount; | |
5149 | ||
cbacc79d PZ |
5150 | /* |
5151 | * The spec asks for Selected Result Blocks for wm0 (the real value), | |
5152 | * not Result Blocks (the integer value). Pay attention to the capital | |
5153 | * letters. The value wm_l0->plane_res_b is actually Result Blocks, but | |
5154 | * since Result Blocks is the ceiling of Selected Result Blocks plus 1, | |
5155 | * and since we later will have to get the ceiling of the sum in the | |
5156 | * transition watermarks calculation, we can just pretend Selected | |
5157 | * Result Blocks is Result Blocks minus 1 and it should work for the | |
5158 | * current platforms. | |
5159 | */ | |
6a3c910b | 5160 | wm0_sel_res_b = wm->wm[0].plane_res_b - 1; |
cbacc79d | 5161 | |
ca47667f | 5162 | if (wp->y_tiled) { |
5ce9a649 JN |
5163 | trans_y_tile_min = |
5164 | (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); | |
cbacc79d | 5165 | res_blocks = max(wm0_sel_res_b, trans_y_tile_min) + |
ca47667f KM |
5166 | trans_offset_b; |
5167 | } else { | |
cbacc79d | 5168 | res_blocks = wm0_sel_res_b + trans_offset_b; |
ca47667f KM |
5169 | |
5170 | /* WA BUG:1938466 add one block for non y-tile planes */ | |
5171 | if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0)) | |
5172 | res_blocks += 1; | |
ca47667f KM |
5173 | } |
5174 | ||
d8e87498 MR |
5175 | /* |
5176 | * Just assume we can enable the transition watermark. After | |
5177 | * computing the DDB we'll come back and disable it if that | |
5178 | * assumption turns out to be false. | |
5179 | */ | |
5180 | wm->trans_wm.plane_res_b = res_blocks + 1; | |
5181 | wm->trans_wm.plane_en = true; | |
407b50f3 DL |
5182 | } |
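A worked example of the non-y-tiled branch above (made-up numbers): on a gen11 part with IPC enabled, trans_min = 4 and trans_amount = 10, so trans_offset_b = 14; if wm0 needed plane_res_b = 20 blocks, then wm0_sel_res_b = 19, res_blocks = 19 + 14 = 33, and the transition watermark is programmed as 33 + 1 = 34 blocks.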
5183 | ||
ff43bc37 | 5184 | static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, |
8315847b VS |
5185 | const struct intel_plane_state *plane_state, |
5186 | enum plane_id plane_id, int color_plane) | |
b048a00b | 5187 | { |
8315847b | 5188 | struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; |
b048a00b | 5189 | struct skl_wm_params wm_params; |
b048a00b ML |
5190 | int ret; |
5191 | ||
51de9c6d | 5192 | ret = skl_compute_plane_wm_params(crtc_state, plane_state, |
b048a00b ML |
5193 | &wm_params, color_plane); |
5194 | if (ret) | |
5195 | return ret; | |
5196 | ||
67155a69 | 5197 | skl_compute_wm_levels(crtc_state, &wm_params, wm->wm); |
d8e87498 | 5198 | skl_compute_transition_wm(crtc_state, &wm_params, wm); |
b048a00b ML |
5199 | |
5200 | return 0; | |
5201 | } | |
5202 | ||
ff43bc37 | 5203 | static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, |
8315847b VS |
5204 | const struct intel_plane_state *plane_state, |
5205 | enum plane_id plane_id) | |
b048a00b | 5206 | { |
8315847b | 5207 | struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; |
b048a00b | 5208 | struct skl_wm_params wm_params; |
b048a00b ML |
5209 | int ret; |
5210 | ||
8315847b | 5211 | wm->is_planar = true; |
b048a00b ML |
5212 | |
5213 | /* uv plane watermarks must also be validated for NV12/Planar */ | |
51de9c6d | 5214 | ret = skl_compute_plane_wm_params(crtc_state, plane_state, |
8315847b VS |
5215 | &wm_params, 1); |
5216 | if (ret) | |
5217 | return ret; | |
b048a00b | 5218 | |
67155a69 | 5219 | skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm); |
b048a00b | 5220 | |
8315847b | 5221 | return 0; |
b048a00b ML |
5222 | } |
5223 | ||
96cb7cde | 5224 | static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, |
8315847b | 5225 | const struct intel_plane_state *plane_state) |
b048a00b | 5226 | { |
f90a85e7 | 5227 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); |
7b3cb17a | 5228 | const struct drm_framebuffer *fb = plane_state->hw.fb; |
8315847b | 5229 | enum plane_id plane_id = plane->id; |
b048a00b | 5230 | int ret; |
b048a00b | 5231 | |
8315847b VS |
5232 | if (!intel_wm_plane_visible(crtc_state, plane_state)) |
5233 | return 0; | |
5234 | ||
ff43bc37 | 5235 | ret = skl_build_plane_wm_single(crtc_state, plane_state, |
8315847b | 5236 | plane_id, 0); |
b048a00b ML |
5237 | if (ret) |
5238 | return ret; | |
5239 | ||
8315847b | 5240 | if (fb->format->is_yuv && fb->format->num_planes > 1) { |
ff43bc37 | 5241 | ret = skl_build_plane_wm_uv(crtc_state, plane_state, |
8315847b VS |
5242 | plane_id); |
5243 | if (ret) | |
5244 | return ret; | |
5245 | } | |
5246 | ||
5247 | return 0; | |
5248 | } | |
5249 | ||
96cb7cde | 5250 | static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, |
8315847b VS |
5251 | const struct intel_plane_state *plane_state) |
5252 | { | |
f90a85e7 | 5253 | enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id; |
8315847b VS |
5254 | int ret; |
5255 | ||
5256 | /* Watermarks calculated in master */ | |
c47b7ddb | 5257 | if (plane_state->planar_slave) |
8315847b VS |
5258 | return 0; |
5259 | ||
c47b7ddb | 5260 | if (plane_state->planar_linked_plane) { |
7b3cb17a | 5261 | const struct drm_framebuffer *fb = plane_state->hw.fb; |
c47b7ddb | 5262 | enum plane_id y_plane_id = plane_state->planar_linked_plane->id; |
8315847b VS |
5263 | |
5264 | WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)); | |
5265 | WARN_ON(!fb->format->is_yuv || | |
5266 | fb->format->num_planes == 1); | |
5267 | ||
ff43bc37 | 5268 | ret = skl_build_plane_wm_single(crtc_state, plane_state, |
8315847b VS |
5269 | y_plane_id, 0); |
5270 | if (ret) | |
5271 | return ret; | |
5272 | ||
ff43bc37 | 5273 | ret = skl_build_plane_wm_single(crtc_state, plane_state, |
8315847b VS |
5274 | plane_id, 1); |
5275 | if (ret) | |
5276 | return ret; | |
5277 | } else if (intel_wm_plane_visible(crtc_state, plane_state)) { | |
ff43bc37 | 5278 | ret = skl_build_plane_wm_single(crtc_state, plane_state, |
8315847b VS |
5279 | plane_id, 0); |
5280 | if (ret) | |
5281 | return ret; | |
5282 | } | |
5283 | ||
5284 | return 0; | |
b048a00b ML |
5285 | } |
5286 | ||
ec193640 | 5287 | static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) |
2d41c0b5 | 5288 | { |
2225f3c6 | 5289 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
ec193640 | 5290 | struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; |
af9fbfa6 ML |
5291 | struct intel_plane *plane; |
5292 | const struct intel_plane_state *plane_state; | |
55994c2c | 5293 | int ret; |
2d41c0b5 | 5294 | |
a62163e9 L |
5295 | /* |
5296 | * We'll only calculate watermarks for planes that are actually | |
5297 | * enabled, so make sure all other planes are set as disabled. | |
5298 | */ | |
5299 | memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); | |
5300 | ||
af9fbfa6 ML |
5301 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, |
5302 | crtc_state) { | |
eb2fdcdf | 5303 | |
8315847b | 5304 | if (INTEL_GEN(dev_priv) >= 11) |
ec193640 | 5305 | ret = icl_build_plane_wm(crtc_state, plane_state); |
b048a00b | 5306 | else |
ec193640 | 5307 | ret = skl_build_plane_wm(crtc_state, plane_state); |
d2f5e36d KM |
5308 | if (ret) |
5309 | return ret; | |
2d41c0b5 | 5310 | } |
942aa2d0 | 5311 | |
55994c2c | 5312 | return 0; |
2d41c0b5 PB |
5313 | } |
5314 | ||
f0f59a00 VS |
5315 | static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, |
5316 | i915_reg_t reg, | |
16160e3d DL |
5317 | const struct skl_ddb_entry *entry) |
5318 | { | |
5319 | if (entry->end) | |
9b6320aa JN |
5320 | intel_de_write_fw(dev_priv, reg, |
5321 | (entry->end - 1) << 16 | entry->start); | |
16160e3d | 5322 | else |
9b6320aa | 5323 | intel_de_write_fw(dev_priv, reg, 0); |
16160e3d DL |
5324 | } |
5325 | ||
d8c0fafc | 5326 | static void skl_write_wm_level(struct drm_i915_private *dev_priv, |
5327 | i915_reg_t reg, | |
5328 | const struct skl_wm_level *level) | |
5329 | { | |
5ce9a649 | 5330 | u32 val = 0; |
d8c0fafc | 5331 | |
2ed8e1f5 | 5332 | if (level->plane_en) |
d8c0fafc | 5333 | val |= PLANE_WM_EN; |
2ed8e1f5 VS |
5334 | if (level->ignore_lines) |
5335 | val |= PLANE_WM_IGNORE_LINES; | |
5336 | val |= level->plane_res_b; | |
5337 | val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; | |
d8c0fafc | 5338 | |
9b6320aa | 5339 | intel_de_write_fw(dev_priv, reg, val); |
d8c0fafc | 5340 | } |
5341 | ||
ff43bc37 VS |
5342 | void skl_write_plane_wm(struct intel_plane *plane, |
5343 | const struct intel_crtc_state *crtc_state) | |
62e0fb88 | 5344 | { |
ff43bc37 | 5345 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5db94019 | 5346 | int level, max_level = ilk_wm_max_level(dev_priv); |
ff43bc37 VS |
5347 | enum plane_id plane_id = plane->id; |
5348 | enum pipe pipe = plane->pipe; | |
5349 | const struct skl_plane_wm *wm = | |
5350 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |
5351 | const struct skl_ddb_entry *ddb_y = | |
5352 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | |
5353 | const struct skl_ddb_entry *ddb_uv = | |
5354 | &crtc_state->wm.skl.plane_ddb_uv[plane_id]; | |
62e0fb88 L |
5355 | |
5356 | for (level = 0; level <= max_level; level++) { | |
d5cdfdf5 | 5357 | skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), |
d8c0fafc | 5358 | &wm->wm[level]); |
62e0fb88 | 5359 | } |
d5cdfdf5 | 5360 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), |
d8c0fafc | 5361 | &wm->trans_wm); |
27082493 | 5362 | |
ff43bc37 | 5363 | if (INTEL_GEN(dev_priv) >= 11) { |
234059da | 5364 | skl_ddb_entry_write(dev_priv, |
ff43bc37 VS |
5365 | PLANE_BUF_CFG(pipe, plane_id), ddb_y); |
5366 | return; | |
b879d58f | 5367 | } |
ff43bc37 VS |
5368 | |
5369 | if (wm->is_planar) | |
5370 | swap(ddb_y, ddb_uv); | |
5371 | ||
5372 | skl_ddb_entry_write(dev_priv, | |
5373 | PLANE_BUF_CFG(pipe, plane_id), ddb_y); | |
5374 | skl_ddb_entry_write(dev_priv, | |
5375 | PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv); | |
62e0fb88 L |
5376 | } |
5377 | ||
ff43bc37 VS |
5378 | void skl_write_cursor_wm(struct intel_plane *plane, |
5379 | const struct intel_crtc_state *crtc_state) | |
62e0fb88 | 5380 | { |
ff43bc37 | 5381 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5db94019 | 5382 | int level, max_level = ilk_wm_max_level(dev_priv); |
ff43bc37 VS |
5383 | enum plane_id plane_id = plane->id; |
5384 | enum pipe pipe = plane->pipe; | |
5385 | const struct skl_plane_wm *wm = | |
5386 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |
5387 | const struct skl_ddb_entry *ddb = | |
5388 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | |
62e0fb88 L |
5389 | |
5390 | for (level = 0; level <= max_level; level++) { | |
d8c0fafc | 5391 | skl_write_wm_level(dev_priv, CUR_WM(pipe, level), |
5392 | &wm->wm[level]); | |
62e0fb88 | 5393 | } |
d8c0fafc | 5394 | skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); |
5d374d96 | 5395 | |
ff43bc37 | 5396 | skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb); |
2d41c0b5 PB |
5397 | } |
5398 | ||
45ece230 | 5399 | bool skl_wm_level_equals(const struct skl_wm_level *l1, |
5400 | const struct skl_wm_level *l2) | |
5401 | { | |
ff43bc37 | 5402 | return l1->plane_en == l2->plane_en && |
2ed8e1f5 | 5403 | l1->ignore_lines == l2->ignore_lines && |
ff43bc37 VS |
5404 | l1->plane_res_l == l2->plane_res_l && |
5405 | l1->plane_res_b == l2->plane_res_b; | |
5406 | } | |
45ece230 | 5407 | |
ff43bc37 VS |
5408 | static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, |
5409 | const struct skl_plane_wm *wm1, | |
5410 | const struct skl_plane_wm *wm2) | |
5411 | { | |
5412 | int level, max_level = ilk_wm_max_level(dev_priv); | |
45ece230 | 5413 | |
ff43bc37 | 5414 | for (level = 0; level <= max_level; level++) { |
e7f54e6c VS |
5415 | /* |
5416 | * We don't check uv_wm as the hardware doesn't actually | |
5417 | * use it. It only gets used for calculating the required | |
5418 | * ddb allocation. | |
5419 | */ | |
5420 | if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) | |
ff43bc37 VS |
5421 | return false; |
5422 | } | |
5423 | ||
5424 | return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); | |
45ece230 | 5425 | } |
5426 | ||
27082493 L |
5427 | static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, |
5428 | const struct skl_ddb_entry *b) | |
0e8fb7ba | 5429 | { |
27082493 | 5430 | return a->start < b->end && b->start < a->end; |
0e8fb7ba DL |
5431 | } |
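/*
 * Illustrative sketch, not driver code: the test above treats DDB entries
 * as half-open [start, end) ranges, so allocations that merely touch are
 * not considered overlapping. The helper name below is made up for the
 * example.
 */
static bool __maybe_unused skl_ddb_overlap_example(void)
{
	struct skl_ddb_entry a = { .start = 0,   .end = 512 };	/* blocks 0..511   */
	struct skl_ddb_entry b = { .start = 512, .end = 768 };	/* blocks 512..767 */
	struct skl_ddb_entry c = { .start = 500, .end = 600 };	/* straddles a/b   */

	return !skl_ddb_entries_overlap(&a, &b) &&	/* adjacent: no overlap */
		skl_ddb_entries_overlap(&a, &c);	/* genuine overlap */
}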
5432 | ||
53cc6880 | 5433 | bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, |
696173b0 | 5434 | const struct skl_ddb_entry *entries, |
53cc6880 | 5435 | int num_entries, int ignore_idx) |
0e8fb7ba | 5436 | { |
53cc6880 | 5437 | int i; |
0e8fb7ba | 5438 | |
53cc6880 VS |
5439 | for (i = 0; i < num_entries; i++) { |
5440 | if (i != ignore_idx && | |
5441 | skl_ddb_entries_overlap(ddb, &entries[i])) | |
27082493 | 5442 | return true; |
2b68504b | 5443 | } |
0e8fb7ba | 5444 | |
27082493 | 5445 | return false; |
0e8fb7ba DL |
5446 | } |
5447 | ||
bb7791bd | 5448 | static int |
ff43bc37 VS |
5449 | skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, |
5450 | struct intel_crtc_state *new_crtc_state) | |
9a30a261 | 5451 | { |
2225f3c6 ML |
5452 | struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); |
5453 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); | |
ff43bc37 VS |
5454 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5455 | struct intel_plane *plane; | |
9a30a261 | 5456 | |
ff43bc37 VS |
5457 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
5458 | struct intel_plane_state *plane_state; | |
5459 | enum plane_id plane_id = plane->id; | |
9a30a261 | 5460 | |
ff43bc37 VS |
5461 | if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], |
5462 | &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) && | |
5463 | skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id], | |
5464 | &new_crtc_state->wm.skl.plane_ddb_uv[plane_id])) | |
9a30a261 RV |
5465 | continue; |
5466 | ||
ff43bc37 | 5467 | plane_state = intel_atomic_get_plane_state(state, plane); |
9a30a261 RV |
5468 | if (IS_ERR(plane_state)) |
5469 | return PTR_ERR(plane_state); | |
1ab554b0 | 5470 | |
ff43bc37 | 5471 | new_crtc_state->update_planes |= BIT(plane_id); |
9a30a261 RV |
5472 | } |
5473 | ||
5474 | return 0; | |
5475 | } | |
5476 | ||
5477 | static int | |
cd1d3ee9 | 5478 | skl_compute_ddb(struct intel_atomic_state *state) |
98d39494 | 5479 | { |
072fcc30 | 5480 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); |
ff43bc37 VS |
5481 | struct intel_crtc_state *old_crtc_state; |
5482 | struct intel_crtc_state *new_crtc_state; | |
e1f96a66 | 5483 | struct intel_crtc *crtc; |
e1f96a66 | 5484 | int ret, i; |
98d39494 | 5485 | |
0f0f9aee | 5486 | state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask; |
5a920b85 | 5487 | |
cd1d3ee9 | 5488 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, |
ff43bc37 | 5489 | new_crtc_state, i) { |
072fcc30 | 5490 | ret = skl_allocate_pipe_ddb(new_crtc_state); |
9a30a261 RV |
5491 | if (ret) |
5492 | return ret; | |
5493 | ||
ff43bc37 VS |
5494 | ret = skl_ddb_add_affected_planes(old_crtc_state, |
5495 | new_crtc_state); | |
9a30a261 RV |
5496 | if (ret) |
5497 | return ret; | |
98d39494 MR |
5498 | } |
5499 | ||
5500 | return 0; | |
5501 | } | |
5502 | ||
ab98e944 VS |
5503 | static char enast(bool enable) |
5504 | { | |
5505 | return enable ? '*' : ' '; | |
5506 | } | |
5507 | ||
413fc530 | 5508 | static void |
ff43bc37 | 5509 | skl_print_wm_changes(struct intel_atomic_state *state) |
413fc530 | 5510 | { |
ff43bc37 VS |
5511 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); |
5512 | const struct intel_crtc_state *old_crtc_state; | |
5513 | const struct intel_crtc_state *new_crtc_state; | |
5514 | struct intel_plane *plane; | |
5515 | struct intel_crtc *crtc; | |
7570498e | 5516 | int i; |
413fc530 | 5517 | |
bdbf43d7 | 5518 | if (!drm_debug_enabled(DRM_UT_KMS)) |
ab98e944 VS |
5519 | return; |
5520 | ||
ff43bc37 VS |
5521 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, |
5522 | new_crtc_state, i) { | |
ab98e944 VS |
5523 | const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; |
5524 | ||
5525 | old_pipe_wm = &old_crtc_state->wm.skl.optimal; | |
5526 | new_pipe_wm = &new_crtc_state->wm.skl.optimal; | |
5527 | ||
ff43bc37 VS |
5528 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
5529 | enum plane_id plane_id = plane->id; | |
413fc530 | 5530 | const struct skl_ddb_entry *old, *new; |
5531 | ||
ff43bc37 VS |
5532 | old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id]; |
5533 | new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id]; | |
413fc530 | 5534 | |
413fc530 | 5535 | if (skl_ddb_entry_equal(old, new)) |
5536 | continue; | |
5537 | ||
f8d18d5c WK |
5538 | drm_dbg_kms(&dev_priv->drm, |
5539 | "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", | |
5540 | plane->base.base.id, plane->base.name, | |
5541 | old->start, old->end, new->start, new->end, | |
5542 | skl_ddb_entry_size(old), skl_ddb_entry_size(new)); | |
ab98e944 VS |
5543 | } |
5544 | ||
5545 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { | |
5546 | enum plane_id plane_id = plane->id; | |
5547 | const struct skl_plane_wm *old_wm, *new_wm; | |
5548 | ||
5549 | old_wm = &old_pipe_wm->planes[plane_id]; | |
5550 | new_wm = &new_pipe_wm->planes[plane_id]; | |
5551 | ||
5552 | if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) | |
5553 | continue; | |
5554 | ||
f8d18d5c WK |
5555 | drm_dbg_kms(&dev_priv->drm, |
5556 | "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" | |
5557 | " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", | |
5558 | plane->base.base.id, plane->base.name, | |
5559 | enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), | |
5560 | enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), | |
5561 | enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), | |
5562 | enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), | |
5563 | enast(old_wm->trans_wm.plane_en), | |
5564 | enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), | |
5565 | enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), | |
5566 | enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), | |
5567 | enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), | |
5568 | enast(new_wm->trans_wm.plane_en)); | |
5569 | ||
5570 | drm_dbg_kms(&dev_priv->drm, | |
5571 | "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" | |
2ed8e1f5 | 5572 | " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", |
f8d18d5c WK |
5573 | plane->base.base.id, plane->base.name, |
5574 | enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, | |
5575 | enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, | |
5576 | enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, | |
5577 | enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, | |
5578 | enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, | |
5579 | enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, | |
5580 | enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, | |
5581 | enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, | |
5582 | enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, | |
5583 | ||
5584 | enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, | |
5585 | enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, | |
5586 | enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, | |
5587 | enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, | |
5588 | enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, | |
5589 | enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, | |
5590 | enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, | |
5591 | enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, | |
5592 | enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); | |
5593 | ||
5594 | drm_dbg_kms(&dev_priv->drm, | |
5595 | "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" | |
5596 | " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", | |
5597 | plane->base.base.id, plane->base.name, | |
5598 | old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, | |
5599 | old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, | |
5600 | old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, | |
5601 | old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, | |
5602 | old_wm->trans_wm.plane_res_b, | |
5603 | new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, | |
5604 | new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, | |
5605 | new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, | |
5606 | new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, | |
5607 | new_wm->trans_wm.plane_res_b); | |
5608 | ||
5609 | drm_dbg_kms(&dev_priv->drm, | |
5610 | "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" | |
5611 | " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", | |
5612 | plane->base.base.id, plane->base.name, | |
5613 | old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, | |
5614 | old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, | |
5615 | old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, | |
5616 | old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, | |
5617 | old_wm->trans_wm.min_ddb_alloc, | |
5618 | new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, | |
5619 | new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, | |
5620 | new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, | |
5621 | new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, | |
5622 | new_wm->trans_wm.min_ddb_alloc); | |
413fc530 | 5623 | } |
5624 | } | |
5625 | } | |
5626 | ||
49e0ed38 VS |
5627 | static int intel_add_all_pipes(struct intel_atomic_state *state) |
5628 | { | |
5629 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); | |
5630 | struct intel_crtc *crtc; | |
5631 | ||
5632 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |
5633 | struct intel_crtc_state *crtc_state; | |
5634 | ||
5635 | crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); | |
5636 | if (IS_ERR(crtc_state)) | |
5637 | return PTR_ERR(crtc_state); | |
5638 | } | |
5639 | ||
5640 | return 0; | |
5641 | } | |
5642 | ||
98d39494 | 5643 | static int |
d7a14584 | 5644 | skl_ddb_add_affected_pipes(struct intel_atomic_state *state) |
98d39494 | 5645 | { |
49e0ed38 | 5646 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); |
d7a14584 | 5647 | int ret; |
98d39494 | 5648 | |
e1f96a66 MK |
5649 | /* |
5650 | * If this is our first atomic update following hardware readout, | |
5651 | * we can't trust the DDB that the BIOS programmed for us. Let's | |
5652 | * pretend that all pipes switched active status so that we'll | |
5653 | * ensure a full DDB recompute. | |
5654 | */ | |
5655 | if (dev_priv->wm.distrust_bios_wm) { | |
49e0ed38 | 5656 | ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, |
cd1d3ee9 | 5657 | state->base.acquire_ctx); |
e1f96a66 MK |
5658 | if (ret) |
5659 | return ret; | |
5660 | ||
8d9875b4 | 5661 | state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask; |
e1f96a66 MK |
5662 | |
5663 | /* | |
d06a79d3 | 5664 | * We usually only initialize state->active_pipes if we're |
e1f96a66 MK |
5665 | * doing a modeset; make sure this field is always | |
5666 | * initialized during the sanitization process that happens | |
5667 | * on the first commit too. | |
5668 | */ | |
cd1d3ee9 | 5669 | if (!state->modeset) |
d06a79d3 | 5670 | state->active_pipes = dev_priv->active_pipes; |
e1f96a66 MK |
5671 | } |
5672 | ||
5673 | /* | |
5674 | * If the modeset changes which CRTC's are active, we need to | |
5675 | * recompute the DDB allocation for *all* active pipes, even | |
5676 | * those that weren't otherwise being modified in any way by this | |
5677 | * atomic commit. Due to the shrinking of the per-pipe allocations | |
5678 | * when new active CRTC's are added, it's possible for a pipe that | |
5679 | * we were already using and aren't changing at all here to suddenly | |
5680 | * become invalid if its DDB needs exceed its new allocation. | |
5681 | * | |
5682 | * Note that if we wind up doing a full DDB recompute, we can't let | |
5683 | * any other display updates race with this transaction, so we need | |
5684 | * to grab the lock on *all* CRTC's. | |
5685 | */ | |
cd1d3ee9 | 5686 | if (state->active_pipe_changes || state->modeset) { |
49e0ed38 VS |
5687 | ret = intel_add_all_pipes(state); |
5688 | if (ret) | |
5689 | return ret; | |
e1f96a66 MK |
5690 | } |
5691 | ||
5692 | return 0; | |
5693 | } | |
5694 | ||
ff43bc37 VS |
5695 | /* |
5696 | * To make sure the cursor watermark registers are always consistent | |
5697 | * with our computed state the following scenario needs special | |
5698 | * treatment: | |
5699 | * | |
5700 | * 1. enable cursor | |
5701 | * 2. move cursor entirely offscreen | |
5702 | * 3. disable cursor | |
5703 | * | |
5704 | * Step 2. does call .disable_plane() but does not zero the watermarks | |
5705 | * (since we consider an offscreen cursor still active for the purposes | |
5706 | * of watermarks). Step 3. would not normally call .disable_plane() | |
5707 | * because the actual plane visibility isn't changing, and we don't | |
5708 | * deallocate the cursor ddb until the pipe gets disabled. So we must | |
5709 | * force step 3. to call .disable_plane() to update the watermark | |
5710 | * registers properly. | |
5711 | * | |
5712 | * Other planes do not suffer from this issue as their watermarks are | |
5713 | * calculated based on the actual plane visibility. The only time this | |
5714 | * can trigger for the other planes is during the initial readout as the | |
5715 | * default value of the watermark registers is not zero. | |
5716 | */ | |
5717 | static int skl_wm_add_affected_planes(struct intel_atomic_state *state, | |
5718 | struct intel_crtc *crtc) | |
5719 | { | |
5720 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |
5721 | const struct intel_crtc_state *old_crtc_state = | |
5722 | intel_atomic_get_old_crtc_state(state, crtc); | |
5723 | struct intel_crtc_state *new_crtc_state = | |
5724 | intel_atomic_get_new_crtc_state(state, crtc); | |
5725 | struct intel_plane *plane; | |
5726 | ||
5727 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { | |
5728 | struct intel_plane_state *plane_state; | |
5729 | enum plane_id plane_id = plane->id; | |
5730 | ||
5731 | /* | |
5732 | * Force a full wm update for every plane on modeset. | |
5733 | * Required because the reset value of the wm registers | |
5734 | * is non-zero, whereas we want all disabled planes to | |
5735 | * have zero watermarks. So if we turn off the relevant | |
5736 | * power well the hardware state will go out of sync | |
5737 | * with the software state. | |
5738 | */ | |
2225f3c6 | 5739 | if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && |
ff43bc37 VS |
5740 | skl_plane_wm_equals(dev_priv, |
5741 | &old_crtc_state->wm.skl.optimal.planes[plane_id], | |
5742 | &new_crtc_state->wm.skl.optimal.planes[plane_id])) | |
5743 | continue; | |
5744 | ||
5745 | plane_state = intel_atomic_get_plane_state(state, plane); | |
5746 | if (IS_ERR(plane_state)) | |
5747 | return PTR_ERR(plane_state); | |
5748 | ||
5749 | new_crtc_state->update_planes |= BIT(plane_id); | |
5750 | } | |
5751 | ||
5752 | return 0; | |
5753 | } | |
5754 | ||
e1f96a66 | 5755 | static int |
cd1d3ee9 | 5756 | skl_compute_wm(struct intel_atomic_state *state) |
e1f96a66 | 5757 | { |
cd1d3ee9 | 5758 | struct intel_crtc *crtc; |
8cac9fd9 | 5759 | struct intel_crtc_state *new_crtc_state; |
cd1d3ee9 | 5760 | struct intel_crtc_state *old_crtc_state; |
e1f96a66 MK |
5761 | int ret, i; |
5762 | ||
d7a14584 VS |
5763 | ret = skl_ddb_add_affected_pipes(state); |
5764 | if (ret) | |
e1f96a66 MK |
5765 | return ret; |
5766 | ||
734fa01f MR |
5767 | /* |
5768 | * Calculate WM's for all pipes that are part of this transaction. | |
d8e87498 | 5769 | * Note that skl_ddb_add_affected_pipes may have added more CRTC's that |
f119a5e2 | 5770 | * weren't otherwise being modified if pipe allocations had to change. |
734fa01f | 5771 | */ |
cd1d3ee9 | 5772 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, |
8cac9fd9 VS |
5773 | new_crtc_state, i) { |
5774 | ret = skl_build_pipe_wm(new_crtc_state); | |
ff43bc37 VS |
5775 | if (ret) |
5776 | return ret; | |
734fa01f MR |
5777 | } |
5778 | ||
d8e87498 MR |
5779 | ret = skl_compute_ddb(state); |
5780 | if (ret) | |
5781 | return ret; | |
5782 | ||
23baedd2 VS |
5783 | /* |
5784 | * skl_compute_ddb() will have adjusted the final watermarks | |
5785 | * based on how much ddb is available. Now we can actually | |
5786 | * check if the final watermarks changed. | |
5787 | */ | |
5788 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, | |
5789 | new_crtc_state, i) { | |
5790 | ret = skl_wm_add_affected_planes(state, crtc); | |
5791 | if (ret) | |
5792 | return ret; | |
5793 | } | |
5794 | ||
cd1d3ee9 | 5795 | skl_print_wm_changes(state); |
413fc530 | 5796 | |
98d39494 MR |
5797 | return 0; |
5798 | } | |
5799 | ||
cd1d3ee9 | 5800 | static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, |
d890565c VS |
5801 | struct intel_wm_config *config) |
5802 | { | |
5803 | struct intel_crtc *crtc; | |
5804 | ||
5805 | /* Compute the currently _active_ config */ | |
cd1d3ee9 | 5806 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
d890565c VS |
5807 | const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; |
5808 | ||
5809 | if (!wm->pipe_enabled) | |
5810 | continue; | |
5811 | ||
5812 | config->sprites_enabled |= wm->sprites_enabled; | |
5813 | config->sprites_scaled |= wm->sprites_scaled; | |
5814 | config->num_pipes_active++; | |
5815 | } | |
5816 | } | |
5817 | ||
ed4a6a7c | 5818 | static void ilk_program_watermarks(struct drm_i915_private *dev_priv) |
801bcfff | 5819 | { |
b9d5c839 | 5820 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; |
820c1980 | 5821 | struct ilk_wm_maximums max; |
d890565c | 5822 | struct intel_wm_config config = {}; |
820c1980 | 5823 | struct ilk_wm_values results = {}; |
77c122bc | 5824 | enum intel_ddb_partitioning partitioning; |
261a27d1 | 5825 | |
cd1d3ee9 | 5826 | ilk_compute_wm_config(dev_priv, &config); |
d890565c | 5827 | |
cd1d3ee9 MR |
5828 | ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); |
5829 | ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); | |
a485bfb8 VS |
5830 | |
5831 | /* 5/6 split only in single pipe config on IVB+ */ | |
175fded1 | 5832 | if (INTEL_GEN(dev_priv) >= 7 && |
d890565c | 5833 | config.num_pipes_active == 1 && config.sprites_enabled) { |
cd1d3ee9 MR |
5834 | ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); |
5835 | ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6); | |
0362c781 | 5836 | |
cd1d3ee9 | 5837 | best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); |
861f3389 | 5838 | } else { |
198a1e9b | 5839 | best_lp_wm = &lp_wm_1_2; |
861f3389 PZ |
5840 | } |
5841 | ||
198a1e9b | 5842 | partitioning = (best_lp_wm == &lp_wm_1_2) ? |
77c122bc | 5843 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; |
801bcfff | 5844 | |
cd1d3ee9 | 5845 | ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); |
609cedef | 5846 | |
820c1980 | 5847 | ilk_write_wm_values(dev_priv, &results); |
1011d8c4 PZ |
5848 | } |
5849 | ||
ccf010fb | 5850 | static void ilk_initial_watermarks(struct intel_atomic_state *state, |
7a8fdb1f | 5851 | struct intel_crtc *crtc) |
b9d5c839 | 5852 | { |
7a8fdb1f VS |
5853 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5854 | const struct intel_crtc_state *crtc_state = | |
5855 | intel_atomic_get_new_crtc_state(state, crtc); | |
b9d5c839 | 5856 | |
ed4a6a7c | 5857 | mutex_lock(&dev_priv->wm.wm_mutex); |
88016a9f | 5858 | crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; |
ed4a6a7c MR |
5859 | ilk_program_watermarks(dev_priv); |
5860 | mutex_unlock(&dev_priv->wm.wm_mutex); | |
5861 | } | |
bf220452 | 5862 | |
ccf010fb | 5863 | static void ilk_optimize_watermarks(struct intel_atomic_state *state, |
7a8fdb1f | 5864 | struct intel_crtc *crtc) |
ed4a6a7c | 5865 | { |
7a8fdb1f VS |
5866 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5867 | const struct intel_crtc_state *crtc_state = | |
5868 | intel_atomic_get_new_crtc_state(state, crtc); | |
88016a9f VS |
5869 | |
5870 | if (!crtc_state->wm.need_postvbl_update) | |
5871 | return; | |
bf220452 | 5872 | |
ed4a6a7c | 5873 | mutex_lock(&dev_priv->wm.wm_mutex); |
88016a9f VS |
5874 | crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; |
5875 | ilk_program_watermarks(dev_priv); | |
ed4a6a7c | 5876 | mutex_unlock(&dev_priv->wm.wm_mutex); |
b9d5c839 VS |
5877 | } |
5878 | ||
5ce9a649 | 5879 | static inline void skl_wm_level_from_reg_val(u32 val, |
d8c0fafc | 5880 | struct skl_wm_level *level) |
3078999f | 5881 | { |
d8c0fafc | 5882 | level->plane_en = val & PLANE_WM_EN; |
2ed8e1f5 | 5883 | level->ignore_lines = val & PLANE_WM_IGNORE_LINES; |
d8c0fafc | 5884 | level->plane_res_b = val & PLANE_WM_BLOCKS_MASK; |
5885 | level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) & | |
5886 | PLANE_WM_LINES_MASK; | |
3078999f PB |
5887 | } |
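/*
 * Sketch of the inverse packing, mirroring skl_write_wm_level() earlier in
 * this file; illustrative only (the write path already builds the register
 * value itself), and the helper name is made up.
 */
static u32 __maybe_unused skl_wm_level_to_reg_val_sketch(const struct skl_wm_level *level)
{
	u32 val = 0;

	if (level->plane_en)
		val |= PLANE_WM_EN;
	if (level->ignore_lines)
		val |= PLANE_WM_IGNORE_LINES;
	val |= level->plane_res_b & PLANE_WM_BLOCKS_MASK;
	val |= (level->plane_res_l & PLANE_WM_LINES_MASK) << PLANE_WM_LINES_SHIFT;

	return val;
}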
5888 | ||
cd1d3ee9 | 5889 | void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, |
bf9d99ad | 5890 | struct skl_pipe_wm *out) |
3078999f | 5891 | { |
cd1d3ee9 MR |
5892 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5893 | enum pipe pipe = crtc->pipe; | |
d5cdfdf5 VS |
5894 | int level, max_level; |
5895 | enum plane_id plane_id; | |
5ce9a649 | 5896 | u32 val; |
3078999f | 5897 | |
5db94019 | 5898 | max_level = ilk_wm_max_level(dev_priv); |
3078999f | 5899 | |
cd1d3ee9 | 5900 | for_each_plane_id_on_crtc(crtc, plane_id) { |
d5cdfdf5 | 5901 | struct skl_plane_wm *wm = &out->planes[plane_id]; |
3078999f | 5902 | |
d8c0fafc | 5903 | for (level = 0; level <= max_level; level++) { |
d5cdfdf5 VS |
5904 | if (plane_id != PLANE_CURSOR) |
5905 | val = I915_READ(PLANE_WM(pipe, plane_id, level)); | |
d8c0fafc | 5906 | else |
5907 | val = I915_READ(CUR_WM(pipe, level)); | |
3078999f | 5908 | |
d8c0fafc | 5909 | skl_wm_level_from_reg_val(val, &wm->wm[level]); |
3078999f | 5910 | } |
3078999f | 5911 | |
d5cdfdf5 VS |
5912 | if (plane_id != PLANE_CURSOR) |
5913 | val = I915_READ(PLANE_WM_TRANS(pipe, plane_id)); | |
d8c0fafc | 5914 | else |
5915 | val = I915_READ(CUR_WM_TRANS(pipe)); | |
5916 | ||
5917 | skl_wm_level_from_reg_val(val, &wm->trans_wm); | |
3078999f PB |
5918 | } |
5919 | ||
cd1d3ee9 | 5920 | if (!crtc->active) |
d8c0fafc | 5921 | return; |
3078999f PB |
5922 | } |
5923 | ||
cd1d3ee9 | 5924 | void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) |
3078999f | 5925 | { |
cd1d3ee9 | 5926 | struct intel_crtc *crtc; |
ec193640 | 5927 | struct intel_crtc_state *crtc_state; |
3078999f | 5928 | |
072fcc30 | 5929 | skl_ddb_get_hw_state(dev_priv); |
cd1d3ee9 | 5930 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
ec193640 | 5931 | crtc_state = to_intel_crtc_state(crtc->base.state); |
bf9d99ad | 5932 | |
ec193640 | 5933 | skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); |
bf9d99ad | 5934 | } |
a1de91e5 | 5935 | |
d06a79d3 | 5936 | if (dev_priv->active_pipes) { |
279e99d7 MR |
5937 | /* Fully recompute DDB on first atomic commit */ |
5938 | dev_priv->wm.distrust_bios_wm = true; | |
279e99d7 | 5939 | } |
3078999f PB |
5940 | } |
5941 | ||
cd1d3ee9 | 5942 | static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) |
243e6a44 | 5943 | { |
cd1d3ee9 | 5944 | struct drm_device *dev = crtc->base.dev; |
fac5e23e | 5945 | struct drm_i915_private *dev_priv = to_i915(dev); |
820c1980 | 5946 | struct ilk_wm_values *hw = &dev_priv->wm.hw; |
ec193640 ML |
5947 | struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); |
5948 | struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; | |
cd1d3ee9 | 5949 | enum pipe pipe = crtc->pipe; |
f0f59a00 | 5950 | static const i915_reg_t wm0_pipe_reg[] = { |
243e6a44 VS |
5951 | [PIPE_A] = WM0_PIPEA_ILK, |
5952 | [PIPE_B] = WM0_PIPEB_ILK, | |
5953 | [PIPE_C] = WM0_PIPEC_IVB, | |
5954 | }; | |
5955 | ||
5956 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); | |
243e6a44 | 5957 | |
15606534 VS |
5958 | memset(active, 0, sizeof(*active)); |
5959 | ||
cd1d3ee9 | 5960 | active->pipe_enabled = crtc->active; |
2a44b76b VS |
5961 | |
5962 | if (active->pipe_enabled) { | |
243e6a44 VS |
5963 | u32 tmp = hw->wm_pipe[pipe]; |
5964 | ||
5965 | /* | |
5966 | * For active pipes LP0 watermark is marked as | |
5967 | * enabled, and LP1+ watermarks as disabled since | |
5968 | * we can't really reverse compute them in case | |
5969 | * multiple pipes are active. | |
5970 | */ | |
5971 | active->wm[0].enable = true; | |
5972 | active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; | |
5973 | active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; | |
5974 | active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; | |
243e6a44 | 5975 | } else { |
5db94019 | 5976 | int level, max_level = ilk_wm_max_level(dev_priv); |
243e6a44 VS |
5977 | |
5978 | /* | |
5979 | * For inactive pipes, all watermark levels | |
5980 | * should be marked as enabled but zeroed, | |
5981 | * which is what we'd compute them to. | |
5982 | */ | |
5983 | for (level = 0; level <= max_level; level++) | |
5984 | active->wm[level].enable = true; | |
5985 | } | |
4e0963c7 | 5986 | |
cd1d3ee9 | 5987 | crtc->wm.active.ilk = *active; |
243e6a44 VS |
5988 | } |
5989 | ||
6eb1a681 VS |
5990 | #define _FW_WM(value, plane) \ |
5991 | (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) | |
5992 | #define _FW_WM_VLV(value, plane) \ | |
5993 | (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) | |
5994 | ||
04548cba VS |
5995 | static void g4x_read_wm_values(struct drm_i915_private *dev_priv, |
5996 | struct g4x_wm_values *wm) | |
5997 | { | |
5ce9a649 | 5998 | u32 tmp; |
04548cba VS |
5999 | |
6000 | tmp = I915_READ(DSPFW1); | |
6001 | wm->sr.plane = _FW_WM(tmp, SR); | |
6002 | wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); | |
6003 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); | |
6004 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); | |
6005 | ||
6006 | tmp = I915_READ(DSPFW2); | |
6007 | wm->fbc_en = tmp & DSPFW_FBC_SR_EN; | |
6008 | wm->sr.fbc = _FW_WM(tmp, FBC_SR); | |
6009 | wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); | |
6010 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); | |
6011 | wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); | |
6012 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); | |
6013 | ||
6014 | tmp = I915_READ(DSPFW3); | |
6015 | wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; | |
6016 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); | |
6017 | wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); | |
6018 | wm->hpll.plane = _FW_WM(tmp, HPLL_SR); | |
6019 | } | |
6020 | ||
6eb1a681 VS |
6021 | static void vlv_read_wm_values(struct drm_i915_private *dev_priv, |
6022 | struct vlv_wm_values *wm) | |
6023 | { | |
6024 | enum pipe pipe; | |
5ce9a649 | 6025 | u32 tmp; |
6eb1a681 VS |
6026 | |
6027 | for_each_pipe(dev_priv, pipe) { | |
6028 | tmp = I915_READ(VLV_DDL(pipe)); | |
6029 | ||
1b31389c | 6030 | wm->ddl[pipe].plane[PLANE_PRIMARY] = |
6eb1a681 | 6031 | (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
1b31389c | 6032 | wm->ddl[pipe].plane[PLANE_CURSOR] = |
6eb1a681 | 6033 | (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
1b31389c | 6034 | wm->ddl[pipe].plane[PLANE_SPRITE0] = |
6eb1a681 | 6035 | (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
1b31389c | 6036 | wm->ddl[pipe].plane[PLANE_SPRITE1] = |
6eb1a681 VS |
6037 | (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
6038 | } | |
6039 | ||
6040 | tmp = I915_READ(DSPFW1); | |
6041 | wm->sr.plane = _FW_WM(tmp, SR); | |
1b31389c VS |
6042 | wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); |
6043 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); | |
6044 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); | |
6eb1a681 VS |
6045 | |
6046 | tmp = I915_READ(DSPFW2); | |
1b31389c VS |
6047 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); |
6048 | wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); | |
6049 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); | |
6eb1a681 VS |
6050 | |
6051 | tmp = I915_READ(DSPFW3); | |
6052 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); | |
6053 | ||
6054 | if (IS_CHERRYVIEW(dev_priv)) { | |
6055 | tmp = I915_READ(DSPFW7_CHV); | |
1b31389c VS |
6056 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); |
6057 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); | |
6eb1a681 VS |
6058 | |
6059 | tmp = I915_READ(DSPFW8_CHV); | |
1b31389c VS |
6060 | wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); |
6061 | wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); | |
6eb1a681 VS |
6062 | |
6063 | tmp = I915_READ(DSPFW9_CHV); | |
1b31389c VS |
6064 | wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); |
6065 | wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); | |
6eb1a681 VS |
6066 | |
6067 | tmp = I915_READ(DSPHOWM); | |
6068 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | |
1b31389c VS |
6069 | wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; |
6070 | wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; | |
6071 | wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; | |
6072 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; | |
6073 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | |
6074 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; | |
6075 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | |
6076 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | |
6077 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; | |
6eb1a681 VS |
6078 | } else { |
6079 | tmp = I915_READ(DSPFW7); | |
1b31389c VS |
6080 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); |
6081 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); | |
6eb1a681 VS |
6082 | |
6083 | tmp = I915_READ(DSPHOWM); | |
6084 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | |
1b31389c VS |
6085 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; |
6086 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | |
6087 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; | |
6088 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | |
6089 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | |
6090 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; | |
6eb1a681 VS |
6091 | } |
6092 | } | |
6093 | ||
6094 | #undef _FW_WM | |
6095 | #undef _FW_WM_VLV | |
6096 | ||
cd1d3ee9 | 6097 | void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) |
04548cba | 6098 | { |
04548cba VS |
6099 | struct g4x_wm_values *wm = &dev_priv->wm.g4x; |
6100 | struct intel_crtc *crtc; | |
6101 | ||
6102 | g4x_read_wm_values(dev_priv, wm); | |
6103 | ||
6104 | wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; | |
6105 | ||
cd1d3ee9 | 6106 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
04548cba VS |
6107 | struct intel_crtc_state *crtc_state = |
6108 | to_intel_crtc_state(crtc->base.state); | |
6109 | struct g4x_wm_state *active = &crtc->wm.active.g4x; | |
6110 | struct g4x_pipe_wm *raw; | |
6111 | enum pipe pipe = crtc->pipe; | |
6112 | enum plane_id plane_id; | |
6113 | int level, max_level; | |
6114 | ||
6115 | active->cxsr = wm->cxsr; | |
6116 | active->hpll_en = wm->hpll_en; | |
6117 | active->fbc_en = wm->fbc_en; | |
6118 | ||
6119 | active->sr = wm->sr; | |
6120 | active->hpll = wm->hpll; | |
6121 | ||
6122 | for_each_plane_id_on_crtc(crtc, plane_id) { | |
6123 | active->wm.plane[plane_id] = | |
6124 | wm->pipe[pipe].plane[plane_id]; | |
6125 | } | |
6126 | ||
6127 | if (wm->cxsr && wm->hpll_en) | |
6128 | max_level = G4X_WM_LEVEL_HPLL; | |
6129 | else if (wm->cxsr) | |
6130 | max_level = G4X_WM_LEVEL_SR; | |
6131 | else | |
6132 | max_level = G4X_WM_LEVEL_NORMAL; | |
6133 | ||
6134 | level = G4X_WM_LEVEL_NORMAL; | |
6135 | raw = &crtc_state->wm.g4x.raw[level]; | |
6136 | for_each_plane_id_on_crtc(crtc, plane_id) | |
6137 | raw->plane[plane_id] = active->wm.plane[plane_id]; | |
6138 | ||
6139 | if (++level > max_level) | |
6140 | goto out; | |
6141 | ||
6142 | raw = &crtc_state->wm.g4x.raw[level]; | |
6143 | raw->plane[PLANE_PRIMARY] = active->sr.plane; | |
6144 | raw->plane[PLANE_CURSOR] = active->sr.cursor; | |
6145 | raw->plane[PLANE_SPRITE0] = 0; | |
6146 | raw->fbc = active->sr.fbc; | |
6147 | ||
6148 | if (++level > max_level) | |
6149 | goto out; | |
6150 | ||
6151 | raw = &crtc_state->wm.g4x.raw[level]; | |
6152 | raw->plane[PLANE_PRIMARY] = active->hpll.plane; | |
6153 | raw->plane[PLANE_CURSOR] = active->hpll.cursor; | |
6154 | raw->plane[PLANE_SPRITE0] = 0; | |
6155 | raw->fbc = active->hpll.fbc; | |
6156 | ||
6157 | out: | |
6158 | for_each_plane_id_on_crtc(crtc, plane_id) | |
6159 | g4x_raw_plane_wm_set(crtc_state, level, | |
6160 | plane_id, USHRT_MAX); | |
6161 | g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); | |
6162 | ||
6163 | crtc_state->wm.g4x.optimal = *active; | |
6164 | crtc_state->wm.g4x.intermediate = *active; | |
6165 | ||
f8d18d5c WK |
6166 | drm_dbg_kms(&dev_priv->drm, |
6167 | "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", | |
6168 | pipe_name(pipe), | |
6169 | wm->pipe[pipe].plane[PLANE_PRIMARY], | |
6170 | wm->pipe[pipe].plane[PLANE_CURSOR], | |
6171 | wm->pipe[pipe].plane[PLANE_SPRITE0]); | |
04548cba VS |
6172 | } |
6173 | ||
f8d18d5c WK |
6174 | drm_dbg_kms(&dev_priv->drm, |
6175 | "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", | |
6176 | wm->sr.plane, wm->sr.cursor, wm->sr.fbc); | |
6177 | drm_dbg_kms(&dev_priv->drm, | |
6178 | "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", | |
6179 | wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); | |
6180 | drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", | |
6181 | yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en)); | |
04548cba VS |
6182 | } |
6183 | ||
6184 | void g4x_wm_sanitize(struct drm_i915_private *dev_priv) | |
6185 | { | |
6186 | struct intel_plane *plane; | |
6187 | struct intel_crtc *crtc; | |
6188 | ||
6189 | mutex_lock(&dev_priv->wm.wm_mutex); | |
6190 | ||
6191 | for_each_intel_plane(&dev_priv->drm, plane) { | |
6192 | struct intel_crtc *crtc = | |
6193 | intel_get_crtc_for_pipe(dev_priv, plane->pipe); | |
6194 | struct intel_crtc_state *crtc_state = | |
6195 | to_intel_crtc_state(crtc->base.state); | |
6196 | struct intel_plane_state *plane_state = | |
6197 | to_intel_plane_state(plane->base.state); | |
6198 | struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; | |
6199 | enum plane_id plane_id = plane->id; | |
6200 | int level; | |
6201 | ||
f90a85e7 | 6202 | if (plane_state->uapi.visible) |
04548cba VS |
6203 | continue; |
6204 | ||
6205 | for (level = 0; level < 3; level++) { | |
6206 | struct g4x_pipe_wm *raw = | |
6207 | &crtc_state->wm.g4x.raw[level]; | |
6208 | ||
6209 | raw->plane[plane_id] = 0; | |
6210 | wm_state->wm.plane[plane_id] = 0; | |
6211 | } | |
6212 | ||
6213 | if (plane_id == PLANE_PRIMARY) { | |
6214 | for (level = 0; level < 3; level++) { | |
6215 | struct g4x_pipe_wm *raw = | |
6216 | &crtc_state->wm.g4x.raw[level]; | |
6217 | raw->fbc = 0; | |
6218 | } | |
6219 | ||
6220 | wm_state->sr.fbc = 0; | |
6221 | wm_state->hpll.fbc = 0; | |
6222 | wm_state->fbc_en = false; | |
6223 | } | |
6224 | } | |
6225 | ||
6226 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |
6227 | struct intel_crtc_state *crtc_state = | |
6228 | to_intel_crtc_state(crtc->base.state); | |
6229 | ||
6230 | crtc_state->wm.g4x.intermediate = | |
6231 | crtc_state->wm.g4x.optimal; | |
6232 | crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; | |
6233 | } | |
6234 | ||
6235 | g4x_program_watermarks(dev_priv); | |
6236 | ||
6237 | mutex_unlock(&dev_priv->wm.wm_mutex); | |
6238 | } | |
6239 | ||
cd1d3ee9 | 6240 | void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) |
6eb1a681 | 6241 | { |
6eb1a681 | 6242 | struct vlv_wm_values *wm = &dev_priv->wm.vlv; |
f07d43d2 | 6243 | struct intel_crtc *crtc; |
6eb1a681 VS |
6244 | u32 val; |
6245 | ||
6246 | vlv_read_wm_values(dev_priv, wm); | |
6247 | ||
6eb1a681 VS |
6248 | wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; |
6249 | wm->level = VLV_WM_LEVEL_PM2; | |
6250 | ||
6251 | if (IS_CHERRYVIEW(dev_priv)) { | |
337fa6e0 | 6252 | vlv_punit_get(dev_priv); |
6eb1a681 | 6253 | |
c11b813f | 6254 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); |
6eb1a681 VS |
6255 | if (val & DSP_MAXFIFO_PM5_ENABLE) |
6256 | wm->level = VLV_WM_LEVEL_PM5; | |
6257 | ||
58590c14 VS |
6258 | /* |
6259 | * If DDR DVFS is disabled in the BIOS, Punit | |
6260 | * will never ack the request. So if that happens | |
6261 | * assume we don't have to enable/disable DDR DVFS | |
6262 | * dynamically. To test that just set the REQ_ACK | |
6263 | * bit to poke the Punit, but don't change the | |
6264 | * HIGH/LOW bits so that we don't actually change | |
6265 | * the current state. | |
6266 | */ | |
6eb1a681 | 6267 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); |
58590c14 VS |
6268 | val |= FORCE_DDR_FREQ_REQ_ACK; |
6269 | vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); | |
6270 | ||
6271 | if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & | |
6272 | FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { | |
f8d18d5c WK |
6273 | drm_dbg_kms(&dev_priv->drm, |
6274 | "Punit not acking DDR DVFS request, " | |
6275 | "assuming DDR DVFS is disabled\n"); | |
58590c14 VS |
6276 | dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; |
6277 | } else { | |
6278 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); | |
6279 | if ((val & FORCE_DDR_HIGH_FREQ) == 0) | |
6280 | wm->level = VLV_WM_LEVEL_DDR_DVFS; | |
6281 | } | |
6eb1a681 | 6282 | |
337fa6e0 | 6283 | vlv_punit_put(dev_priv); |
6eb1a681 VS |
6284 | } |
6285 | ||
cd1d3ee9 | 6286 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
ff32c54e VS |
6287 | struct intel_crtc_state *crtc_state = |
6288 | to_intel_crtc_state(crtc->base.state); | |
6289 | struct vlv_wm_state *active = &crtc->wm.active.vlv; | |
6290 | const struct vlv_fifo_state *fifo_state = | |
6291 | &crtc_state->wm.vlv.fifo_state; | |
6292 | enum pipe pipe = crtc->pipe; | |
6293 | enum plane_id plane_id; | |
6294 | int level; | |
6295 | ||
6296 | vlv_get_fifo_size(crtc_state); | |
6297 | ||
6298 | active->num_levels = wm->level + 1; | |
6299 | active->cxsr = wm->cxsr; | |
6300 | ||
ff32c54e | 6301 | for (level = 0; level < active->num_levels; level++) { |
114d7dc0 | 6302 | struct g4x_pipe_wm *raw = |
ff32c54e VS |
6303 | &crtc_state->wm.vlv.raw[level]; |
6304 | ||
6305 | active->sr[level].plane = wm->sr.plane; | |
6306 | active->sr[level].cursor = wm->sr.cursor; | |
6307 | ||
6308 | for_each_plane_id_on_crtc(crtc, plane_id) { | |
6309 | active->wm[level].plane[plane_id] = | |
6310 | wm->pipe[pipe].plane[plane_id]; | |
6311 | ||
6312 | raw->plane[plane_id] = | |
6313 | vlv_invert_wm_value(active->wm[level].plane[plane_id], | |
6314 | fifo_state->plane[plane_id]); | |
6315 | } | |
6316 | } | |
6317 | ||
6318 | for_each_plane_id_on_crtc(crtc, plane_id) | |
6319 | vlv_raw_plane_wm_set(crtc_state, level, | |
6320 | plane_id, USHRT_MAX); | |
6321 | vlv_invalidate_wms(crtc, active, level); | |
6322 | ||
6323 | crtc_state->wm.vlv.optimal = *active; | |
4841da51 | 6324 | crtc_state->wm.vlv.intermediate = *active; |
ff32c54e | 6325 | |
f8d18d5c WK |
6326 | drm_dbg_kms(&dev_priv->drm, |
6327 | "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", | |
6328 | pipe_name(pipe), | |
6329 | wm->pipe[pipe].plane[PLANE_PRIMARY], | |
6330 | wm->pipe[pipe].plane[PLANE_CURSOR], | |
6331 | wm->pipe[pipe].plane[PLANE_SPRITE0], | |
6332 | wm->pipe[pipe].plane[PLANE_SPRITE1]); | |
ff32c54e | 6333 | } |
6eb1a681 | 6334 | |
f8d18d5c WK |
6335 | drm_dbg_kms(&dev_priv->drm, |
6336 | "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", | |
6337 | wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); | |
6eb1a681 VS |
6338 | } |
6339 | ||
602ae835 VS |
6340 | void vlv_wm_sanitize(struct drm_i915_private *dev_priv) |
6341 | { | |
6342 | struct intel_plane *plane; | |
6343 | struct intel_crtc *crtc; | |
6344 | ||
6345 | mutex_lock(&dev_priv->wm.wm_mutex); | |
6346 | ||
6347 | for_each_intel_plane(&dev_priv->drm, plane) { | |
6348 | struct intel_crtc *crtc = | |
6349 | intel_get_crtc_for_pipe(dev_priv, plane->pipe); | |
6350 | struct intel_crtc_state *crtc_state = | |
6351 | to_intel_crtc_state(crtc->base.state); | |
6352 | struct intel_plane_state *plane_state = | |
6353 | to_intel_plane_state(plane->base.state); | |
6354 | struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; | |
6355 | const struct vlv_fifo_state *fifo_state = | |
6356 | &crtc_state->wm.vlv.fifo_state; | |
6357 | enum plane_id plane_id = plane->id; | |
6358 | int level; | |
6359 | ||
f90a85e7 | 6360 | if (plane_state->uapi.visible) |
602ae835 VS |
6361 | continue; |
6362 | ||
6363 | for (level = 0; level < wm_state->num_levels; level++) { | |
114d7dc0 | 6364 | struct g4x_pipe_wm *raw = |
602ae835 VS |
6365 | &crtc_state->wm.vlv.raw[level]; |
6366 | ||
6367 | raw->plane[plane_id] = 0; | |
6368 | ||
6369 | wm_state->wm[level].plane[plane_id] = | |
6370 | vlv_invert_wm_value(raw->plane[plane_id], | |
6371 | fifo_state->plane[plane_id]); | |
6372 | } | |
6373 | } | |
6374 | ||
6375 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |
6376 | struct intel_crtc_state *crtc_state = | |
6377 | to_intel_crtc_state(crtc->base.state); | |
6378 | ||
6379 | crtc_state->wm.vlv.intermediate = | |
6380 | crtc_state->wm.vlv.optimal; | |
6381 | crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; | |
6382 | } | |
6383 | ||
6384 | vlv_program_watermarks(dev_priv); | |
6385 | ||
6386 | mutex_unlock(&dev_priv->wm.wm_mutex); | |
6387 | } | |
6388 | ||
f72b84c6 VS |
6389 | /* |
6390 | * FIXME should probably kill this and improve | |
6391 | * the real watermark readout/sanitization instead | |
6392 | */ | |
6393 | static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) | |
6394 | { | |
6395 | I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); | |
6396 | I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); | |
6397 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); | |
6398 | ||
6399 | /* | |
6400 | * Don't touch WM1S_LP_EN here. | |
6401 | * Doing so could cause underruns. | |
6402 | */ | |
6403 | } | |
6404 | ||
cd1d3ee9 | 6405 | void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) |
243e6a44 | 6406 | { |
820c1980 | 6407 | struct ilk_wm_values *hw = &dev_priv->wm.hw; |
cd1d3ee9 | 6408 | struct intel_crtc *crtc; |
243e6a44 | 6409 | |
f72b84c6 VS |
6410 | ilk_init_lp_watermarks(dev_priv); |
6411 | ||
cd1d3ee9 | 6412 | for_each_intel_crtc(&dev_priv->drm, crtc) |
243e6a44 VS |
6413 | ilk_pipe_wm_get_hw_state(crtc); |
6414 | ||
6415 | hw->wm_lp[0] = I915_READ(WM1_LP_ILK); | |
6416 | hw->wm_lp[1] = I915_READ(WM2_LP_ILK); | |
6417 | hw->wm_lp[2] = I915_READ(WM3_LP_ILK); | |
6418 | ||
6419 | hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); | |
175fded1 | 6420 | if (INTEL_GEN(dev_priv) >= 7) { |
cfa7698b VS |
6421 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); |
6422 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); | |
6423 | } | |
243e6a44 | 6424 | |
8652744b | 6425 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
ac9545fd VS |
6426 | hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? |
6427 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | |
fd6b8f43 | 6428 | else if (IS_IVYBRIDGE(dev_priv)) |
ac9545fd VS |
6429 | hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? |
6430 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | |
243e6a44 VS |
6431 | |
6432 | hw->enable_fbc_wm = | |
6433 | !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); | |
6434 | } | |
6435 | ||
b445e3b0 ED |
6436 | /** |
6437 | * intel_update_watermarks - update FIFO watermark values based on current modes | |
31383410 | 6438 | * @crtc: the #intel_crtc on which to compute the WM |
b445e3b0 ED |
6439 | * |
6440 | * Calculate watermark values for the various WM regs based on current mode | |
6441 | * and plane configuration. | |
6442 | * | |
6443 | * There are several cases to deal with here: | |
6444 | * - normal (i.e. non-self-refresh) | |
6445 | * - self-refresh (SR) mode | |
6446 | * - lines are large relative to FIFO size (buffer can hold up to 2) | |
6447 | * - lines are small relative to FIFO size (buffer can hold more than 2 | |
6448 | * lines), so need to account for TLB latency | |
6449 | * | |
6450 | * The normal calculation is: | |
6451 | * watermark = dotclock * bytes per pixel * latency | |
6452 | * where latency is platform & configuration dependent (we assume pessimal | |
6453 | * values here). | |
6454 | * | |
6455 | * The SR calculation is: | |
6456 | * watermark = (trunc(latency/line time)+1) * surface width * | |
6457 | * bytes per pixel | |
6458 | * where | |
6459 | * line time = htotal / dotclock | |
6460 | * surface width = hdisplay for normal plane and 64 for cursor | |
6461 | * and latency is assumed to be high, as above. | |
6462 | * | |
6463 | * The final value programmed to the register should always be rounded up, | |
6464 | * and include an extra 2 entries to account for clock crossings. | |
6465 | * | |
6466 | * We don't use the sprite, so we can ignore that. And on Crestline we have | |
6467 | * to set the non-SR watermarks to 8. | |
6468 | */ | |
432081bc | 6469 | void intel_update_watermarks(struct intel_crtc *crtc) |
b445e3b0 | 6470 | { |
432081bc | 6471 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
b445e3b0 ED |
6472 | |
6473 | if (dev_priv->display.update_wm) | |
46ba614c | 6474 | dev_priv->display.update_wm(crtc); |
b445e3b0 ED |
6475 | } |
6476 | ||
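/*
 * Minimal sketch of the two formulas documented above; illustrative only,
 * not driver code. Unit scaling and the FIFO entry size are platform
 * specific and omitted here, the real code rounds up and adds the two
 * extra entries for clock crossings, and both helper names are made up.
 */
static unsigned int __maybe_unused wm_method1_sketch(unsigned int dotclock,
						     unsigned int cpp,
						     unsigned int latency)
{
	/* normal (non-SR): dotclock * bytes per pixel * latency */
	return dotclock * cpp * latency;
}

static unsigned int __maybe_unused wm_method2_sketch(unsigned int latency,
						     unsigned int line_time,
						     unsigned int surface_width,
						     unsigned int cpp)
{
	/* SR: (trunc(latency / line time) + 1) * surface width * cpp, where
	 * line time = htotal / dotclock and surface width is hdisplay for a
	 * normal plane and 64 for the cursor */
	return (latency / line_time + 1) * surface_width * cpp;
}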
2503a0fe KM |
6477 | void intel_enable_ipc(struct drm_i915_private *dev_priv) |
6478 | { | |
6479 | u32 val; | |
6480 | ||
fd847b8e JRS |
6481 | if (!HAS_IPC(dev_priv)) |
6482 | return; | |
6483 | ||
2503a0fe KM |
6484 | val = I915_READ(DISP_ARB_CTL2); |
6485 | ||
6486 | if (dev_priv->ipc_enabled) | |
6487 | val |= DISP_IPC_ENABLE; | |
6488 | else | |
6489 | val &= ~DISP_IPC_ENABLE; | |
6490 | ||
6491 | I915_WRITE(DISP_ARB_CTL2, val); | |
6492 | } | |
6493 | ||
c91a45f4 VS |
6494 | static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv) |
6495 | { | |
6496 | /* Display WA #0477 WaDisableIPC: skl */ | |
6497 | if (IS_SKYLAKE(dev_priv)) | |
6498 | return false; | |
6499 | ||
6500 | /* Display WA #1141: SKL:all KBL:all CFL */ | |
6501 | if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) | |
6502 | return dev_priv->dram_info.symmetric_memory; | |
6503 | ||
6504 | return true; | |
6505 | } | |
6506 | ||
2503a0fe KM |
6507 | void intel_init_ipc(struct drm_i915_private *dev_priv) |
6508 | { | |
2503a0fe KM |
6509 | if (!HAS_IPC(dev_priv)) |
6510 | return; | |
6511 | ||
c91a45f4 | 6512 | dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv); |
c9b818d3 | 6513 | |
2503a0fe KM |
6514 | intel_enable_ipc(dev_priv); |
6515 | } | |
6516 | ||
3e7abf81 AS |
6517 | static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) |
6518 | { | |
6519 | /* | |
6520 | * On Ibex Peak and Cougar Point, we need to disable clock | |
6521 | * gating for the panel power sequencer or it will fail to | |
6522 | * start up when no ports are active. | |
6523 | */ | |
6524 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | |
6525 | } | |
9270388e | 6526 | |
3e7abf81 | 6527 | static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) |
2b4e57bd | 6528 | { |
3e7abf81 | 6529 | enum pipe pipe; |
2b4e57bd | 6530 | |
3e7abf81 AS |
6531 | for_each_pipe(dev_priv, pipe) { |
6532 | I915_WRITE(DSPCNTR(pipe), | |
6533 | I915_READ(DSPCNTR(pipe)) | | |
6534 | DISPPLANE_TRICKLE_FEED_DISABLE); | |
9270388e | 6535 | |
3e7abf81 AS |
6536 | I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); |
6537 | POSTING_READ(DSPSURF(pipe)); | |
2b4e57bd | 6538 | } |
2b4e57bd ED |
6539 | } |
6540 | ||
3e7abf81 | 6541 | static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) |
2b4e57bd | 6542 | { |
3e7abf81 | 6543 | u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
2b4e57bd | 6544 | |
3e7abf81 AS |
6545 | /* |
6546 | * Required for FBC | |
6547 | * WaFbcDisableDpfcClockGating:ilk | |
6548 | */ | |
6549 | dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | | |
6550 | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | | |
6551 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; | |
2b4e57bd | 6552 | |
3e7abf81 AS |
6553 | I915_WRITE(PCH_3DCGDIS0, |
6554 | MARIUNIT_CLOCK_GATE_DISABLE | | |
6555 | SVSMUNIT_CLOCK_GATE_DISABLE); | |
6556 | I915_WRITE(PCH_3DCGDIS1, | |
6557 | VFMUNIT_CLOCK_GATE_DISABLE); | |
2b4e57bd | 6558 | |
3e7abf81 AS |
6559 | /* |
6560 | * According to the spec the following bits should be set in | |
6561 | * order to enable memory self-refresh | |
6562 | * The bit 22/21 of 0x42004 | |
6563 | * The bit 5 of 0x42020 | |
6564 | * The bit 15 of 0x45000 | |
6565 | */ | |
6566 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6567 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6568 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | |
6569 | dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; | |
6570 | I915_WRITE(DISP_ARB_CTL, | |
6571 | (I915_READ(DISP_ARB_CTL) | | |
6572 | DISP_FBC_WM_DIS)); | |
2b4e57bd ED |
6573 | |
6574 | /* | |
3e7abf81 AS |
6575 | * Based on the document from hardware guys the following bits |
6576 | * should be set unconditionally in order to enable FBC. | |
6577 | * The bit 22 of 0x42000 | |
6578 | * The bit 22 of 0x42004 | |
6579 | * The bit 7,8,9 of 0x42020. | |
2b4e57bd | 6580 | */ |
3e7abf81 AS |
6581 | if (IS_IRONLAKE_M(dev_priv)) { |
6582 | /* WaFbcAsynchFlipDisableFbcQueue:ilk */ | |
6583 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |
6584 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
6585 | ILK_FBCQ_DIS); | |
6586 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6587 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6588 | ILK_DPARB_GATE); | |
6589 | } | |
2b4e57bd | 6590 | |
3e7abf81 | 6591 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); |
2b4e57bd | 6592 | |
3e7abf81 AS |
6593 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
6594 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6595 | ILK_ELPIN_409_SELECT); | |
6596 | I915_WRITE(_3D_CHICKEN2, | |
6597 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | |
6598 | _3D_CHICKEN2_WM_READ_PIPELINED); | |
2b4e57bd | 6599 | |
3e7abf81 AS |
6600 | /* WaDisableRenderCachePipelinedFlush:ilk */ |
6601 | I915_WRITE(CACHE_MODE_0, | |
6602 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | |
2b4e57bd | 6603 | |
3e7abf81 AS |
6604 | /* WaDisable_RenderCache_OperationalFlush:ilk */ |
6605 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
2b4e57bd | 6606 | |
3e7abf81 | 6607 | g4x_disable_trickle_feed(dev_priv); |
9270388e | 6608 | |
3e7abf81 | 6609 | ibx_init_clock_gating(dev_priv); |
2b4e57bd ED |
6610 | } |
6611 | ||
3e7abf81 | 6612 | static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) |
2b4e57bd | 6613 | { |
3e7abf81 AS |
6614 | enum pipe pipe; |
6615 | u32 val; | |
2b4e57bd | 6616 | |
3e7abf81 AS |
6617 | /* |
6618 | * On Ibex Peak and Cougar Point, we need to disable clock | |
6619 | * gating for the panel power sequencer or it will fail to | |
6620 | * start up when no ports are active. | |
6621 | */ | |
6622 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | | |
6623 | PCH_DPLUNIT_CLOCK_GATE_DISABLE | | |
6624 | PCH_CPUNIT_CLOCK_GATE_DISABLE); | |
6625 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | |
6626 | DPLS_EDP_PPS_FIX_DIS); | |
6627 | /* The below fixes the weird display corruption, a few pixels shifted | |
6628 | * downward, on (only) LVDS of some HP laptops with IVY. | |
6629 | */ | |
6630 | for_each_pipe(dev_priv, pipe) { | |
6631 | val = I915_READ(TRANS_CHICKEN2(pipe)); | |
6632 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | |
6633 | val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | |
6634 | if (dev_priv->vbt.fdi_rx_polarity_inverted) | |
6635 | val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | |
3e7abf81 AS |
6636 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; |
6637 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; | |
6638 | I915_WRITE(TRANS_CHICKEN2(pipe), val); | |
6639 | } | |
6640 | /* WADP0ClockGatingDisable */ | |
6641 | for_each_pipe(dev_priv, pipe) { | |
6642 | I915_WRITE(TRANS_CHICKEN1(pipe), | |
6643 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); | |
6644 | } | |
2b4e57bd ED |
6645 | } |
6646 | ||
3e7abf81 | 6647 | static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) |
2b4e57bd | 6648 | { |
3e7abf81 | 6649 | u32 tmp; |
20b46e59 | 6650 | |
3e7abf81 AS |
6651 | tmp = I915_READ(MCH_SSKPD); |
6652 | if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) | |
f8d18d5c WK |
6653 | drm_dbg_kms(&dev_priv->drm, |
6654 | "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", | |
6655 | tmp); | |
20b46e59 SV |
6656 | } |
6657 | ||
3e7abf81 | 6658 | static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) |
dd75fdc8 | 6659 | { |
3e7abf81 | 6660 | u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
dd75fdc8 | 6661 | |
3e7abf81 | 6662 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); |
dd75fdc8 | 6663 | |
3e7abf81 AS |
6664 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
6665 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6666 | ILK_ELPIN_409_SELECT); | |
dd75fdc8 | 6667 | |
3e7abf81 AS |
6668 | /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ |
6669 | I915_WRITE(_3D_CHICKEN, | |
6670 | _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); | |
dd75fdc8 | 6671 | |
3e7abf81 AS |
6672 | /* WaDisable_RenderCache_OperationalFlush:snb */ |
6673 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
dd75fdc8 | 6674 | |
3e7abf81 AS |
6675 | /* |
6676 | * BSpec recommends 8x4 when MSAA is used, | |
6677 | * however in practice 16x4 seems fastest. | |
6678 | * | |
6679 | * Note that PS/WM thread counts depend on the WIZ hashing | |
6680 | * disable bit, which we don't touch here, but it's good | |
6681 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
6682 | */ | |
6683 | I915_WRITE(GEN6_GT_MODE, | |
6684 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); | |
dd75fdc8 | 6685 | |
3e7abf81 AS |
6686 | I915_WRITE(CACHE_MODE_0, |
6687 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | |
dd75fdc8 | 6688 | |
3e7abf81 AS |
6689 | I915_WRITE(GEN6_UCGCTL1, |
6690 | I915_READ(GEN6_UCGCTL1) | | |
6691 | GEN6_BLBUNIT_CLOCK_GATE_DISABLE | | |
6692 | GEN6_CSUNIT_CLOCK_GATE_DISABLE); | |
dd75fdc8 | 6693 | |
3e7abf81 AS |
6694 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock |
6695 | * gating disable must be set. Failure to set it results in | |
6696 | * flickering pixels due to Z write ordering failures after | |
6697 | * some amount of runtime in the Mesa "fire" demo, and Unigine | |
6698 | * Sanctuary and Tropics, and apparently anything else with | |
6699 | * alpha test or pixel discard. | |
6700 | * | |
6701 | * According to the spec, bit 11 (RCCUNIT) must also be set, | |
6702 | * but we didn't debug actual testcases to find it out. | |
6703 | * | |
6704 | * WaDisableRCCUnitClockGating:snb | |
6705 | * WaDisableRCPBUnitClockGating:snb | |
6067a27d | 6706 | */ |
3e7abf81 AS |
6707 | I915_WRITE(GEN6_UCGCTL2, |
6708 | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | |
6709 | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | |
60548c55 | 6710 | |
3e7abf81 AS |
6711 | /* WaStripsFansDisableFastClipPerformanceFix:snb */ |
6712 | I915_WRITE(_3D_CHICKEN3, | |
6713 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); | |
60548c55 | 6714 | |
3e7abf81 AS |
6715 | /* |
6716 | * Bspec says: | |
6717 | * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and | |
6718 | * 3DSTATE_SF number of SF output attributes is more than 16." | |
6719 | */ | |
6720 | I915_WRITE(_3D_CHICKEN3, | |
6721 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); | |
60548c55 | 6722 | |
3e7abf81 AS |
6723 | /* |
6724 | * According to the spec, the following bits should be | |
6725 | * set in order to enable memory self-refresh and FBC: | |
6726 | * The bit21 and bit22 of 0x42000 | |
6727 | * The bit21 and bit22 of 0x42004 | |
6728 | * The bit5 and bit7 of 0x42020 | |
6729 | * The bit14 of 0x70180 | |
6730 | * The bit14 of 0x71180 | |
6731 | * | |
6732 | * WaFbcAsynchFlipDisableFbcQueue:snb | |
6733 | */ | |
6734 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |
6735 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
6736 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); | |
6737 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6738 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6739 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | |
6740 | I915_WRITE(ILK_DSPCLK_GATE_D, | |
6741 | I915_READ(ILK_DSPCLK_GATE_D) | | |
6742 | ILK_DPARBUNIT_CLOCK_GATE_ENABLE | | |
6743 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); | |
dd75fdc8 | 6744 | |
3e7abf81 | 6745 | g4x_disable_trickle_feed(dev_priv); |
60548c55 | 6746 | |
3e7abf81 | 6747 | cpt_init_clock_gating(dev_priv); |
60548c55 | 6748 | |
3e7abf81 | 6749 | gen6_check_mch_setup(dev_priv); |
60548c55 CW |
6750 | } |
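/*
 * A brief aside on the register-write idiom above (a reading aid, not part
 * of the programming sequence): _3D_CHICKEN, CACHE_MODE_0 and GEN6_GT_MODE
 * are "masked" registers whose upper 16 bits act as a per-bit write enable,
 * so individual fields can be updated without a read-modify-write. For
 * example,
 *
 *	I915_WRITE(GEN6_GT_MODE,
 *		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 *
 * only touches the WIZ hashing field; all other bits of GEN6_GT_MODE are
 * left untouched because their write-enable bits stay clear.
 */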
6751 | ||
3e7abf81 | 6752 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) |
2876ce73 | 6753 | { |
3e7abf81 | 6754 | u32 reg = I915_READ(GEN7_FF_THREAD_MODE); |
2876ce73 | 6755 | |
3e7abf81 AS |
6756 | /* |
6757 | * WaVSThreadDispatchOverride:ivb,vlv | |
6758 | * | |
6759 | * This actually overrides the dispatch | |
6760 | * mode for all thread types. | |
6761 | */ | |
6762 | reg &= ~GEN7_FF_SCHED_MASK; | |
6763 | reg |= GEN7_FF_TS_SCHED_HW; | |
6764 | reg |= GEN7_FF_VS_SCHED_HW; | |
6765 | reg |= GEN7_FF_DS_SCHED_HW; | |
7b3c29f6 | 6766 | |
3e7abf81 | 6767 | I915_WRITE(GEN7_FF_THREAD_MODE, reg); |
2876ce73 CW |
6768 | } |
6769 | ||
3e7abf81 | 6770 | static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) |
20b46e59 | 6771 | { |
3e7abf81 AS |
6772 | /* |
6773 | * TODO: this bit should only be enabled when really needed, then | |
6774 | * disabled when not needed anymore in order to save power. | |
17a303ec | 6775 | */ |
4f8036a2 | 6776 | if (HAS_PCH_LPT_LP(dev_priv)) |
17a303ec PZ |
6777 | I915_WRITE(SOUTH_DSPCLK_GATE_D, |
6778 | I915_READ(SOUTH_DSPCLK_GATE_D) | | |
6779 | PCH_LP_PARTITION_LEVEL_DISABLE); | |
0a790cdb PZ |
6780 | |
6781 | /* WADPOClockGatingDisable:hsw */ | |
36c0d0cf VS |
6782 | I915_WRITE(TRANS_CHICKEN1(PIPE_A), |
6783 | I915_READ(TRANS_CHICKEN1(PIPE_A)) | | |
0a790cdb | 6784 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); |
17a303ec PZ |
6785 | } |
6786 | ||
712bf364 | 6787 | static void lpt_suspend_hw(struct drm_i915_private *dev_priv) |
7d708ee4 | 6788 | { |
4f8036a2 | 6789 | if (HAS_PCH_LPT_LP(dev_priv)) { |
5ce9a649 | 6790 | u32 val = I915_READ(SOUTH_DSPCLK_GATE_D); |
7d708ee4 ID |
6791 | |
6792 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; | |
6793 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); | |
6794 | } | |
6795 | } | |
6796 | ||
450174fe ID |
6797 | static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, |
6798 | int general_prio_credits, | |
6799 | int high_prio_credits) | |
6800 | { | |
6801 | u32 misccpctl; | |
930a784d | 6802 | u32 val; |
450174fe ID |
6803 | |
6804 | /* WaTempDisableDOPClkGating:bdw */ | |
6805 | misccpctl = I915_READ(GEN7_MISCCPCTL); | |
6806 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |
6807 | ||
930a784d OM |
6808 | val = I915_READ(GEN8_L3SQCREG1); |
6809 | val &= ~L3_PRIO_CREDITS_MASK; | |
6810 | val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); | |
6811 | val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); | |
6812 | I915_WRITE(GEN8_L3SQCREG1, val); | |
450174fe ID |
6813 | |
6814 | /* | |
6815 | * Wait at least 100 clocks before re-enabling clock gating. | |
6816 | * See the definition of L3SQCREG1 in BSpec. | |
6817 | */ | |
6818 | POSTING_READ(GEN8_L3SQCREG1); | |
6819 | udelay(1); | |
6820 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | |
6821 | } | |
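/*
 * Usage sketch for gen8_set_l3sqc_credits(): the general/high priority
 * credit split comes from BSpec per platform, as in the Broadwell and
 * Cherryview callers later in this file (WaProgramL3SqcReg1Default):
 *
 *	gen8_set_l3sqc_credits(dev_priv, 30, 2);	(bdw)
 *	gen8_set_l3sqc_credits(dev_priv, 38, 2);	(chv)
 */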
6822 | ||
d65dc3e4 OM |
6823 | static void icl_init_clock_gating(struct drm_i915_private *dev_priv) |
6824 | { | |
6825 | /* This is not a Wa. Enable to reduce Sampler power */ | |
6826 | I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, | |
6827 | I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); | |
622b3f68 | 6828 | |
6f4194c8 MA |
6829 | /* Wa_14010594013:icl, ehl */ | |
6830 | intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, | |
6831 | 0, CNL_DELAY_PMRSP); | |
d65dc3e4 OM |
6832 | } |
6833 | ||
5d869230 MT |
6834 | static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) |
6835 | { | |
6836 | u32 vd_pg_enable = 0; | |
6837 | unsigned int i; | |
6838 | ||
6839 | /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ | |
6840 | for (i = 0; i < I915_MAX_VCS; i++) { | |
6841 | if (HAS_ENGINE(dev_priv, _VCS(i))) | |
6842 | vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) | | |
6843 | VDN_MFX_POWERGATE_ENABLE(i); | |
6844 | } | |
6845 | ||
6846 | I915_WRITE(POWERGATE_ENABLE, | |
6847 | I915_READ(POWERGATE_ENABLE) | vd_pg_enable); | |
f78d5da6 RS |
6848 | |
6849 | /* Wa_1409825376:tgl (pre-prod) */ | |
6850 | if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) | |
6851 | I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | | |
6852 | TGL_VRH_GATING_DIS); | |
5d869230 MT |
6853 | } |
6854 | ||
0a46ddd5 RV |
6855 | static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) |
6856 | { | |
6857 | if (!HAS_PCH_CNP(dev_priv)) | |
6858 | return; | |
6859 | ||
470e7c61 | 6860 | /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ |
4cc6feb7 RV |
6861 | I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) | |
6862 | CNP_PWM_CGE_GATING_DISABLE); | |
0a46ddd5 RV |
6863 | } |
6864 | ||
91200c09 | 6865 | static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) |
90007bca | 6866 | { |
8f067837 | 6867 | u32 val; |
0a46ddd5 RV |
6868 | cnp_init_clock_gating(dev_priv); |
6869 | ||
1a25db65 RV |
6870 | /* This is not a Wa. Enable for better image quality */ | |
6871 | I915_WRITE(_3D_CHICKEN3, | |
6872 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); | |
6873 | ||
90007bca RV |
6874 | /* WaEnableChickenDCPR:cnl */ |
6875 | I915_WRITE(GEN8_CHICKEN_DCPR_1, | |
6876 | I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); | |
6877 | ||
6878 | /* WaFbcWakeMemOn:cnl */ | |
6879 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | |
6880 | DISP_FBC_MEMORY_WAKE); | |
6881 | ||
34991bd4 CW |
6882 | val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE); |
6883 | /* ReadHitWriteOnlyDisable:cnl */ | |
6884 | val |= RCCUNIT_CLKGATE_DIS; | |
90007bca RV |
6885 | /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */ |
6886 | if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) | |
34991bd4 CW |
6887 | val |= SARBUNIT_CLKGATE_DIS; |
6888 | I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val); | |
01ab0f92 | 6889 | |
a4713c5a RV |
6890 | /* Wa_2201832410:cnl */ |
6891 | val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE); | |
6892 | val |= GWUNIT_CLKGATE_DIS; | |
6893 | I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val); | |
6894 | ||
01ab0f92 | 6895 | /* WaDisableVFclkgate:cnl */ |
14941b6e | 6896 | /* WaVFUnitClockGatingDisable:cnl */ |
01ab0f92 RA |
6897 | val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE); |
6898 | val |= VFUNIT_CLKGATE_DIS; | |
6899 | I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val); | |
90007bca RV |
6900 | } |
6901 | ||
0a46ddd5 RV |
6902 | static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) |
6903 | { | |
6904 | cnp_init_clock_gating(dev_priv); | |
6905 | gen9_init_clock_gating(dev_priv); | |
6906 | ||
6907 | /* WaFbcNukeOnHostModify:cfl */ | |
6908 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | |
6909 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | |
6910 | } | |
6911 | ||
91200c09 | 6912 | static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) |
9498dba7 | 6913 | { |
46f16e63 | 6914 | gen9_init_clock_gating(dev_priv); |
9498dba7 MK |
6915 | |
6916 | /* WaDisableSDEUnitClockGating:kbl */ | |
6917 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | |
6918 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |
6919 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |
8aeb7f62 MK |
6920 | |
6921 | /* WaDisableGamClockGating:kbl */ | |
6922 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | |
6923 | I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | | |
6924 | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); | |
031cd8c8 | 6925 | |
0a46ddd5 | 6926 | /* WaFbcNukeOnHostModify:kbl */ |
031cd8c8 MK |
6927 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | |
6928 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | |
9498dba7 MK |
6929 | } |
6930 | ||
91200c09 | 6931 | static void skl_init_clock_gating(struct drm_i915_private *dev_priv) |
dc00b6a0 | 6932 | { |
46f16e63 | 6933 | gen9_init_clock_gating(dev_priv); |
44fff99f MK |
6934 | |
6935 | /* WAC6entrylatency:skl */ | |
6936 | I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | | |
6937 | FBC_LLC_FULLY_OPEN); | |
031cd8c8 MK |
6938 | |
6939 | /* WaFbcNukeOnHostModify:skl */ | |
6940 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | |
6941 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | |
dc00b6a0 SV |
6942 | } |
6943 | ||
91200c09 | 6944 | static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) |
1020a5c2 | 6945 | { |
07d27e20 | 6946 | enum pipe pipe; |
1020a5c2 | 6947 | |
ab57fff1 | 6948 | /* WaSwitchSolVfFArbitrationPriority:bdw */ |
50ed5fbd | 6949 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); |
fe4ab3ce | 6950 | |
ab57fff1 | 6951 | /* WaPsrDPAMaskVBlankInSRD:bdw */ |
fe4ab3ce BW |
6952 | I915_WRITE(CHICKEN_PAR1_1, |
6953 | I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); | |
6954 | ||
ab57fff1 | 6955 | /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ |
055e393f | 6956 | for_each_pipe(dev_priv, pipe) { |
07d27e20 | 6957 | I915_WRITE(CHICKEN_PIPESL_1(pipe), |
c7c65622 | 6958 | I915_READ(CHICKEN_PIPESL_1(pipe)) | |
8f670bb1 | 6959 | BDW_DPRS_MASK_VBLANK_SRD); |
fe4ab3ce | 6960 | } |
63801f21 | 6961 | |
ab57fff1 BW |
6962 | /* WaVSRefCountFullforceMissDisable:bdw */ |
6963 | /* WaDSRefCountFullforceMissDisable:bdw */ | |
6964 | I915_WRITE(GEN7_FF_THREAD_MODE, | |
6965 | I915_READ(GEN7_FF_THREAD_MODE) & | |
6966 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | |
36075a4c | 6967 | |
295e8bb7 VS |
6968 | I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, |
6969 | _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); | |
4f1ca9e9 VS |
6970 | |
6971 | /* WaDisableSDEUnitClockGating:bdw */ | |
6972 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |
6973 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |
5d708680 | 6974 | |
450174fe ID |
6975 | /* WaProgramL3SqcReg1Default:bdw */ |
6976 | gen8_set_l3sqc_credits(dev_priv, 30, 2); | |
4d487cff | 6977 | |
17e0adf0 MK |
6978 | /* WaKVMNotificationOnConfigChange:bdw */ |
6979 | I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) | |
6980 | | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); | |
6981 | ||
46f16e63 | 6982 | lpt_init_clock_gating(dev_priv); |
9cc19733 RB |
6983 | |
6984 | /* WaDisableDopClockGating:bdw | |
6985 | * | |
6986 | * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP | |
6987 | * clock gating. | |
6988 | */ | |
6989 | I915_WRITE(GEN6_UCGCTL1, | |
6990 | I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); | |
1020a5c2 BW |
6991 | } |
6992 | ||
91200c09 | 6993 | static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) |
cad2a2d7 | 6994 | { |
f3fc4884 FJ |
6995 | /* L3 caching of data atomics doesn't work -- disable it. */ |
6996 | I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); | |
6997 | I915_WRITE(HSW_ROW_CHICKEN3, | |
6998 | _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); | |
6999 | ||
ecdb4eb7 | 7000 | /* This is required by WaCatErrorRejectionIssue:hsw */ |
cad2a2d7 ED |
7001 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, |
7002 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |
7003 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |
7004 | ||
e36ea7ff VS |
7005 | /* WaVSRefCountFullforceMissDisable:hsw */ |
7006 | I915_WRITE(GEN7_FF_THREAD_MODE, | |
7007 | I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); | |
cad2a2d7 | 7008 | |
4e04632e AG |
7009 | /* WaDisable_RenderCache_OperationalFlush:hsw */ |
7010 | I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
7011 | ||
fe27c606 CW |
7012 | /* enable HiZ Raw Stall Optimization */ |
7013 | I915_WRITE(CACHE_MODE_0_GEN7, | |
7014 | _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); | |
7015 | ||
ecdb4eb7 | 7016 | /* WaDisable4x2SubspanOptimization:hsw */ |
cad2a2d7 ED |
7017 | I915_WRITE(CACHE_MODE_1, |
7018 | _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | |
1544d9d5 | 7019 | |
a12c4967 VS |
7020 | /* |
7021 | * BSpec recommends 8x4 when MSAA is used, | |
7022 | * however in practice 16x4 seems fastest. | |
c5c98a58 VS |
7023 | * |
7024 | * Note that PS/WM thread counts depend on the WIZ hashing | |
7025 | * disable bit, which we don't touch here, but it's good | |
7026 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
a12c4967 VS |
7027 | */ |
7028 | I915_WRITE(GEN7_GT_MODE, | |
98533251 | 7029 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); |
a12c4967 | 7030 | |
94411593 KG |
7031 | /* WaSampleCChickenBitEnable:hsw */ |
7032 | I915_WRITE(HALF_SLICE_CHICKEN3, | |
7033 | _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); | |
7034 | ||
ecdb4eb7 | 7035 | /* WaSwitchSolVfFArbitrationPriority:hsw */ |
e3dff585 BW |
7036 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); |
7037 | ||
46f16e63 | 7038 | lpt_init_clock_gating(dev_priv); |
cad2a2d7 ED |
7039 | } |
7040 | ||
91200c09 | 7041 | static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7042 | { |
5ce9a649 | 7043 | u32 snpcr; |
6f1d69b0 | 7044 | |
231e54f6 | 7045 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
6f1d69b0 | 7046 | |
ecdb4eb7 | 7047 | /* WaDisableEarlyCull:ivb */ |
87f8020e JB |
7048 | I915_WRITE(_3D_CHICKEN3, |
7049 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | |
7050 | ||
ecdb4eb7 | 7051 | /* WaDisableBackToBackFlipFix:ivb */ |
6f1d69b0 ED |
7052 | I915_WRITE(IVB_CHICKEN3, |
7053 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | |
7054 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | |
7055 | ||
ecdb4eb7 | 7056 | /* WaDisablePSDDualDispatchEnable:ivb */ |
50a0bc90 | 7057 | if (IS_IVB_GT1(dev_priv)) |
12f3382b JB |
7058 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, |
7059 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | |
12f3382b | 7060 | |
4e04632e AG |
7061 | /* WaDisable_RenderCache_OperationalFlush:ivb */ |
7062 | I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
7063 | ||
ecdb4eb7 | 7064 | /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ |
6f1d69b0 ED |
7065 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
7066 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | |
7067 | ||
ecdb4eb7 | 7068 | /* WaApplyL3ControlAndL3ChickenMode:ivb */ |
6f1d69b0 ED |
7069 | I915_WRITE(GEN7_L3CNTLREG1, |
7070 | GEN7_WA_FOR_GEN7_L3_CONTROL); | |
7071 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, | |
8ab43976 | 7072 | GEN7_WA_L3_CHICKEN_MODE); |
50a0bc90 | 7073 | if (IS_IVB_GT1(dev_priv)) |
8ab43976 JB |
7074 | I915_WRITE(GEN7_ROW_CHICKEN2, |
7075 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
412236c2 VS |
7076 | else { |
7077 | /* must write both registers */ | |
7078 | I915_WRITE(GEN7_ROW_CHICKEN2, | |
7079 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
8ab43976 JB |
7080 | I915_WRITE(GEN7_ROW_CHICKEN2_GT2, |
7081 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
412236c2 | 7082 | } |
6f1d69b0 | 7083 | |
ecdb4eb7 | 7084 | /* WaForceL3Serialization:ivb */ |
61939d97 JB |
7085 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & |
7086 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | |
7087 | ||
1b80a19a | 7088 | /* |
0f846f81 | 7089 | * According to the spec, bit 13 (RCZUNIT) must be set on IVB. |
ecdb4eb7 | 7090 | * This implements the WaDisableRCZUnitClockGating:ivb workaround. |
0f846f81 JB |
7091 | */ |
7092 | I915_WRITE(GEN6_UCGCTL2, | |
28acf3b2 | 7093 | GEN6_RCZUNIT_CLOCK_GATE_DISABLE); |
0f846f81 | 7094 | |
ecdb4eb7 | 7095 | /* This is required by WaCatErrorRejectionIssue:ivb */ |
6f1d69b0 ED |
7096 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, |
7097 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |
7098 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |
7099 | ||
46f16e63 | 7100 | g4x_disable_trickle_feed(dev_priv); |
6f1d69b0 ED |
7101 | |
7102 | gen7_setup_fixed_func_scheduler(dev_priv); | |
97e1930f | 7103 | |
22721343 CW |
7104 | if (0) { /* causes HiZ corruption on ivb:gt1 */ |
7105 | /* enable HiZ Raw Stall Optimization */ | |
7106 | I915_WRITE(CACHE_MODE_0_GEN7, | |
7107 | _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); | |
7108 | } | |
116f2b6d | 7109 | |
ecdb4eb7 | 7110 | /* WaDisable4x2SubspanOptimization:ivb */ |
97e1930f SV |
7111 | I915_WRITE(CACHE_MODE_1, |
7112 | _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | |
20848223 | 7113 | |
a607c1a4 VS |
7114 | /* |
7115 | * BSpec recommends 8x4 when MSAA is used, | |
7116 | * however in practice 16x4 seems fastest. | |
c5c98a58 VS |
7117 | * |
7118 | * Note that PS/WM thread counts depend on the WIZ hashing | |
7119 | * disable bit, which we don't touch here, but it's good | |
7120 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
a607c1a4 VS |
7121 | */ |
7122 | I915_WRITE(GEN7_GT_MODE, | |
98533251 | 7123 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); |
a607c1a4 | 7124 | |
20848223 BW |
7125 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); |
7126 | snpcr &= ~GEN6_MBC_SNPCR_MASK; | |
7127 | snpcr |= GEN6_MBC_SNPCR_MED; | |
7128 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | |
3107bd48 | 7129 | |
6e266956 | 7130 | if (!HAS_PCH_NOP(dev_priv)) |
46f16e63 | 7131 | cpt_init_clock_gating(dev_priv); |
1d7aaa0c | 7132 | |
46f16e63 | 7133 | gen6_check_mch_setup(dev_priv); |
6f1d69b0 ED |
7134 | } |
7135 | ||
91200c09 | 7136 | static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7137 | { |
ecdb4eb7 | 7138 | /* WaDisableEarlyCull:vlv */ |
87f8020e JB |
7139 | I915_WRITE(_3D_CHICKEN3, |
7140 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | |
7141 | ||
ecdb4eb7 | 7142 | /* WaDisableBackToBackFlipFix:vlv */ |
6f1d69b0 ED |
7143 | I915_WRITE(IVB_CHICKEN3, |
7144 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | |
7145 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | |
7146 | ||
fad7d36e | 7147 | /* WaPsdDispatchEnable:vlv */ |
ecdb4eb7 | 7148 | /* WaDisablePSDDualDispatchEnable:vlv */ |
12f3382b | 7149 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, |
d3bc0303 JB |
7150 | _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | |
7151 | GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | |
12f3382b | 7152 | |
4e04632e AG |
7153 | /* WaDisable_RenderCache_OperationalFlush:vlv */ |
7154 | I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
7155 | ||
ecdb4eb7 | 7156 | /* WaForceL3Serialization:vlv */ |
61939d97 JB |
7157 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & |
7158 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | |
7159 | ||
ecdb4eb7 | 7160 | /* WaDisableDopClockGating:vlv */ |
8ab43976 JB |
7161 | I915_WRITE(GEN7_ROW_CHICKEN2, |
7162 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
7163 | ||
ecdb4eb7 | 7164 | /* This is required by WaCatErrorRejectionIssue:vlv */ |
6f1d69b0 ED |
7165 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, |
7166 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |
7167 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |
7168 | ||
46680e0a VS |
7169 | gen7_setup_fixed_func_scheduler(dev_priv); |
7170 | ||
3c0edaeb | 7171 | /* |
0f846f81 | 7172 | * According to the spec, bit 13 (RCZUNIT) must be set on IVB. |
ecdb4eb7 | 7173 | * This implements the WaDisableRCZUnitClockGating:vlv workaround. |
0f846f81 JB |
7174 | */ |
7175 | I915_WRITE(GEN6_UCGCTL2, | |
3c0edaeb | 7176 | GEN6_RCZUNIT_CLOCK_GATE_DISABLE); |
0f846f81 | 7177 | |
c98f5062 AG |
7178 | /* WaDisableL3Bank2xClockGate:vlv |
7179 | * Disabling L3 clock gating - MMIO 940c[25] = 1 | |
7180 | * Set bit 25 to disable L3_BANK_2x_CLK_GATING */ | |
7181 | I915_WRITE(GEN7_UCGCTL4, | |
7182 | I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); | |
e3f33d46 | 7183 | |
afd58e79 VS |
7184 | /* |
7185 | * BSpec says this must be set, even though | |
7186 | * WaDisable4x2SubspanOptimization isn't listed for VLV. | |
7187 | */ | |
6b26c86d SV |
7188 | I915_WRITE(CACHE_MODE_1, |
7189 | _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | |
7983117f | 7190 | |
da2518f9 VS |
7191 | /* |
7192 | * BSpec recommends 8x4 when MSAA is used, | |
7193 | * however in practice 16x4 seems fastest. | |
7194 | * | |
7195 | * Note that PS/WM thread counts depend on the WIZ hashing | |
7196 | * disable bit, which we don't touch here, but it's good | |
7197 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
7198 | */ | |
7199 | I915_WRITE(GEN7_GT_MODE, | |
7200 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); | |
7201 | ||
031994ee VS |
7202 | /* |
7203 | * WaIncreaseL3CreditsForVLVB0:vlv | |
7204 | * This is the hardware default actually. | |
7205 | */ | |
7206 | I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); | |
7207 | ||
2d809570 | 7208 | /* |
ecdb4eb7 | 7209 | * WaDisableVLVClockGating_VBIIssue:vlv |
2d809570 JB |
7210 | * Disable clock gating on the GCFG unit to prevent a delay |
7211 | * in the reporting of vblank events. | |
7212 | */ | |
7a0d1eed | 7213 | I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); |
6f1d69b0 ED |
7214 | } |
7215 | ||
91200c09 | 7216 | static void chv_init_clock_gating(struct drm_i915_private *dev_priv) |
a4565da8 | 7217 | { |
232ce337 VS |
7218 | /* WaVSRefCountFullforceMissDisable:chv */ |
7219 | /* WaDSRefCountFullforceMissDisable:chv */ | |
7220 | I915_WRITE(GEN7_FF_THREAD_MODE, | |
7221 | I915_READ(GEN7_FF_THREAD_MODE) & | |
7222 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | |
acea6f95 VS |
7223 | |
7224 | /* WaDisableSemaphoreAndSyncFlipWait:chv */ | |
7225 | I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, | |
7226 | _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); | |
0846697c VS |
7227 | |
7228 | /* WaDisableCSUnitClockGating:chv */ | |
7229 | I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | | |
7230 | GEN6_CSUNIT_CLOCK_GATE_DISABLE); | |
c631780f VS |
7231 | |
7232 | /* WaDisableSDEUnitClockGating:chv */ | |
7233 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |
7234 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |
6d50b065 | 7235 | |
450174fe ID |
7236 | /* |
7237 | * WaProgramL3SqcReg1Default:chv | |
7238 | * See gfxspecs/Related Documents/Performance Guide/ | |
7239 | * LSQC Setting Recommendations. | |
7240 | */ | |
7241 | gen8_set_l3sqc_credits(dev_priv, 38, 2); | |
a4565da8 VS |
7242 | } |
7243 | ||
46f16e63 | 7244 | static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7245 | { |
5ce9a649 | 7246 | u32 dspclk_gate; |
6f1d69b0 ED |
7247 | |
7248 | I915_WRITE(RENCLK_GATE_D1, 0); | |
7249 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | |
7250 | GS_UNIT_CLOCK_GATE_DISABLE | | |
7251 | CL_UNIT_CLOCK_GATE_DISABLE); | |
7252 | I915_WRITE(RAMCLK_GATE_D, 0); | |
7253 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | |
7254 | OVRUNIT_CLOCK_GATE_DISABLE | | |
7255 | OVCUNIT_CLOCK_GATE_DISABLE; | |
50a0bc90 | 7256 | if (IS_GM45(dev_priv)) |
6f1d69b0 ED |
7257 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; |
7258 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | |
4358a374 SV |
7259 | |
7260 | /* WaDisableRenderCachePipelinedFlush */ | |
7261 | I915_WRITE(CACHE_MODE_0, | |
7262 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | |
de1aa629 | 7263 | |
4e04632e AG |
7264 | /* WaDisable_RenderCache_OperationalFlush:g4x */ |
7265 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
7266 | ||
46f16e63 | 7267 | g4x_disable_trickle_feed(dev_priv); |
6f1d69b0 ED |
7268 | } |
7269 | ||
91200c09 | 7270 | static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7271 | { |
4f5fd91f TU |
7272 | struct intel_uncore *uncore = &dev_priv->uncore; |
7273 | ||
7274 | intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); | |
7275 | intel_uncore_write(uncore, RENCLK_GATE_D2, 0); | |
7276 | intel_uncore_write(uncore, DSPCLK_GATE_D, 0); | |
7277 | intel_uncore_write(uncore, RAMCLK_GATE_D, 0); | |
7278 | intel_uncore_write16(uncore, DEUC, 0); | |
7279 | intel_uncore_write(uncore, | |
7280 | MI_ARB_STATE, | |
7281 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |
4e04632e AG |
7282 | |
7283 | /* WaDisable_RenderCache_OperationalFlush:gen4 */ | |
4f5fd91f TU |
7284 | intel_uncore_write(uncore, |
7285 | CACHE_MODE_0, | |
7286 | _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
6f1d69b0 ED |
7287 | } |
7288 | ||
91200c09 | 7289 | static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7290 | { |
6f1d69b0 ED |
7291 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | |
7292 | I965_RCC_CLOCK_GATE_DISABLE | | |
7293 | I965_RCPB_CLOCK_GATE_DISABLE | | |
7294 | I965_ISC_CLOCK_GATE_DISABLE | | |
7295 | I965_FBC_CLOCK_GATE_DISABLE); | |
7296 | I915_WRITE(RENCLK_GATE_D2, 0); | |
20f94967 VS |
7297 | I915_WRITE(MI_ARB_STATE, |
7298 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |
4e04632e AG |
7299 | |
7300 | /* WaDisable_RenderCache_OperationalFlush:gen4 */ | |
7301 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
6f1d69b0 ED |
7302 | } |
7303 | ||
46f16e63 | 7304 | static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7305 | { |
6f1d69b0 ED |
7306 | u32 dstate = I915_READ(D_STATE); |
7307 | ||
7308 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | |
7309 | DSTATE_DOT_CLOCK_GATING; | |
7310 | I915_WRITE(D_STATE, dstate); | |
13a86b85 | 7311 | |
9b1e14f4 | 7312 | if (IS_PINEVIEW(dev_priv)) |
13a86b85 | 7313 | I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); |
974a3b0f SV |
7314 | |
7315 | /* IIR "flip pending" means done if this bit is set */ | |
7316 | I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); | |
12fabbcb VS |
7317 | |
7318 | /* interrupts should cause a wake up from C3 */ | |
3299254f | 7319 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); |
dbb42748 VS |
7320 | |
7321 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | |
7322 | I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); | |
1038392b VS |
7323 | |
7324 | I915_WRITE(MI_ARB_STATE, | |
7325 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |
6f1d69b0 ED |
7326 | } |
7327 | ||
46f16e63 | 7328 | static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7329 | { |
6f1d69b0 | 7330 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); |
54e472ae VS |
7331 | |
7332 | /* interrupts should cause a wake up from C3 */ | |
7333 | I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | | |
7334 | _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); | |
1038392b VS |
7335 | |
7336 | I915_WRITE(MEM_MODE, | |
7337 | _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); | |
6f1d69b0 ED |
7338 | } |
7339 | ||
46f16e63 | 7340 | static void i830_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7341 | { |
1038392b VS |
7342 | I915_WRITE(MEM_MODE, |
7343 | _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | | |
7344 | _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); | |
6f1d69b0 ED |
7345 | } |
7346 | ||
46f16e63 | 7347 | void intel_init_clock_gating(struct drm_i915_private *dev_priv) |
6f1d69b0 | 7348 | { |
46f16e63 | 7349 | dev_priv->display.init_clock_gating(dev_priv); |
6f1d69b0 ED |
7350 | } |
7351 | ||
712bf364 | 7352 | void intel_suspend_hw(struct drm_i915_private *dev_priv) |
7d708ee4 | 7353 | { |
712bf364 VS |
7354 | if (HAS_PCH_LPT(dev_priv)) |
7355 | lpt_suspend_hw(dev_priv); | |
7d708ee4 ID |
7356 | } |
7357 | ||
46f16e63 | 7358 | static void nop_init_clock_gating(struct drm_i915_private *dev_priv) |
bb400da9 | 7359 | { |
f8d18d5c WK |
7360 | drm_dbg_kms(&dev_priv->drm, |
7361 | "No clock gating settings or workarounds applied.\n"); | |
bb400da9 ID |
7362 | } |
7363 | ||
7364 | /** | |
7365 | * intel_init_clock_gating_hooks - setup the clock gating hooks | |
7366 | * @dev_priv: device private | |
7367 | * | |
7368 | * Set up the hooks that configure which clocks of a given platform can be | |
7369 | * gated and also apply various GT- and display-specific workarounds for these | |
7370 | * platforms. Note that some GT specific workarounds are applied separately | |
7371 | * when GPU contexts or batchbuffers start their execution. | |
7372 | */ | |
7373 | void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) | |
7374 | { | |
13e53c5c | 7375 | if (IS_GEN(dev_priv, 12)) |
5d869230 | 7376 | dev_priv->display.init_clock_gating = tgl_init_clock_gating; |
13e53c5c | 7377 | else if (IS_GEN(dev_priv, 11)) |
d65dc3e4 | 7378 | dev_priv->display.init_clock_gating = icl_init_clock_gating; |
cc38cae7 | 7379 | else if (IS_CANNONLAKE(dev_priv)) |
91200c09 | 7380 | dev_priv->display.init_clock_gating = cnl_init_clock_gating; |
0a46ddd5 RV |
7381 | else if (IS_COFFEELAKE(dev_priv)) |
7382 | dev_priv->display.init_clock_gating = cfl_init_clock_gating; | |
90007bca | 7383 | else if (IS_SKYLAKE(dev_priv)) |
91200c09 | 7384 | dev_priv->display.init_clock_gating = skl_init_clock_gating; |
0a46ddd5 | 7385 | else if (IS_KABYLAKE(dev_priv)) |
91200c09 | 7386 | dev_priv->display.init_clock_gating = kbl_init_clock_gating; |
9fb5026f | 7387 | else if (IS_BROXTON(dev_priv)) |
bb400da9 | 7388 | dev_priv->display.init_clock_gating = bxt_init_clock_gating; |
9fb5026f ACO |
7389 | else if (IS_GEMINILAKE(dev_priv)) |
7390 | dev_priv->display.init_clock_gating = glk_init_clock_gating; | |
bb400da9 | 7391 | else if (IS_BROADWELL(dev_priv)) |
91200c09 | 7392 | dev_priv->display.init_clock_gating = bdw_init_clock_gating; |
bb400da9 | 7393 | else if (IS_CHERRYVIEW(dev_priv)) |
91200c09 | 7394 | dev_priv->display.init_clock_gating = chv_init_clock_gating; |
bb400da9 | 7395 | else if (IS_HASWELL(dev_priv)) |
91200c09 | 7396 | dev_priv->display.init_clock_gating = hsw_init_clock_gating; |
bb400da9 | 7397 | else if (IS_IVYBRIDGE(dev_priv)) |
91200c09 | 7398 | dev_priv->display.init_clock_gating = ivb_init_clock_gating; |
bb400da9 | 7399 | else if (IS_VALLEYVIEW(dev_priv)) |
91200c09 | 7400 | dev_priv->display.init_clock_gating = vlv_init_clock_gating; |
cf819eff | 7401 | else if (IS_GEN(dev_priv, 6)) |
bb400da9 | 7402 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
cf819eff | 7403 | else if (IS_GEN(dev_priv, 5)) |
91200c09 | 7404 | dev_priv->display.init_clock_gating = ilk_init_clock_gating; |
bb400da9 ID |
7405 | else if (IS_G4X(dev_priv)) |
7406 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; | |
c0f86832 | 7407 | else if (IS_I965GM(dev_priv)) |
91200c09 | 7408 | dev_priv->display.init_clock_gating = i965gm_init_clock_gating; |
c0f86832 | 7409 | else if (IS_I965G(dev_priv)) |
91200c09 | 7410 | dev_priv->display.init_clock_gating = i965g_init_clock_gating; |
cf819eff | 7411 | else if (IS_GEN(dev_priv, 3)) |
bb400da9 ID |
7412 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
7413 | else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) | |
7414 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | |
cf819eff | 7415 | else if (IS_GEN(dev_priv, 2)) |
bb400da9 ID |
7416 | dev_priv->display.init_clock_gating = i830_init_clock_gating; |
7417 | else { | |
7418 | MISSING_CASE(INTEL_DEVID(dev_priv)); | |
7419 | dev_priv->display.init_clock_gating = nop_init_clock_gating; | |
7420 | } | |
7421 | } | |
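/*
 * Illustrative only: wiring up a new platform follows the same pattern as
 * the entries above -- provide a <platform>_init_clock_gating() routine and
 * add a branch to the ladder, e.g. (IS_FOOLAKE and foolake_init_clock_gating
 * are made-up names for the sake of the example):
 *
 *	else if (IS_FOOLAKE(dev_priv))
 *		dev_priv->display.init_clock_gating = foolake_init_clock_gating;
 */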
7422 | ||
1fa61106 | 7423 | /* Set up chip specific power management-related functions */ |
62d75df7 | 7424 | void intel_init_pm(struct drm_i915_private *dev_priv) |
1fa61106 | 7425 | { |
c921aba8 | 7426 | /* For cxsr */ |
9b1e14f4 | 7427 | if (IS_PINEVIEW(dev_priv)) |
1d218220 | 7428 | pnv_get_mem_freq(dev_priv); |
cf819eff | 7429 | else if (IS_GEN(dev_priv, 5)) |
9eae5e27 | 7430 | ilk_get_mem_freq(dev_priv); |
c921aba8 | 7431 | |
b068a860 JA |
7432 | if (intel_has_sagv(dev_priv)) |
7433 | skl_setup_sagv_block_time(dev_priv); | |
7434 | ||
1fa61106 | 7435 | /* For FIFO watermark updates */ |
62d75df7 | 7436 | if (INTEL_GEN(dev_priv) >= 9) { |
bb726519 | 7437 | skl_setup_wm_latency(dev_priv); |
98d39494 | 7438 | dev_priv->display.compute_global_watermarks = skl_compute_wm; |
6e266956 | 7439 | } else if (HAS_PCH_SPLIT(dev_priv)) { |
bb726519 | 7440 | ilk_setup_wm_latency(dev_priv); |
53615a5e | 7441 | |
cf819eff | 7442 | if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] && |
bd602544 | 7443 | dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || |
cf819eff | 7444 | (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] && |
bd602544 | 7445 | dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { |
86c8bbbe | 7446 | dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; |
ed4a6a7c MR |
7447 | dev_priv->display.compute_intermediate_wm = |
7448 | ilk_compute_intermediate_wm; | |
7449 | dev_priv->display.initial_watermarks = | |
7450 | ilk_initial_watermarks; | |
7451 | dev_priv->display.optimize_watermarks = | |
7452 | ilk_optimize_watermarks; | |
bd602544 | 7453 | } else { |
f8d18d5c WK |
7454 | drm_dbg_kms(&dev_priv->drm, |
7455 | "Failed to read display plane latency. " | |
7456 | "Disable CxSR\n"); | |
bd602544 | 7457 | } |
6b6b3eef | 7458 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
bb726519 | 7459 | vlv_setup_wm_latency(dev_priv); |
ff32c54e | 7460 | dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm; |
4841da51 | 7461 | dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm; |
ff32c54e | 7462 | dev_priv->display.initial_watermarks = vlv_initial_watermarks; |
4841da51 | 7463 | dev_priv->display.optimize_watermarks = vlv_optimize_watermarks; |
ff32c54e | 7464 | dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo; |
04548cba VS |
7465 | } else if (IS_G4X(dev_priv)) { |
7466 | g4x_setup_wm_latency(dev_priv); | |
7467 | dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm; | |
7468 | dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm; | |
7469 | dev_priv->display.initial_watermarks = g4x_initial_watermarks; | |
7470 | dev_priv->display.optimize_watermarks = g4x_optimize_watermarks; | |
9b1e14f4 | 7471 | } else if (IS_PINEVIEW(dev_priv)) { |
86d35d4e | 7472 | if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), |
1fa61106 ED |
7473 | dev_priv->is_ddr3, |
7474 | dev_priv->fsb_freq, | |
7475 | dev_priv->mem_freq)) { | |
f8d18d5c WK |
7476 | drm_info(&dev_priv->drm, |
7477 | "failed to find known CxSR latency " | |
1fa61106 ED |
7478 | "(found ddr%s fsb freq %d, mem freq %d), " |
7479 | "disabling CxSR\n", | |
7480 | (dev_priv->is_ddr3 == 1) ? "3" : "2", | |
7481 | dev_priv->fsb_freq, dev_priv->mem_freq); | |
7482 | /* Disable CxSR and never update its watermark again */ | |
5209b1f4 | 7483 | intel_set_memory_cxsr(dev_priv, false); |
1fa61106 ED |
7484 | dev_priv->display.update_wm = NULL; |
7485 | } else | |
1d218220 | 7486 | dev_priv->display.update_wm = pnv_update_wm; |
cf819eff | 7487 | } else if (IS_GEN(dev_priv, 4)) { |
1fa61106 | 7488 | dev_priv->display.update_wm = i965_update_wm; |
cf819eff | 7489 | } else if (IS_GEN(dev_priv, 3)) { |
1fa61106 ED |
7490 | dev_priv->display.update_wm = i9xx_update_wm; |
7491 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | |
cf819eff | 7492 | } else if (IS_GEN(dev_priv, 2)) { |
24977870 | 7493 | if (INTEL_NUM_PIPES(dev_priv) == 1) { |
feb56b93 | 7494 | dev_priv->display.update_wm = i845_update_wm; |
1fa61106 | 7495 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
feb56b93 SV |
7496 | } else { |
7497 | dev_priv->display.update_wm = i9xx_update_wm; | |
1fa61106 | 7498 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
feb56b93 | 7499 | } |
feb56b93 | 7500 | } else { |
f8d18d5c WK |
7501 | drm_err(&dev_priv->drm, |
7502 | "unexpected fall-through in %s\n", __func__); | |
1fa61106 ED |
7503 | } |
7504 | } | |
7505 | ||
192aa181 | 7506 | void intel_pm_setup(struct drm_i915_private *dev_priv) |
907b28c5 | 7507 | { |
ad1443f0 SAK |
7508 | dev_priv->runtime_pm.suspended = false; |
7509 | atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); | |
907b28c5 | 7510 | } |