2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/reservation.h>
33 #include <linux/slab.h>
34 #include <linux/vgaarb.h>
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45 #include <drm/i915_drm.h>
48 #include "i915_gem_clflush.h"
49 #include "i915_trace.h"
50 #include "intel_drv.h"
51 #include "intel_dsi.h"
52 #include "intel_frontbuffer.h"
54 #include "intel_drv.h"
55 #include "intel_dsi.h"
56 #include "intel_frontbuffer.h"
59 #include "i915_gem_clflush.h"
60 #include "i915_reset.h"
61 #include "i915_trace.h"
63 /* Primary plane formats for gen <= 3 */
64 static const u32 i8xx_primary_formats
[] = {
71 /* Primary plane formats for gen >= 4 */
72 static const u32 i965_primary_formats
[] = {
77 DRM_FORMAT_XRGB2101010
,
78 DRM_FORMAT_XBGR2101010
,
81 static const u64 i9xx_format_modifiers
[] = {
82 I915_FORMAT_MOD_X_TILED
,
83 DRM_FORMAT_MOD_LINEAR
,
84 DRM_FORMAT_MOD_INVALID
88 static const u32 intel_cursor_formats
[] = {
92 static const u64 cursor_format_modifiers
[] = {
93 DRM_FORMAT_MOD_LINEAR
,
94 DRM_FORMAT_MOD_INVALID
97 static void i9xx_crtc_clock_get(struct intel_crtc
*crtc
,
98 struct intel_crtc_state
*pipe_config
);
99 static void ironlake_pch_clock_get(struct intel_crtc
*crtc
,
100 struct intel_crtc_state
*pipe_config
);
102 static int intel_framebuffer_init(struct intel_framebuffer
*ifb
,
103 struct drm_i915_gem_object
*obj
,
104 struct drm_mode_fb_cmd2
*mode_cmd
);
105 static void intel_set_pipe_timings(const struct intel_crtc_state
*crtc_state
);
106 static void intel_set_pipe_src_size(const struct intel_crtc_state
*crtc_state
);
107 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state
*crtc_state
,
108 const struct intel_link_m_n
*m_n
,
109 const struct intel_link_m_n
*m2_n2
);
110 static void i9xx_set_pipeconf(const struct intel_crtc_state
*crtc_state
);
111 static void ironlake_set_pipeconf(const struct intel_crtc_state
*crtc_state
);
112 static void haswell_set_pipeconf(const struct intel_crtc_state
*crtc_state
);
113 static void haswell_set_pipemisc(const struct intel_crtc_state
*crtc_state
);
114 static void vlv_prepare_pll(struct intel_crtc
*crtc
,
115 const struct intel_crtc_state
*pipe_config
);
116 static void chv_prepare_pll(struct intel_crtc
*crtc
,
117 const struct intel_crtc_state
*pipe_config
);
118 static void intel_begin_crtc_commit(struct drm_crtc
*, struct drm_crtc_state
*);
119 static void intel_finish_crtc_commit(struct drm_crtc
*, struct drm_crtc_state
*);
120 static void intel_crtc_init_scalers(struct intel_crtc
*crtc
,
121 struct intel_crtc_state
*crtc_state
);
122 static void skylake_pfit_enable(const struct intel_crtc_state
*crtc_state
);
123 static void ironlake_pfit_disable(const struct intel_crtc_state
*old_crtc_state
);
124 static void ironlake_pfit_enable(const struct intel_crtc_state
*crtc_state
);
125 static void intel_modeset_setup_hw_state(struct drm_device
*dev
,
126 struct drm_modeset_acquire_ctx
*ctx
);
127 static void intel_pre_disable_primary_noatomic(struct drm_crtc
*crtc
);
132 } dot
, vco
, n
, m
, m1
, m2
, p
, p1
;
136 int p2_slow
, p2_fast
;
140 /* returns HPLL frequency in kHz */
141 int vlv_get_hpll_vco(struct drm_i915_private
*dev_priv
)
143 int hpll_freq
, vco_freq
[] = { 800, 1600, 2000, 2400 };
145 /* Obtain SKU information */
146 mutex_lock(&dev_priv
->sb_lock
);
147 hpll_freq
= vlv_cck_read(dev_priv
, CCK_FUSE_REG
) &
148 CCK_FUSE_HPLL_FREQ_MASK
;
149 mutex_unlock(&dev_priv
->sb_lock
);
151 return vco_freq
[hpll_freq
] * 1000;
154 int vlv_get_cck_clock(struct drm_i915_private
*dev_priv
,
155 const char *name
, u32 reg
, int ref_freq
)
160 mutex_lock(&dev_priv
->sb_lock
);
161 val
= vlv_cck_read(dev_priv
, reg
);
162 mutex_unlock(&dev_priv
->sb_lock
);
164 divider
= val
& CCK_FREQUENCY_VALUES
;
166 WARN((val
& CCK_FREQUENCY_STATUS
) !=
167 (divider
<< CCK_FREQUENCY_STATUS_SHIFT
),
168 "%s change in progress\n", name
);
170 return DIV_ROUND_CLOSEST(ref_freq
<< 1, divider
+ 1);
173 int vlv_get_cck_clock_hpll(struct drm_i915_private
*dev_priv
,
174 const char *name
, u32 reg
)
176 if (dev_priv
->hpll_freq
== 0)
177 dev_priv
->hpll_freq
= vlv_get_hpll_vco(dev_priv
);
179 return vlv_get_cck_clock(dev_priv
, name
, reg
,
180 dev_priv
->hpll_freq
);
183 static void intel_update_czclk(struct drm_i915_private
*dev_priv
)
185 if (!(IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)))
188 dev_priv
->czclk_freq
= vlv_get_cck_clock_hpll(dev_priv
, "czclk",
189 CCK_CZ_CLOCK_CONTROL
);
191 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv
->czclk_freq
);
194 static inline u32
/* units of 100MHz */
195 intel_fdi_link_freq(struct drm_i915_private
*dev_priv
,
196 const struct intel_crtc_state
*pipe_config
)
198 if (HAS_DDI(dev_priv
))
199 return pipe_config
->port_clock
; /* SPLL */
201 return dev_priv
->fdi_pll_freq
;
204 static const struct intel_limit intel_limits_i8xx_dac
= {
205 .dot
= { .min
= 25000, .max
= 350000 },
206 .vco
= { .min
= 908000, .max
= 1512000 },
207 .n
= { .min
= 2, .max
= 16 },
208 .m
= { .min
= 96, .max
= 140 },
209 .m1
= { .min
= 18, .max
= 26 },
210 .m2
= { .min
= 6, .max
= 16 },
211 .p
= { .min
= 4, .max
= 128 },
212 .p1
= { .min
= 2, .max
= 33 },
213 .p2
= { .dot_limit
= 165000,
214 .p2_slow
= 4, .p2_fast
= 2 },
217 static const struct intel_limit intel_limits_i8xx_dvo
= {
218 .dot
= { .min
= 25000, .max
= 350000 },
219 .vco
= { .min
= 908000, .max
= 1512000 },
220 .n
= { .min
= 2, .max
= 16 },
221 .m
= { .min
= 96, .max
= 140 },
222 .m1
= { .min
= 18, .max
= 26 },
223 .m2
= { .min
= 6, .max
= 16 },
224 .p
= { .min
= 4, .max
= 128 },
225 .p1
= { .min
= 2, .max
= 33 },
226 .p2
= { .dot_limit
= 165000,
227 .p2_slow
= 4, .p2_fast
= 4 },
230 static const struct intel_limit intel_limits_i8xx_lvds
= {
231 .dot
= { .min
= 25000, .max
= 350000 },
232 .vco
= { .min
= 908000, .max
= 1512000 },
233 .n
= { .min
= 2, .max
= 16 },
234 .m
= { .min
= 96, .max
= 140 },
235 .m1
= { .min
= 18, .max
= 26 },
236 .m2
= { .min
= 6, .max
= 16 },
237 .p
= { .min
= 4, .max
= 128 },
238 .p1
= { .min
= 1, .max
= 6 },
239 .p2
= { .dot_limit
= 165000,
240 .p2_slow
= 14, .p2_fast
= 7 },
243 static const struct intel_limit intel_limits_i9xx_sdvo
= {
244 .dot
= { .min
= 20000, .max
= 400000 },
245 .vco
= { .min
= 1400000, .max
= 2800000 },
246 .n
= { .min
= 1, .max
= 6 },
247 .m
= { .min
= 70, .max
= 120 },
248 .m1
= { .min
= 8, .max
= 18 },
249 .m2
= { .min
= 3, .max
= 7 },
250 .p
= { .min
= 5, .max
= 80 },
251 .p1
= { .min
= 1, .max
= 8 },
252 .p2
= { .dot_limit
= 200000,
253 .p2_slow
= 10, .p2_fast
= 5 },
256 static const struct intel_limit intel_limits_i9xx_lvds
= {
257 .dot
= { .min
= 20000, .max
= 400000 },
258 .vco
= { .min
= 1400000, .max
= 2800000 },
259 .n
= { .min
= 1, .max
= 6 },
260 .m
= { .min
= 70, .max
= 120 },
261 .m1
= { .min
= 8, .max
= 18 },
262 .m2
= { .min
= 3, .max
= 7 },
263 .p
= { .min
= 7, .max
= 98 },
264 .p1
= { .min
= 1, .max
= 8 },
265 .p2
= { .dot_limit
= 112000,
266 .p2_slow
= 14, .p2_fast
= 7 },
270 static const struct intel_limit intel_limits_g4x_sdvo
= {
271 .dot
= { .min
= 25000, .max
= 270000 },
272 .vco
= { .min
= 1750000, .max
= 3500000},
273 .n
= { .min
= 1, .max
= 4 },
274 .m
= { .min
= 104, .max
= 138 },
275 .m1
= { .min
= 17, .max
= 23 },
276 .m2
= { .min
= 5, .max
= 11 },
277 .p
= { .min
= 10, .max
= 30 },
278 .p1
= { .min
= 1, .max
= 3},
279 .p2
= { .dot_limit
= 270000,
285 static const struct intel_limit intel_limits_g4x_hdmi
= {
286 .dot
= { .min
= 22000, .max
= 400000 },
287 .vco
= { .min
= 1750000, .max
= 3500000},
288 .n
= { .min
= 1, .max
= 4 },
289 .m
= { .min
= 104, .max
= 138 },
290 .m1
= { .min
= 16, .max
= 23 },
291 .m2
= { .min
= 5, .max
= 11 },
292 .p
= { .min
= 5, .max
= 80 },
293 .p1
= { .min
= 1, .max
= 8},
294 .p2
= { .dot_limit
= 165000,
295 .p2_slow
= 10, .p2_fast
= 5 },
298 static const struct intel_limit intel_limits_g4x_single_channel_lvds
= {
299 .dot
= { .min
= 20000, .max
= 115000 },
300 .vco
= { .min
= 1750000, .max
= 3500000 },
301 .n
= { .min
= 1, .max
= 3 },
302 .m
= { .min
= 104, .max
= 138 },
303 .m1
= { .min
= 17, .max
= 23 },
304 .m2
= { .min
= 5, .max
= 11 },
305 .p
= { .min
= 28, .max
= 112 },
306 .p1
= { .min
= 2, .max
= 8 },
307 .p2
= { .dot_limit
= 0,
308 .p2_slow
= 14, .p2_fast
= 14
312 static const struct intel_limit intel_limits_g4x_dual_channel_lvds
= {
313 .dot
= { .min
= 80000, .max
= 224000 },
314 .vco
= { .min
= 1750000, .max
= 3500000 },
315 .n
= { .min
= 1, .max
= 3 },
316 .m
= { .min
= 104, .max
= 138 },
317 .m1
= { .min
= 17, .max
= 23 },
318 .m2
= { .min
= 5, .max
= 11 },
319 .p
= { .min
= 14, .max
= 42 },
320 .p1
= { .min
= 2, .max
= 6 },
321 .p2
= { .dot_limit
= 0,
322 .p2_slow
= 7, .p2_fast
= 7
326 static const struct intel_limit intel_limits_pineview_sdvo
= {
327 .dot
= { .min
= 20000, .max
= 400000},
328 .vco
= { .min
= 1700000, .max
= 3500000 },
329 /* Pineview's Ncounter is a ring counter */
330 .n
= { .min
= 3, .max
= 6 },
331 .m
= { .min
= 2, .max
= 256 },
332 /* Pineview only has one combined m divider, which we treat as m2. */
333 .m1
= { .min
= 0, .max
= 0 },
334 .m2
= { .min
= 0, .max
= 254 },
335 .p
= { .min
= 5, .max
= 80 },
336 .p1
= { .min
= 1, .max
= 8 },
337 .p2
= { .dot_limit
= 200000,
338 .p2_slow
= 10, .p2_fast
= 5 },
341 static const struct intel_limit intel_limits_pineview_lvds
= {
342 .dot
= { .min
= 20000, .max
= 400000 },
343 .vco
= { .min
= 1700000, .max
= 3500000 },
344 .n
= { .min
= 3, .max
= 6 },
345 .m
= { .min
= 2, .max
= 256 },
346 .m1
= { .min
= 0, .max
= 0 },
347 .m2
= { .min
= 0, .max
= 254 },
348 .p
= { .min
= 7, .max
= 112 },
349 .p1
= { .min
= 1, .max
= 8 },
350 .p2
= { .dot_limit
= 112000,
351 .p2_slow
= 14, .p2_fast
= 14 },
354 /* Ironlake / Sandybridge
356 * We calculate clock using (register_value + 2) for N/M1/M2, so here
357 * the range value for them is (actual_value - 2).
359 static const struct intel_limit intel_limits_ironlake_dac
= {
360 .dot
= { .min
= 25000, .max
= 350000 },
361 .vco
= { .min
= 1760000, .max
= 3510000 },
362 .n
= { .min
= 1, .max
= 5 },
363 .m
= { .min
= 79, .max
= 127 },
364 .m1
= { .min
= 12, .max
= 22 },
365 .m2
= { .min
= 5, .max
= 9 },
366 .p
= { .min
= 5, .max
= 80 },
367 .p1
= { .min
= 1, .max
= 8 },
368 .p2
= { .dot_limit
= 225000,
369 .p2_slow
= 10, .p2_fast
= 5 },
372 static const struct intel_limit intel_limits_ironlake_single_lvds
= {
373 .dot
= { .min
= 25000, .max
= 350000 },
374 .vco
= { .min
= 1760000, .max
= 3510000 },
375 .n
= { .min
= 1, .max
= 3 },
376 .m
= { .min
= 79, .max
= 118 },
377 .m1
= { .min
= 12, .max
= 22 },
378 .m2
= { .min
= 5, .max
= 9 },
379 .p
= { .min
= 28, .max
= 112 },
380 .p1
= { .min
= 2, .max
= 8 },
381 .p2
= { .dot_limit
= 225000,
382 .p2_slow
= 14, .p2_fast
= 14 },
385 static const struct intel_limit intel_limits_ironlake_dual_lvds
= {
386 .dot
= { .min
= 25000, .max
= 350000 },
387 .vco
= { .min
= 1760000, .max
= 3510000 },
388 .n
= { .min
= 1, .max
= 3 },
389 .m
= { .min
= 79, .max
= 127 },
390 .m1
= { .min
= 12, .max
= 22 },
391 .m2
= { .min
= 5, .max
= 9 },
392 .p
= { .min
= 14, .max
= 56 },
393 .p1
= { .min
= 2, .max
= 8 },
394 .p2
= { .dot_limit
= 225000,
395 .p2_slow
= 7, .p2_fast
= 7 },
398 /* LVDS 100mhz refclk limits. */
399 static const struct intel_limit intel_limits_ironlake_single_lvds_100m
= {
400 .dot
= { .min
= 25000, .max
= 350000 },
401 .vco
= { .min
= 1760000, .max
= 3510000 },
402 .n
= { .min
= 1, .max
= 2 },
403 .m
= { .min
= 79, .max
= 126 },
404 .m1
= { .min
= 12, .max
= 22 },
405 .m2
= { .min
= 5, .max
= 9 },
406 .p
= { .min
= 28, .max
= 112 },
407 .p1
= { .min
= 2, .max
= 8 },
408 .p2
= { .dot_limit
= 225000,
409 .p2_slow
= 14, .p2_fast
= 14 },
412 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m
= {
413 .dot
= { .min
= 25000, .max
= 350000 },
414 .vco
= { .min
= 1760000, .max
= 3510000 },
415 .n
= { .min
= 1, .max
= 3 },
416 .m
= { .min
= 79, .max
= 126 },
417 .m1
= { .min
= 12, .max
= 22 },
418 .m2
= { .min
= 5, .max
= 9 },
419 .p
= { .min
= 14, .max
= 42 },
420 .p1
= { .min
= 2, .max
= 6 },
421 .p2
= { .dot_limit
= 225000,
422 .p2_slow
= 7, .p2_fast
= 7 },
425 static const struct intel_limit intel_limits_vlv
= {
427 * These are the data rate limits (measured in fast clocks)
428 * since those are the strictest limits we have. The fast
429 * clock and actual rate limits are more relaxed, so checking
430 * them would make no difference.
432 .dot
= { .min
= 25000 * 5, .max
= 270000 * 5 },
433 .vco
= { .min
= 4000000, .max
= 6000000 },
434 .n
= { .min
= 1, .max
= 7 },
435 .m1
= { .min
= 2, .max
= 3 },
436 .m2
= { .min
= 11, .max
= 156 },
437 .p1
= { .min
= 2, .max
= 3 },
438 .p2
= { .p2_slow
= 2, .p2_fast
= 20 }, /* slow=min, fast=max */
441 static const struct intel_limit intel_limits_chv
= {
443 * These are the data rate limits (measured in fast clocks)
444 * since those are the strictest limits we have. The fast
445 * clock and actual rate limits are more relaxed, so checking
446 * them would make no difference.
448 .dot
= { .min
= 25000 * 5, .max
= 540000 * 5},
449 .vco
= { .min
= 4800000, .max
= 6480000 },
450 .n
= { .min
= 1, .max
= 1 },
451 .m1
= { .min
= 2, .max
= 2 },
452 .m2
= { .min
= 24 << 22, .max
= 175 << 22 },
453 .p1
= { .min
= 2, .max
= 4 },
454 .p2
= { .p2_slow
= 1, .p2_fast
= 14 },
457 static const struct intel_limit intel_limits_bxt
= {
458 /* FIXME: find real dot limits */
459 .dot
= { .min
= 0, .max
= INT_MAX
},
460 .vco
= { .min
= 4800000, .max
= 6700000 },
461 .n
= { .min
= 1, .max
= 1 },
462 .m1
= { .min
= 2, .max
= 2 },
463 /* FIXME: find real m2 limits */
464 .m2
= { .min
= 2 << 22, .max
= 255 << 22 },
465 .p1
= { .min
= 2, .max
= 4 },
466 .p2
= { .p2_slow
= 1, .p2_fast
= 20 },
470 skl_wa_clkgate(struct drm_i915_private
*dev_priv
, int pipe
, bool enable
)
473 I915_WRITE(CLKGATE_DIS_PSL(pipe
),
474 DUPS1_GATING_DIS
| DUPS2_GATING_DIS
);
476 I915_WRITE(CLKGATE_DIS_PSL(pipe
),
477 I915_READ(CLKGATE_DIS_PSL(pipe
)) &
478 ~(DUPS1_GATING_DIS
| DUPS2_GATING_DIS
));
482 needs_modeset(const struct drm_crtc_state
*state
)
484 return drm_atomic_crtc_needs_modeset(state
);
488 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
489 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
490 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
491 * The helpers' return value is the rate of the clock that is fed to the
492 * display engine's pipe which can be the above fast dot clock rate or a
493 * divided-down version of it.
495 /* m1 is reserved as 0 in Pineview, n is a ring counter */
496 static int pnv_calc_dpll_params(int refclk
, struct dpll
*clock
)
498 clock
->m
= clock
->m2
+ 2;
499 clock
->p
= clock
->p1
* clock
->p2
;
500 if (WARN_ON(clock
->n
== 0 || clock
->p
== 0))
502 clock
->vco
= DIV_ROUND_CLOSEST(refclk
* clock
->m
, clock
->n
);
503 clock
->dot
= DIV_ROUND_CLOSEST(clock
->vco
, clock
->p
);
508 static u32
i9xx_dpll_compute_m(struct dpll
*dpll
)
510 return 5 * (dpll
->m1
+ 2) + (dpll
->m2
+ 2);
513 static int i9xx_calc_dpll_params(int refclk
, struct dpll
*clock
)
515 clock
->m
= i9xx_dpll_compute_m(clock
);
516 clock
->p
= clock
->p1
* clock
->p2
;
517 if (WARN_ON(clock
->n
+ 2 == 0 || clock
->p
== 0))
519 clock
->vco
= DIV_ROUND_CLOSEST(refclk
* clock
->m
, clock
->n
+ 2);
520 clock
->dot
= DIV_ROUND_CLOSEST(clock
->vco
, clock
->p
);
525 static int vlv_calc_dpll_params(int refclk
, struct dpll
*clock
)
527 clock
->m
= clock
->m1
* clock
->m2
;
528 clock
->p
= clock
->p1
* clock
->p2
;
529 if (WARN_ON(clock
->n
== 0 || clock
->p
== 0))
531 clock
->vco
= DIV_ROUND_CLOSEST(refclk
* clock
->m
, clock
->n
);
532 clock
->dot
= DIV_ROUND_CLOSEST(clock
->vco
, clock
->p
);
534 return clock
->dot
/ 5;
537 int chv_calc_dpll_params(int refclk
, struct dpll
*clock
)
539 clock
->m
= clock
->m1
* clock
->m2
;
540 clock
->p
= clock
->p1
* clock
->p2
;
541 if (WARN_ON(clock
->n
== 0 || clock
->p
== 0))
543 clock
->vco
= DIV_ROUND_CLOSEST_ULL((u64
)refclk
* clock
->m
,
545 clock
->dot
= DIV_ROUND_CLOSEST(clock
->vco
, clock
->p
);
547 return clock
->dot
/ 5;
550 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
553 * Returns whether the given set of divisors are valid for a given refclk with
554 * the given connectors.
556 static bool intel_PLL_is_valid(struct drm_i915_private
*dev_priv
,
557 const struct intel_limit
*limit
,
558 const struct dpll
*clock
)
560 if (clock
->n
< limit
->n
.min
|| limit
->n
.max
< clock
->n
)
561 INTELPllInvalid("n out of range\n");
562 if (clock
->p1
< limit
->p1
.min
|| limit
->p1
.max
< clock
->p1
)
563 INTELPllInvalid("p1 out of range\n");
564 if (clock
->m2
< limit
->m2
.min
|| limit
->m2
.max
< clock
->m2
)
565 INTELPllInvalid("m2 out of range\n");
566 if (clock
->m1
< limit
->m1
.min
|| limit
->m1
.max
< clock
->m1
)
567 INTELPllInvalid("m1 out of range\n");
569 if (!IS_PINEVIEW(dev_priv
) && !IS_VALLEYVIEW(dev_priv
) &&
570 !IS_CHERRYVIEW(dev_priv
) && !IS_GEN9_LP(dev_priv
))
571 if (clock
->m1
<= clock
->m2
)
572 INTELPllInvalid("m1 <= m2\n");
574 if (!IS_VALLEYVIEW(dev_priv
) && !IS_CHERRYVIEW(dev_priv
) &&
575 !IS_GEN9_LP(dev_priv
)) {
576 if (clock
->p
< limit
->p
.min
|| limit
->p
.max
< clock
->p
)
577 INTELPllInvalid("p out of range\n");
578 if (clock
->m
< limit
->m
.min
|| limit
->m
.max
< clock
->m
)
579 INTELPllInvalid("m out of range\n");
582 if (clock
->vco
< limit
->vco
.min
|| limit
->vco
.max
< clock
->vco
)
583 INTELPllInvalid("vco out of range\n");
584 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
585 * connector, etc., rather than just a single range.
587 if (clock
->dot
< limit
->dot
.min
|| limit
->dot
.max
< clock
->dot
)
588 INTELPllInvalid("dot out of range\n");
594 i9xx_select_p2_div(const struct intel_limit
*limit
,
595 const struct intel_crtc_state
*crtc_state
,
598 struct drm_device
*dev
= crtc_state
->base
.crtc
->dev
;
600 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
602 * For LVDS just rely on its current settings for dual-channel.
603 * We haven't figured out how to reliably set up different
604 * single/dual channel state, if we even can.
606 if (intel_is_dual_link_lvds(dev
))
607 return limit
->p2
.p2_fast
;
609 return limit
->p2
.p2_slow
;
611 if (target
< limit
->p2
.dot_limit
)
612 return limit
->p2
.p2_slow
;
614 return limit
->p2
.p2_fast
;
619 * Returns a set of divisors for the desired target clock with the given
620 * refclk, or FALSE. The returned values represent the clock equation:
621 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
623 * Target and reference clocks are specified in kHz.
625 * If match_clock is provided, then best_clock P divider must match the P
626 * divider from @match_clock used for LVDS downclocking.
629 i9xx_find_best_dpll(const struct intel_limit
*limit
,
630 struct intel_crtc_state
*crtc_state
,
631 int target
, int refclk
, struct dpll
*match_clock
,
632 struct dpll
*best_clock
)
634 struct drm_device
*dev
= crtc_state
->base
.crtc
->dev
;
638 memset(best_clock
, 0, sizeof(*best_clock
));
640 clock
.p2
= i9xx_select_p2_div(limit
, crtc_state
, target
);
642 for (clock
.m1
= limit
->m1
.min
; clock
.m1
<= limit
->m1
.max
;
644 for (clock
.m2
= limit
->m2
.min
;
645 clock
.m2
<= limit
->m2
.max
; clock
.m2
++) {
646 if (clock
.m2
>= clock
.m1
)
648 for (clock
.n
= limit
->n
.min
;
649 clock
.n
<= limit
->n
.max
; clock
.n
++) {
650 for (clock
.p1
= limit
->p1
.min
;
651 clock
.p1
<= limit
->p1
.max
; clock
.p1
++) {
654 i9xx_calc_dpll_params(refclk
, &clock
);
655 if (!intel_PLL_is_valid(to_i915(dev
),
660 clock
.p
!= match_clock
->p
)
663 this_err
= abs(clock
.dot
- target
);
664 if (this_err
< err
) {
673 return (err
!= target
);
677 * Returns a set of divisors for the desired target clock with the given
678 * refclk, or FALSE. The returned values represent the clock equation:
679 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
681 * Target and reference clocks are specified in kHz.
683 * If match_clock is provided, then best_clock P divider must match the P
684 * divider from @match_clock used for LVDS downclocking.
687 pnv_find_best_dpll(const struct intel_limit
*limit
,
688 struct intel_crtc_state
*crtc_state
,
689 int target
, int refclk
, struct dpll
*match_clock
,
690 struct dpll
*best_clock
)
692 struct drm_device
*dev
= crtc_state
->base
.crtc
->dev
;
696 memset(best_clock
, 0, sizeof(*best_clock
));
698 clock
.p2
= i9xx_select_p2_div(limit
, crtc_state
, target
);
700 for (clock
.m1
= limit
->m1
.min
; clock
.m1
<= limit
->m1
.max
;
702 for (clock
.m2
= limit
->m2
.min
;
703 clock
.m2
<= limit
->m2
.max
; clock
.m2
++) {
704 for (clock
.n
= limit
->n
.min
;
705 clock
.n
<= limit
->n
.max
; clock
.n
++) {
706 for (clock
.p1
= limit
->p1
.min
;
707 clock
.p1
<= limit
->p1
.max
; clock
.p1
++) {
710 pnv_calc_dpll_params(refclk
, &clock
);
711 if (!intel_PLL_is_valid(to_i915(dev
),
716 clock
.p
!= match_clock
->p
)
719 this_err
= abs(clock
.dot
- target
);
720 if (this_err
< err
) {
729 return (err
!= target
);
733 * Returns a set of divisors for the desired target clock with the given
734 * refclk, or FALSE. The returned values represent the clock equation:
735 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
737 * Target and reference clocks are specified in kHz.
739 * If match_clock is provided, then best_clock P divider must match the P
740 * divider from @match_clock used for LVDS downclocking.
743 g4x_find_best_dpll(const struct intel_limit
*limit
,
744 struct intel_crtc_state
*crtc_state
,
745 int target
, int refclk
, struct dpll
*match_clock
,
746 struct dpll
*best_clock
)
748 struct drm_device
*dev
= crtc_state
->base
.crtc
->dev
;
752 /* approximately equals target * 0.00585 */
753 int err_most
= (target
>> 8) + (target
>> 9);
755 memset(best_clock
, 0, sizeof(*best_clock
));
757 clock
.p2
= i9xx_select_p2_div(limit
, crtc_state
, target
);
759 max_n
= limit
->n
.max
;
760 /* based on hardware requirement, prefer smaller n to precision */
761 for (clock
.n
= limit
->n
.min
; clock
.n
<= max_n
; clock
.n
++) {
762 /* based on hardware requirement, prefere larger m1,m2 */
763 for (clock
.m1
= limit
->m1
.max
;
764 clock
.m1
>= limit
->m1
.min
; clock
.m1
--) {
765 for (clock
.m2
= limit
->m2
.max
;
766 clock
.m2
>= limit
->m2
.min
; clock
.m2
--) {
767 for (clock
.p1
= limit
->p1
.max
;
768 clock
.p1
>= limit
->p1
.min
; clock
.p1
--) {
771 i9xx_calc_dpll_params(refclk
, &clock
);
772 if (!intel_PLL_is_valid(to_i915(dev
),
777 this_err
= abs(clock
.dot
- target
);
778 if (this_err
< err_most
) {
792 * Check if the calculated PLL configuration is more optimal compared to the
793 * best configuration and error found so far. Return the calculated error.
795 static bool vlv_PLL_is_optimal(struct drm_device
*dev
, int target_freq
,
796 const struct dpll
*calculated_clock
,
797 const struct dpll
*best_clock
,
798 unsigned int best_error_ppm
,
799 unsigned int *error_ppm
)
802 * For CHV ignore the error and consider only the P value.
803 * Prefer a bigger P value based on HW requirements.
805 if (IS_CHERRYVIEW(to_i915(dev
))) {
808 return calculated_clock
->p
> best_clock
->p
;
811 if (WARN_ON_ONCE(!target_freq
))
814 *error_ppm
= div_u64(1000000ULL *
815 abs(target_freq
- calculated_clock
->dot
),
818 * Prefer a better P value over a better (smaller) error if the error
819 * is small. Ensure this preference for future configurations too by
820 * setting the error to 0.
822 if (*error_ppm
< 100 && calculated_clock
->p
> best_clock
->p
) {
828 return *error_ppm
+ 10 < best_error_ppm
;
832 * Returns a set of divisors for the desired target clock with the given
833 * refclk, or FALSE. The returned values represent the clock equation:
834 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
837 vlv_find_best_dpll(const struct intel_limit
*limit
,
838 struct intel_crtc_state
*crtc_state
,
839 int target
, int refclk
, struct dpll
*match_clock
,
840 struct dpll
*best_clock
)
842 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
843 struct drm_device
*dev
= crtc
->base
.dev
;
845 unsigned int bestppm
= 1000000;
846 /* min update 19.2 MHz */
847 int max_n
= min(limit
->n
.max
, refclk
/ 19200);
850 target
*= 5; /* fast clock */
852 memset(best_clock
, 0, sizeof(*best_clock
));
854 /* based on hardware requirement, prefer smaller n to precision */
855 for (clock
.n
= limit
->n
.min
; clock
.n
<= max_n
; clock
.n
++) {
856 for (clock
.p1
= limit
->p1
.max
; clock
.p1
>= limit
->p1
.min
; clock
.p1
--) {
857 for (clock
.p2
= limit
->p2
.p2_fast
; clock
.p2
>= limit
->p2
.p2_slow
;
858 clock
.p2
-= clock
.p2
> 10 ? 2 : 1) {
859 clock
.p
= clock
.p1
* clock
.p2
;
860 /* based on hardware requirement, prefer bigger m1,m2 values */
861 for (clock
.m1
= limit
->m1
.min
; clock
.m1
<= limit
->m1
.max
; clock
.m1
++) {
864 clock
.m2
= DIV_ROUND_CLOSEST(target
* clock
.p
* clock
.n
,
867 vlv_calc_dpll_params(refclk
, &clock
);
869 if (!intel_PLL_is_valid(to_i915(dev
),
874 if (!vlv_PLL_is_optimal(dev
, target
,
892 * Returns a set of divisors for the desired target clock with the given
893 * refclk, or FALSE. The returned values represent the clock equation:
894 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
897 chv_find_best_dpll(const struct intel_limit
*limit
,
898 struct intel_crtc_state
*crtc_state
,
899 int target
, int refclk
, struct dpll
*match_clock
,
900 struct dpll
*best_clock
)
902 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
903 struct drm_device
*dev
= crtc
->base
.dev
;
904 unsigned int best_error_ppm
;
909 memset(best_clock
, 0, sizeof(*best_clock
));
910 best_error_ppm
= 1000000;
913 * Based on hardware doc, the n always set to 1, and m1 always
914 * set to 2. If requires to support 200Mhz refclk, we need to
915 * revisit this because n may not 1 anymore.
917 clock
.n
= 1, clock
.m1
= 2;
918 target
*= 5; /* fast clock */
920 for (clock
.p1
= limit
->p1
.max
; clock
.p1
>= limit
->p1
.min
; clock
.p1
--) {
921 for (clock
.p2
= limit
->p2
.p2_fast
;
922 clock
.p2
>= limit
->p2
.p2_slow
;
923 clock
.p2
-= clock
.p2
> 10 ? 2 : 1) {
924 unsigned int error_ppm
;
926 clock
.p
= clock
.p1
* clock
.p2
;
928 m2
= DIV_ROUND_CLOSEST_ULL(((u64
)target
* clock
.p
*
929 clock
.n
) << 22, refclk
* clock
.m1
);
931 if (m2
> INT_MAX
/clock
.m1
)
936 chv_calc_dpll_params(refclk
, &clock
);
938 if (!intel_PLL_is_valid(to_i915(dev
), limit
, &clock
))
941 if (!vlv_PLL_is_optimal(dev
, target
, &clock
, best_clock
,
942 best_error_ppm
, &error_ppm
))
946 best_error_ppm
= error_ppm
;
954 bool bxt_find_best_dpll(struct intel_crtc_state
*crtc_state
, int target_clock
,
955 struct dpll
*best_clock
)
958 const struct intel_limit
*limit
= &intel_limits_bxt
;
960 return chv_find_best_dpll(limit
, crtc_state
,
961 target_clock
, refclk
, NULL
, best_clock
);
964 bool intel_crtc_active(struct intel_crtc
*crtc
)
966 /* Be paranoid as we can arrive here with only partial
967 * state retrieved from the hardware during setup.
969 * We can ditch the adjusted_mode.crtc_clock check as soon
970 * as Haswell has gained clock readout/fastboot support.
972 * We can ditch the crtc->primary->state->fb check as soon as we can
973 * properly reconstruct framebuffers.
975 * FIXME: The intel_crtc->active here should be switched to
976 * crtc->state->active once we have proper CRTC states wired up
979 return crtc
->active
&& crtc
->base
.primary
->state
->fb
&&
980 crtc
->config
->base
.adjusted_mode
.crtc_clock
;
983 enum transcoder
intel_pipe_to_cpu_transcoder(struct drm_i915_private
*dev_priv
,
986 struct intel_crtc
*crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
988 return crtc
->config
->cpu_transcoder
;
991 static bool pipe_scanline_is_moving(struct drm_i915_private
*dev_priv
,
994 i915_reg_t reg
= PIPEDSL(pipe
);
998 if (IS_GEN(dev_priv
, 2))
999 line_mask
= DSL_LINEMASK_GEN2
;
1001 line_mask
= DSL_LINEMASK_GEN3
;
1003 line1
= I915_READ(reg
) & line_mask
;
1005 line2
= I915_READ(reg
) & line_mask
;
1007 return line1
!= line2
;
1010 static void wait_for_pipe_scanline_moving(struct intel_crtc
*crtc
, bool state
)
1012 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1013 enum pipe pipe
= crtc
->pipe
;
1015 /* Wait for the display line to settle/start moving */
1016 if (wait_for(pipe_scanline_is_moving(dev_priv
, pipe
) == state
, 100))
1017 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1018 pipe_name(pipe
), onoff(state
));
1021 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc
*crtc
)
1023 wait_for_pipe_scanline_moving(crtc
, false);
1026 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc
*crtc
)
1028 wait_for_pipe_scanline_moving(crtc
, true);
1032 intel_wait_for_pipe_off(const struct intel_crtc_state
*old_crtc_state
)
1034 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
1035 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1037 if (INTEL_GEN(dev_priv
) >= 4) {
1038 enum transcoder cpu_transcoder
= old_crtc_state
->cpu_transcoder
;
1039 i915_reg_t reg
= PIPECONF(cpu_transcoder
);
1041 /* Wait for the Pipe State to go off */
1042 if (intel_wait_for_register(dev_priv
,
1043 reg
, I965_PIPECONF_ACTIVE
, 0,
1045 WARN(1, "pipe_off wait timed out\n");
1047 intel_wait_for_pipe_scanline_stopped(crtc
);
1051 /* Only for pre-ILK configs */
1052 void assert_pll(struct drm_i915_private
*dev_priv
,
1053 enum pipe pipe
, bool state
)
1058 val
= I915_READ(DPLL(pipe
));
1059 cur_state
= !!(val
& DPLL_VCO_ENABLE
);
1060 I915_STATE_WARN(cur_state
!= state
,
1061 "PLL state assertion failure (expected %s, current %s)\n",
1062 onoff(state
), onoff(cur_state
));
1065 /* XXX: the dsi pll is shared between MIPI DSI ports */
1066 void assert_dsi_pll(struct drm_i915_private
*dev_priv
, bool state
)
1071 mutex_lock(&dev_priv
->sb_lock
);
1072 val
= vlv_cck_read(dev_priv
, CCK_REG_DSI_PLL_CONTROL
);
1073 mutex_unlock(&dev_priv
->sb_lock
);
1075 cur_state
= val
& DSI_PLL_VCO_EN
;
1076 I915_STATE_WARN(cur_state
!= state
,
1077 "DSI PLL state assertion failure (expected %s, current %s)\n",
1078 onoff(state
), onoff(cur_state
));
1081 static void assert_fdi_tx(struct drm_i915_private
*dev_priv
,
1082 enum pipe pipe
, bool state
)
1085 enum transcoder cpu_transcoder
= intel_pipe_to_cpu_transcoder(dev_priv
,
1088 if (HAS_DDI(dev_priv
)) {
1089 /* DDI does not have a specific FDI_TX register */
1090 u32 val
= I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder
));
1091 cur_state
= !!(val
& TRANS_DDI_FUNC_ENABLE
);
1093 u32 val
= I915_READ(FDI_TX_CTL(pipe
));
1094 cur_state
= !!(val
& FDI_TX_ENABLE
);
1096 I915_STATE_WARN(cur_state
!= state
,
1097 "FDI TX state assertion failure (expected %s, current %s)\n",
1098 onoff(state
), onoff(cur_state
));
1100 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1101 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1103 static void assert_fdi_rx(struct drm_i915_private
*dev_priv
,
1104 enum pipe pipe
, bool state
)
1109 val
= I915_READ(FDI_RX_CTL(pipe
));
1110 cur_state
= !!(val
& FDI_RX_ENABLE
);
1111 I915_STATE_WARN(cur_state
!= state
,
1112 "FDI RX state assertion failure (expected %s, current %s)\n",
1113 onoff(state
), onoff(cur_state
));
1115 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1116 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1118 static void assert_fdi_tx_pll_enabled(struct drm_i915_private
*dev_priv
,
1123 /* ILK FDI PLL is always enabled */
1124 if (IS_GEN(dev_priv
, 5))
1127 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1128 if (HAS_DDI(dev_priv
))
1131 val
= I915_READ(FDI_TX_CTL(pipe
));
1132 I915_STATE_WARN(!(val
& FDI_TX_PLL_ENABLE
), "FDI TX PLL assertion failure, should be active but is disabled\n");
1135 void assert_fdi_rx_pll(struct drm_i915_private
*dev_priv
,
1136 enum pipe pipe
, bool state
)
1141 val
= I915_READ(FDI_RX_CTL(pipe
));
1142 cur_state
= !!(val
& FDI_RX_PLL_ENABLE
);
1143 I915_STATE_WARN(cur_state
!= state
,
1144 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1145 onoff(state
), onoff(cur_state
));
1148 void assert_panel_unlocked(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
1152 enum pipe panel_pipe
= INVALID_PIPE
;
1155 if (WARN_ON(HAS_DDI(dev_priv
)))
1158 if (HAS_PCH_SPLIT(dev_priv
)) {
1161 pp_reg
= PP_CONTROL(0);
1162 port_sel
= I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK
;
1165 case PANEL_PORT_SELECT_LVDS
:
1166 intel_lvds_port_enabled(dev_priv
, PCH_LVDS
, &panel_pipe
);
1168 case PANEL_PORT_SELECT_DPA
:
1169 intel_dp_port_enabled(dev_priv
, DP_A
, PORT_A
, &panel_pipe
);
1171 case PANEL_PORT_SELECT_DPC
:
1172 intel_dp_port_enabled(dev_priv
, PCH_DP_C
, PORT_C
, &panel_pipe
);
1174 case PANEL_PORT_SELECT_DPD
:
1175 intel_dp_port_enabled(dev_priv
, PCH_DP_D
, PORT_D
, &panel_pipe
);
1178 MISSING_CASE(port_sel
);
1181 } else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
1182 /* presumably write lock depends on pipe, not port select */
1183 pp_reg
= PP_CONTROL(pipe
);
1188 pp_reg
= PP_CONTROL(0);
1189 port_sel
= I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK
;
1191 WARN_ON(port_sel
!= PANEL_PORT_SELECT_LVDS
);
1192 intel_lvds_port_enabled(dev_priv
, LVDS
, &panel_pipe
);
1195 val
= I915_READ(pp_reg
);
1196 if (!(val
& PANEL_POWER_ON
) ||
1197 ((val
& PANEL_UNLOCK_MASK
) == PANEL_UNLOCK_REGS
))
1200 I915_STATE_WARN(panel_pipe
== pipe
&& locked
,
1201 "panel assertion failure, pipe %c regs locked\n",
1205 void assert_pipe(struct drm_i915_private
*dev_priv
,
1206 enum pipe pipe
, bool state
)
1209 enum transcoder cpu_transcoder
= intel_pipe_to_cpu_transcoder(dev_priv
,
1211 enum intel_display_power_domain power_domain
;
1212 intel_wakeref_t wakeref
;
1214 /* we keep both pipes enabled on 830 */
1215 if (IS_I830(dev_priv
))
1218 power_domain
= POWER_DOMAIN_TRANSCODER(cpu_transcoder
);
1219 wakeref
= intel_display_power_get_if_enabled(dev_priv
, power_domain
);
1221 u32 val
= I915_READ(PIPECONF(cpu_transcoder
));
1222 cur_state
= !!(val
& PIPECONF_ENABLE
);
1224 intel_display_power_put(dev_priv
, power_domain
, wakeref
);
1229 I915_STATE_WARN(cur_state
!= state
,
1230 "pipe %c assertion failure (expected %s, current %s)\n",
1231 pipe_name(pipe
), onoff(state
), onoff(cur_state
));
1234 static void assert_plane(struct intel_plane
*plane
, bool state
)
1239 cur_state
= plane
->get_hw_state(plane
, &pipe
);
1241 I915_STATE_WARN(cur_state
!= state
,
1242 "%s assertion failure (expected %s, current %s)\n",
1243 plane
->base
.name
, onoff(state
), onoff(cur_state
));
1246 #define assert_plane_enabled(p) assert_plane(p, true)
1247 #define assert_plane_disabled(p) assert_plane(p, false)
1249 static void assert_planes_disabled(struct intel_crtc
*crtc
)
1251 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1252 struct intel_plane
*plane
;
1254 for_each_intel_plane_on_crtc(&dev_priv
->drm
, crtc
, plane
)
1255 assert_plane_disabled(plane
);
/*
 * Warn if vblank interrupts are still enabled on @crtc. drm_crtc_vblank_get()
 * returning 0 means vblanks were on; drop the reference we just took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1264 void assert_pch_transcoder_disabled(struct drm_i915_private
*dev_priv
,
1270 val
= I915_READ(PCH_TRANSCONF(pipe
));
1271 enabled
= !!(val
& TRANS_ENABLE
);
1272 I915_STATE_WARN(enabled
,
1273 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1277 static void assert_pch_dp_disabled(struct drm_i915_private
*dev_priv
,
1278 enum pipe pipe
, enum port port
,
1281 enum pipe port_pipe
;
1284 state
= intel_dp_port_enabled(dev_priv
, dp_reg
, port
, &port_pipe
);
1286 I915_STATE_WARN(state
&& port_pipe
== pipe
,
1287 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1288 port_name(port
), pipe_name(pipe
));
1290 I915_STATE_WARN(HAS_PCH_IBX(dev_priv
) && !state
&& port_pipe
== PIPE_B
,
1291 "IBX PCH DP %c still using transcoder B\n",
1295 static void assert_pch_hdmi_disabled(struct drm_i915_private
*dev_priv
,
1296 enum pipe pipe
, enum port port
,
1297 i915_reg_t hdmi_reg
)
1299 enum pipe port_pipe
;
1302 state
= intel_sdvo_port_enabled(dev_priv
, hdmi_reg
, &port_pipe
);
1304 I915_STATE_WARN(state
&& port_pipe
== pipe
,
1305 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1306 port_name(port
), pipe_name(pipe
));
1308 I915_STATE_WARN(HAS_PCH_IBX(dev_priv
) && !state
&& port_pipe
== PIPE_B
,
1309 "IBX PCH HDMI %c still using transcoder B\n",
1313 static void assert_pch_ports_disabled(struct drm_i915_private
*dev_priv
,
1316 enum pipe port_pipe
;
1318 assert_pch_dp_disabled(dev_priv
, pipe
, PORT_B
, PCH_DP_B
);
1319 assert_pch_dp_disabled(dev_priv
, pipe
, PORT_C
, PCH_DP_C
);
1320 assert_pch_dp_disabled(dev_priv
, pipe
, PORT_D
, PCH_DP_D
);
1322 I915_STATE_WARN(intel_crt_port_enabled(dev_priv
, PCH_ADPA
, &port_pipe
) &&
1324 "PCH VGA enabled on transcoder %c, should be disabled\n",
1327 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv
, PCH_LVDS
, &port_pipe
) &&
1329 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1332 /* PCH SDVOB multiplex with HDMIB */
1333 assert_pch_hdmi_disabled(dev_priv
, pipe
, PORT_B
, PCH_HDMIB
);
1334 assert_pch_hdmi_disabled(dev_priv
, pipe
, PORT_C
, PCH_HDMIC
);
1335 assert_pch_hdmi_disabled(dev_priv
, pipe
, PORT_D
, PCH_HDMID
);
1338 static void _vlv_enable_pll(struct intel_crtc
*crtc
,
1339 const struct intel_crtc_state
*pipe_config
)
1341 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1342 enum pipe pipe
= crtc
->pipe
;
1344 I915_WRITE(DPLL(pipe
), pipe_config
->dpll_hw_state
.dpll
);
1345 POSTING_READ(DPLL(pipe
));
1348 if (intel_wait_for_register(dev_priv
,
1353 DRM_ERROR("DPLL %d failed to lock\n", pipe
);
1356 static void vlv_enable_pll(struct intel_crtc
*crtc
,
1357 const struct intel_crtc_state
*pipe_config
)
1359 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1360 enum pipe pipe
= crtc
->pipe
;
1362 assert_pipe_disabled(dev_priv
, pipe
);
1364 /* PLL is protected by panel, make sure we can write it */
1365 assert_panel_unlocked(dev_priv
, pipe
);
1367 if (pipe_config
->dpll_hw_state
.dpll
& DPLL_VCO_ENABLE
)
1368 _vlv_enable_pll(crtc
, pipe_config
);
1370 I915_WRITE(DPLL_MD(pipe
), pipe_config
->dpll_hw_state
.dpll_md
);
1371 POSTING_READ(DPLL_MD(pipe
));
1375 static void _chv_enable_pll(struct intel_crtc
*crtc
,
1376 const struct intel_crtc_state
*pipe_config
)
1378 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1379 enum pipe pipe
= crtc
->pipe
;
1380 enum dpio_channel port
= vlv_pipe_to_channel(pipe
);
1383 mutex_lock(&dev_priv
->sb_lock
);
1385 /* Enable back the 10bit clock to display controller */
1386 tmp
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW14(port
));
1387 tmp
|= DPIO_DCLKP_EN
;
1388 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW14(port
), tmp
);
1390 mutex_unlock(&dev_priv
->sb_lock
);
1393 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1398 I915_WRITE(DPLL(pipe
), pipe_config
->dpll_hw_state
.dpll
);
1400 /* Check PLL is locked */
1401 if (intel_wait_for_register(dev_priv
,
1402 DPLL(pipe
), DPLL_LOCK_VLV
, DPLL_LOCK_VLV
,
1404 DRM_ERROR("PLL %d failed to lock\n", pipe
);
1407 static void chv_enable_pll(struct intel_crtc
*crtc
,
1408 const struct intel_crtc_state
*pipe_config
)
1410 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1411 enum pipe pipe
= crtc
->pipe
;
1413 assert_pipe_disabled(dev_priv
, pipe
);
1415 /* PLL is protected by panel, make sure we can write it */
1416 assert_panel_unlocked(dev_priv
, pipe
);
1418 if (pipe_config
->dpll_hw_state
.dpll
& DPLL_VCO_ENABLE
)
1419 _chv_enable_pll(crtc
, pipe_config
);
1421 if (pipe
!= PIPE_A
) {
1423 * WaPixelRepeatModeFixForC0:chv
1425 * DPLLCMD is AWOL. Use chicken bits to propagate
1426 * the value from DPLLBMD to either pipe B or C.
1428 I915_WRITE(CBR4_VLV
, CBR_DPLLBMD_PIPE(pipe
));
1429 I915_WRITE(DPLL_MD(PIPE_B
), pipe_config
->dpll_hw_state
.dpll_md
);
1430 I915_WRITE(CBR4_VLV
, 0);
1431 dev_priv
->chv_dpll_md
[pipe
] = pipe_config
->dpll_hw_state
.dpll_md
;
1434 * DPLLB VGA mode also seems to cause problems.
1435 * We should always have it disabled.
1437 WARN_ON((I915_READ(DPLL(PIPE_B
)) & DPLL_VGA_MODE_DIS
) == 0);
1439 I915_WRITE(DPLL_MD(pipe
), pipe_config
->dpll_hw_state
.dpll_md
);
1440 POSTING_READ(DPLL_MD(pipe
));
1444 static int intel_num_dvo_pipes(struct drm_i915_private
*dev_priv
)
1446 struct intel_crtc
*crtc
;
1449 for_each_intel_crtc(&dev_priv
->drm
, crtc
) {
1450 count
+= crtc
->base
.state
->active
&&
1451 intel_crtc_has_type(crtc
->config
, INTEL_OUTPUT_DVO
);
1457 static void i9xx_enable_pll(struct intel_crtc
*crtc
,
1458 const struct intel_crtc_state
*crtc_state
)
1460 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1461 i915_reg_t reg
= DPLL(crtc
->pipe
);
1462 u32 dpll
= crtc_state
->dpll_hw_state
.dpll
;
1465 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
1467 /* PLL is protected by panel, make sure we can write it */
1468 if (IS_MOBILE(dev_priv
) && !IS_I830(dev_priv
))
1469 assert_panel_unlocked(dev_priv
, crtc
->pipe
);
1471 /* Enable DVO 2x clock on both PLLs if necessary */
1472 if (IS_I830(dev_priv
) && intel_num_dvo_pipes(dev_priv
) > 0) {
1474 * It appears to be important that we don't enable this
1475 * for the current pipe before otherwise configuring the
1476 * PLL. No idea how this should be handled if multiple
1477 * DVO outputs are enabled simultaneosly.
1479 dpll
|= DPLL_DVO_2X_MODE
;
1480 I915_WRITE(DPLL(!crtc
->pipe
),
1481 I915_READ(DPLL(!crtc
->pipe
)) | DPLL_DVO_2X_MODE
);
1485 * Apparently we need to have VGA mode enabled prior to changing
1486 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1487 * dividers, even though the register value does change.
1491 I915_WRITE(reg
, dpll
);
1493 /* Wait for the clocks to stabilize. */
1497 if (INTEL_GEN(dev_priv
) >= 4) {
1498 I915_WRITE(DPLL_MD(crtc
->pipe
),
1499 crtc_state
->dpll_hw_state
.dpll_md
);
1501 /* The pixel multiplier can only be updated once the
1502 * DPLL is enabled and the clocks are stable.
1504 * So write it again.
1506 I915_WRITE(reg
, dpll
);
1509 /* We do this three times for luck */
1510 for (i
= 0; i
< 3; i
++) {
1511 I915_WRITE(reg
, dpll
);
1513 udelay(150); /* wait for warmup */
1517 static void i9xx_disable_pll(const struct intel_crtc_state
*crtc_state
)
1519 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
1520 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1521 enum pipe pipe
= crtc
->pipe
;
1523 /* Disable DVO 2x clock on both PLLs if necessary */
1524 if (IS_I830(dev_priv
) &&
1525 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_DVO
) &&
1526 !intel_num_dvo_pipes(dev_priv
)) {
1527 I915_WRITE(DPLL(PIPE_B
),
1528 I915_READ(DPLL(PIPE_B
)) & ~DPLL_DVO_2X_MODE
);
1529 I915_WRITE(DPLL(PIPE_A
),
1530 I915_READ(DPLL(PIPE_A
)) & ~DPLL_DVO_2X_MODE
);
1533 /* Don't disable pipe or pipe PLLs if needed */
1534 if (IS_I830(dev_priv
))
1537 /* Make sure the pipe isn't still relying on us */
1538 assert_pipe_disabled(dev_priv
, pipe
);
1540 I915_WRITE(DPLL(pipe
), DPLL_VGA_MODE_DIS
);
1541 POSTING_READ(DPLL(pipe
));
1544 static void vlv_disable_pll(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
1548 /* Make sure the pipe isn't still relying on us */
1549 assert_pipe_disabled(dev_priv
, pipe
);
1551 val
= DPLL_INTEGRATED_REF_CLK_VLV
|
1552 DPLL_REF_CLK_ENABLE_VLV
| DPLL_VGA_MODE_DIS
;
1554 val
|= DPLL_INTEGRATED_CRI_CLK_VLV
;
1556 I915_WRITE(DPLL(pipe
), val
);
1557 POSTING_READ(DPLL(pipe
));
1560 static void chv_disable_pll(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
1562 enum dpio_channel port
= vlv_pipe_to_channel(pipe
);
1565 /* Make sure the pipe isn't still relying on us */
1566 assert_pipe_disabled(dev_priv
, pipe
);
1568 val
= DPLL_SSC_REF_CLK_CHV
|
1569 DPLL_REF_CLK_ENABLE_VLV
| DPLL_VGA_MODE_DIS
;
1571 val
|= DPLL_INTEGRATED_CRI_CLK_VLV
;
1573 I915_WRITE(DPLL(pipe
), val
);
1574 POSTING_READ(DPLL(pipe
));
1576 mutex_lock(&dev_priv
->sb_lock
);
1578 /* Disable 10bit clock to display controller */
1579 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW14(port
));
1580 val
&= ~DPIO_DCLKP_EN
;
1581 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW14(port
), val
);
1583 mutex_unlock(&dev_priv
->sb_lock
);
1586 void vlv_wait_port_ready(struct drm_i915_private
*dev_priv
,
1587 struct intel_digital_port
*dport
,
1588 unsigned int expected_mask
)
1591 i915_reg_t dpll_reg
;
1593 switch (dport
->base
.port
) {
1595 port_mask
= DPLL_PORTB_READY_MASK
;
1599 port_mask
= DPLL_PORTC_READY_MASK
;
1601 expected_mask
<<= 4;
1604 port_mask
= DPLL_PORTD_READY_MASK
;
1605 dpll_reg
= DPIO_PHY_STATUS
;
1611 if (intel_wait_for_register(dev_priv
,
1612 dpll_reg
, port_mask
, expected_mask
,
1614 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1615 port_name(dport
->base
.port
),
1616 I915_READ(dpll_reg
) & port_mask
, expected_mask
);
1619 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state
*crtc_state
)
1621 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
1622 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1623 enum pipe pipe
= crtc
->pipe
;
1625 u32 val
, pipeconf_val
;
1627 /* Make sure PCH DPLL is enabled */
1628 assert_shared_dpll_enabled(dev_priv
, crtc_state
->shared_dpll
);
1630 /* FDI must be feeding us bits for PCH ports */
1631 assert_fdi_tx_enabled(dev_priv
, pipe
);
1632 assert_fdi_rx_enabled(dev_priv
, pipe
);
1634 if (HAS_PCH_CPT(dev_priv
)) {
1635 /* Workaround: Set the timing override bit before enabling the
1636 * pch transcoder. */
1637 reg
= TRANS_CHICKEN2(pipe
);
1638 val
= I915_READ(reg
);
1639 val
|= TRANS_CHICKEN2_TIMING_OVERRIDE
;
1640 I915_WRITE(reg
, val
);
1643 reg
= PCH_TRANSCONF(pipe
);
1644 val
= I915_READ(reg
);
1645 pipeconf_val
= I915_READ(PIPECONF(pipe
));
1647 if (HAS_PCH_IBX(dev_priv
)) {
1649 * Make the BPC in transcoder be consistent with
1650 * that in pipeconf reg. For HDMI we must use 8bpc
1651 * here for both 8bpc and 12bpc.
1653 val
&= ~PIPECONF_BPC_MASK
;
1654 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_HDMI
))
1655 val
|= PIPECONF_8BPC
;
1657 val
|= pipeconf_val
& PIPECONF_BPC_MASK
;
1660 val
&= ~TRANS_INTERLACE_MASK
;
1661 if ((pipeconf_val
& PIPECONF_INTERLACE_MASK
) == PIPECONF_INTERLACED_ILK
)
1662 if (HAS_PCH_IBX(dev_priv
) &&
1663 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_SDVO
))
1664 val
|= TRANS_LEGACY_INTERLACED_ILK
;
1666 val
|= TRANS_INTERLACED
;
1668 val
|= TRANS_PROGRESSIVE
;
1670 I915_WRITE(reg
, val
| TRANS_ENABLE
);
1671 if (intel_wait_for_register(dev_priv
,
1672 reg
, TRANS_STATE_ENABLE
, TRANS_STATE_ENABLE
,
1674 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe
));
1677 static void lpt_enable_pch_transcoder(struct drm_i915_private
*dev_priv
,
1678 enum transcoder cpu_transcoder
)
1680 u32 val
, pipeconf_val
;
1682 /* FDI must be feeding us bits for PCH ports */
1683 assert_fdi_tx_enabled(dev_priv
, (enum pipe
) cpu_transcoder
);
1684 assert_fdi_rx_enabled(dev_priv
, PIPE_A
);
1686 /* Workaround: set timing override bit. */
1687 val
= I915_READ(TRANS_CHICKEN2(PIPE_A
));
1688 val
|= TRANS_CHICKEN2_TIMING_OVERRIDE
;
1689 I915_WRITE(TRANS_CHICKEN2(PIPE_A
), val
);
1692 pipeconf_val
= I915_READ(PIPECONF(cpu_transcoder
));
1694 if ((pipeconf_val
& PIPECONF_INTERLACE_MASK_HSW
) ==
1695 PIPECONF_INTERLACED_ILK
)
1696 val
|= TRANS_INTERLACED
;
1698 val
|= TRANS_PROGRESSIVE
;
1700 I915_WRITE(LPT_TRANSCONF
, val
);
1701 if (intel_wait_for_register(dev_priv
,
1706 DRM_ERROR("Failed to enable PCH transcoder\n");
1709 static void ironlake_disable_pch_transcoder(struct drm_i915_private
*dev_priv
,
1715 /* FDI relies on the transcoder */
1716 assert_fdi_tx_disabled(dev_priv
, pipe
);
1717 assert_fdi_rx_disabled(dev_priv
, pipe
);
1719 /* Ports must be off as well */
1720 assert_pch_ports_disabled(dev_priv
, pipe
);
1722 reg
= PCH_TRANSCONF(pipe
);
1723 val
= I915_READ(reg
);
1724 val
&= ~TRANS_ENABLE
;
1725 I915_WRITE(reg
, val
);
1726 /* wait for PCH transcoder off, transcoder state */
1727 if (intel_wait_for_register(dev_priv
,
1728 reg
, TRANS_STATE_ENABLE
, 0,
1730 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe
));
1732 if (HAS_PCH_CPT(dev_priv
)) {
1733 /* Workaround: Clear the timing override chicken bit again. */
1734 reg
= TRANS_CHICKEN2(pipe
);
1735 val
= I915_READ(reg
);
1736 val
&= ~TRANS_CHICKEN2_TIMING_OVERRIDE
;
1737 I915_WRITE(reg
, val
);
1741 void lpt_disable_pch_transcoder(struct drm_i915_private
*dev_priv
)
1745 val
= I915_READ(LPT_TRANSCONF
);
1746 val
&= ~TRANS_ENABLE
;
1747 I915_WRITE(LPT_TRANSCONF
, val
);
1748 /* wait for PCH transcoder off, transcoder state */
1749 if (intel_wait_for_register(dev_priv
,
1750 LPT_TRANSCONF
, TRANS_STATE_ENABLE
, 0,
1752 DRM_ERROR("Failed to disable PCH transcoder\n");
1754 /* Workaround: clear timing override bit. */
1755 val
= I915_READ(TRANS_CHICKEN2(PIPE_A
));
1756 val
&= ~TRANS_CHICKEN2_TIMING_OVERRIDE
;
1757 I915_WRITE(TRANS_CHICKEN2(PIPE_A
), val
);
1760 enum pipe
intel_crtc_pch_transcoder(struct intel_crtc
*crtc
)
1762 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1764 if (HAS_PCH_LPT(dev_priv
))
1770 static u32
intel_crtc_max_vblank_count(const struct intel_crtc_state
*crtc_state
)
1772 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->base
.crtc
->dev
);
1775 * On i965gm the hardware frame counter reads
1776 * zero when the TV encoder is enabled :(
1778 if (IS_I965GM(dev_priv
) &&
1779 (crtc_state
->output_types
& BIT(INTEL_OUTPUT_TVOUT
)))
1782 if (INTEL_GEN(dev_priv
) >= 5 || IS_G4X(dev_priv
))
1783 return 0xffffffff; /* full 32 bit counter */
1784 else if (INTEL_GEN(dev_priv
) >= 3)
1785 return 0xffffff; /* only 24 bits of frame count */
1787 return 0; /* Gen2 doesn't have a hardware frame counter */
1790 static void intel_crtc_vblank_on(const struct intel_crtc_state
*crtc_state
)
1792 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
1794 drm_crtc_set_max_vblank_count(&crtc
->base
,
1795 intel_crtc_max_vblank_count(crtc_state
));
1796 drm_crtc_vblank_on(&crtc
->base
);
1799 static void intel_enable_pipe(const struct intel_crtc_state
*new_crtc_state
)
1801 struct intel_crtc
*crtc
= to_intel_crtc(new_crtc_state
->base
.crtc
);
1802 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1803 enum transcoder cpu_transcoder
= new_crtc_state
->cpu_transcoder
;
1804 enum pipe pipe
= crtc
->pipe
;
1808 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe
));
1810 assert_planes_disabled(crtc
);
1813 * A pipe without a PLL won't actually be able to drive bits from
1814 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1817 if (HAS_GMCH(dev_priv
)) {
1818 if (intel_crtc_has_type(new_crtc_state
, INTEL_OUTPUT_DSI
))
1819 assert_dsi_pll_enabled(dev_priv
);
1821 assert_pll_enabled(dev_priv
, pipe
);
1823 if (new_crtc_state
->has_pch_encoder
) {
1824 /* if driving the PCH, we need FDI enabled */
1825 assert_fdi_rx_pll_enabled(dev_priv
,
1826 intel_crtc_pch_transcoder(crtc
));
1827 assert_fdi_tx_pll_enabled(dev_priv
,
1828 (enum pipe
) cpu_transcoder
);
1830 /* FIXME: assert CPU port conditions for SNB+ */
1833 reg
= PIPECONF(cpu_transcoder
);
1834 val
= I915_READ(reg
);
1835 if (val
& PIPECONF_ENABLE
) {
1836 /* we keep both pipes enabled on 830 */
1837 WARN_ON(!IS_I830(dev_priv
));
1841 I915_WRITE(reg
, val
| PIPECONF_ENABLE
);
1845 * Until the pipe starts PIPEDSL reads will return a stale value,
1846 * which causes an apparent vblank timestamp jump when PIPEDSL
1847 * resets to its proper value. That also messes up the frame count
1848 * when it's derived from the timestamps. So let's wait for the
1849 * pipe to start properly before we call drm_crtc_vblank_on()
1851 if (intel_crtc_max_vblank_count(new_crtc_state
) == 0)
1852 intel_wait_for_pipe_scanline_moving(crtc
);
1855 static void intel_disable_pipe(const struct intel_crtc_state
*old_crtc_state
)
1857 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
1858 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1859 enum transcoder cpu_transcoder
= old_crtc_state
->cpu_transcoder
;
1860 enum pipe pipe
= crtc
->pipe
;
1864 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe
));
1867 * Make sure planes won't keep trying to pump pixels to us,
1868 * or we might hang the display.
1870 assert_planes_disabled(crtc
);
1872 reg
= PIPECONF(cpu_transcoder
);
1873 val
= I915_READ(reg
);
1874 if ((val
& PIPECONF_ENABLE
) == 0)
1878 * Double wide has implications for planes
1879 * so best keep it disabled when not needed.
1881 if (old_crtc_state
->double_wide
)
1882 val
&= ~PIPECONF_DOUBLE_WIDE
;
1884 /* Don't disable pipe or pipe PLLs if needed */
1885 if (!IS_I830(dev_priv
))
1886 val
&= ~PIPECONF_ENABLE
;
1888 I915_WRITE(reg
, val
);
1889 if ((val
& PIPECONF_ENABLE
) == 0)
1890 intel_wait_for_pipe_off(old_crtc_state
);
/* Size in bytes of one tile: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
1899 intel_tile_width_bytes(const struct drm_framebuffer
*fb
, int color_plane
)
1901 struct drm_i915_private
*dev_priv
= to_i915(fb
->dev
);
1902 unsigned int cpp
= fb
->format
->cpp
[color_plane
];
1904 switch (fb
->modifier
) {
1905 case DRM_FORMAT_MOD_LINEAR
:
1907 case I915_FORMAT_MOD_X_TILED
:
1908 if (IS_GEN(dev_priv
, 2))
1912 case I915_FORMAT_MOD_Y_TILED_CCS
:
1913 if (color_plane
== 1)
1916 case I915_FORMAT_MOD_Y_TILED
:
1917 if (IS_GEN(dev_priv
, 2) || HAS_128_BYTE_Y_TILING(dev_priv
))
1921 case I915_FORMAT_MOD_Yf_TILED_CCS
:
1922 if (color_plane
== 1)
1925 case I915_FORMAT_MOD_Yf_TILED
:
1941 MISSING_CASE(fb
->modifier
);
1947 intel_tile_height(const struct drm_framebuffer
*fb
, int color_plane
)
1949 if (fb
->modifier
== DRM_FORMAT_MOD_LINEAR
)
1952 return intel_tile_size(to_i915(fb
->dev
)) /
1953 intel_tile_width_bytes(fb
, color_plane
);
1956 /* Return the tile dimensions in pixel units */
1957 static void intel_tile_dims(const struct drm_framebuffer
*fb
, int color_plane
,
1958 unsigned int *tile_width
,
1959 unsigned int *tile_height
)
1961 unsigned int tile_width_bytes
= intel_tile_width_bytes(fb
, color_plane
);
1962 unsigned int cpp
= fb
->format
->cpp
[color_plane
];
1964 *tile_width
= tile_width_bytes
/ cpp
;
1965 *tile_height
= intel_tile_size(to_i915(fb
->dev
)) / tile_width_bytes
;
/* Round @height up to a whole number of tile rows for @fb/@color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

	return ALIGN(height, tile_height);
}
1977 unsigned int intel_rotation_info_size(const struct intel_rotation_info
*rot_info
)
1979 unsigned int size
= 0;
1982 for (i
= 0 ; i
< ARRAY_SIZE(rot_info
->plane
); i
++)
1983 size
+= rot_info
->plane
[i
].width
* rot_info
->plane
[i
].height
;
1989 intel_fill_fb_ggtt_view(struct i915_ggtt_view
*view
,
1990 const struct drm_framebuffer
*fb
,
1991 unsigned int rotation
)
1993 view
->type
= I915_GGTT_VIEW_NORMAL
;
1994 if (drm_rotation_90_or_270(rotation
)) {
1995 view
->type
= I915_GGTT_VIEW_ROTATED
;
1996 view
->rotated
= to_intel_framebuffer(fb
)->rot_info
;
/* GGTT alignment required for cursor surfaces on each platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;
	else if (IS_I85X(dev_priv))
		return 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;
	else
		return 4 * 1024;
}
/* GGTT alignment required for linear scanout surfaces on each platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}
2025 static unsigned int intel_surf_alignment(const struct drm_framebuffer
*fb
,
2028 struct drm_i915_private
*dev_priv
= to_i915(fb
->dev
);
2030 /* AUX_DIST needs only 4K alignment */
2031 if (color_plane
== 1)
2034 switch (fb
->modifier
) {
2035 case DRM_FORMAT_MOD_LINEAR
:
2036 return intel_linear_alignment(dev_priv
);
2037 case I915_FORMAT_MOD_X_TILED
:
2038 if (INTEL_GEN(dev_priv
) >= 9)
2041 case I915_FORMAT_MOD_Y_TILED_CCS
:
2042 case I915_FORMAT_MOD_Yf_TILED_CCS
:
2043 case I915_FORMAT_MOD_Y_TILED
:
2044 case I915_FORMAT_MOD_Yf_TILED
:
2045 return 1 * 1024 * 1024;
2047 MISSING_CASE(fb
->modifier
);
2052 static bool intel_plane_uses_fence(const struct intel_plane_state
*plane_state
)
2054 struct intel_plane
*plane
= to_intel_plane(plane_state
->base
.plane
);
2055 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
2057 return INTEL_GEN(dev_priv
) < 4 || plane
->has_fbc
;
2061 intel_pin_and_fence_fb_obj(struct drm_framebuffer
*fb
,
2062 const struct i915_ggtt_view
*view
,
2064 unsigned long *out_flags
)
2066 struct drm_device
*dev
= fb
->dev
;
2067 struct drm_i915_private
*dev_priv
= to_i915(dev
);
2068 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
2069 intel_wakeref_t wakeref
;
2070 struct i915_vma
*vma
;
2071 unsigned int pinctl
;
2074 WARN_ON(!mutex_is_locked(&dev
->struct_mutex
));
2076 alignment
= intel_surf_alignment(fb
, 0);
2078 /* Note that the w/a also requires 64 PTE of padding following the
2079 * bo. We currently fill all unused PTE with the shadow page and so
2080 * we should always have valid PTE following the scanout preventing
2083 if (intel_scanout_needs_vtd_wa(dev_priv
) && alignment
< 256 * 1024)
2084 alignment
= 256 * 1024;
2087 * Global gtt pte registers are special registers which actually forward
2088 * writes to a chunk of system memory. Which means that there is no risk
2089 * that the register values disappear as soon as we call
2090 * intel_runtime_pm_put(), so it is correct to wrap only the
2091 * pin/unpin/fence and not more.
2093 wakeref
= intel_runtime_pm_get(dev_priv
);
2095 atomic_inc(&dev_priv
->gpu_error
.pending_fb_pin
);
2099 /* Valleyview is definitely limited to scanning out the first
2100 * 512MiB. Lets presume this behaviour was inherited from the
2101 * g4x display engine and that all earlier gen are similarly
2102 * limited. Testing suggests that it is a little more
2103 * complicated than this. For example, Cherryview appears quite
2104 * happy to scanout from anywhere within its global aperture.
2106 if (HAS_GMCH(dev_priv
))
2107 pinctl
|= PIN_MAPPABLE
;
2109 vma
= i915_gem_object_pin_to_display_plane(obj
,
2110 alignment
, view
, pinctl
);
2114 if (uses_fence
&& i915_vma_is_map_and_fenceable(vma
)) {
2117 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2118 * fence, whereas 965+ only requires a fence if using
2119 * framebuffer compression. For simplicity, we always, when
2120 * possible, install a fence as the cost is not that onerous.
2122 * If we fail to fence the tiled scanout, then either the
2123 * modeset will reject the change (which is highly unlikely as
2124 * the affected systems, all but one, do not have unmappable
2125 * space) or we will not be able to enable full powersaving
2126 * techniques (also likely not to apply due to various limits
2127 * FBC and the like impose on the size of the buffer, which
2128 * presumably we violated anyway with this unmappable buffer).
2129 * Anyway, it is presumably better to stumble onwards with
2130 * something and try to run the system in a "less than optimal"
2131 * mode that matches the user configuration.
2133 ret
= i915_vma_pin_fence(vma
);
2134 if (ret
!= 0 && INTEL_GEN(dev_priv
) < 4) {
2135 i915_gem_object_unpin_from_display_plane(vma
);
2140 if (ret
== 0 && vma
->fence
)
2141 *out_flags
|= PLANE_HAS_FENCE
;
2146 atomic_dec(&dev_priv
->gpu_error
.pending_fb_pin
);
2148 intel_runtime_pm_put(dev_priv
, wakeref
);
2152 void intel_unpin_fb_vma(struct i915_vma
*vma
, unsigned long flags
)
2154 lockdep_assert_held(&vma
->vm
->i915
->drm
.struct_mutex
);
2156 if (flags
& PLANE_HAS_FENCE
)
2157 i915_vma_unpin_fence(vma
);
2158 i915_gem_object_unpin_from_display_plane(vma
);
2162 static int intel_fb_pitch(const struct drm_framebuffer
*fb
, int color_plane
,
2163 unsigned int rotation
)
2165 if (drm_rotation_90_or_270(rotation
))
2166 return to_intel_framebuffer(fb
)->rotated
[color_plane
].pitch
;
2168 return fb
->pitches
[color_plane
];
2172 * Convert the x/y offsets into a linear offset.
2173 * Only valid with 0/180 degree rotation, which is fine since linear
2174 * offset is only used with linear buffers on pre-hsw and tiled buffers
2175 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2177 u32
intel_fb_xy_to_linear(int x
, int y
,
2178 const struct intel_plane_state
*state
,
2181 const struct drm_framebuffer
*fb
= state
->base
.fb
;
2182 unsigned int cpp
= fb
->format
->cpp
[color_plane
];
2183 unsigned int pitch
= state
->color_plane
[color_plane
].stride
;
2185 return y
* pitch
+ x
* cpp
;
2189 * Add the x/y offsets derived from fb->offsets[] to the user
2190 * specified plane src x/y offsets. The resulting x/y offsets
2191 * specify the start of scanout from the beginning of the gtt mapping.
2193 void intel_add_fb_offsets(int *x
, int *y
,
2194 const struct intel_plane_state
*state
,
2198 const struct intel_framebuffer
*intel_fb
= to_intel_framebuffer(state
->base
.fb
);
2199 unsigned int rotation
= state
->base
.rotation
;
2201 if (drm_rotation_90_or_270(rotation
)) {
2202 *x
+= intel_fb
->rotated
[color_plane
].x
;
2203 *y
+= intel_fb
->rotated
[color_plane
].y
;
2205 *x
+= intel_fb
->normal
[color_plane
].x
;
2206 *y
+= intel_fb
->normal
[color_plane
].y
;
2210 static u32
intel_adjust_tile_offset(int *x
, int *y
,
2211 unsigned int tile_width
,
2212 unsigned int tile_height
,
2213 unsigned int tile_size
,
2214 unsigned int pitch_tiles
,
2218 unsigned int pitch_pixels
= pitch_tiles
* tile_width
;
2221 WARN_ON(old_offset
& (tile_size
- 1));
2222 WARN_ON(new_offset
& (tile_size
- 1));
2223 WARN_ON(new_offset
> old_offset
);
2225 tiles
= (old_offset
- new_offset
) / tile_size
;
2227 *y
+= tiles
/ pitch_tiles
* tile_height
;
2228 *x
+= tiles
% pitch_tiles
* tile_width
;
2230 /* minimize x in case it got needlessly big */
2231 *y
+= *x
/ pitch_pixels
* tile_height
;
2237 static bool is_surface_linear(u64 modifier
, int color_plane
)
2239 return modifier
== DRM_FORMAT_MOD_LINEAR
;
2242 static u32
intel_adjust_aligned_offset(int *x
, int *y
,
2243 const struct drm_framebuffer
*fb
,
2245 unsigned int rotation
,
2247 u32 old_offset
, u32 new_offset
)
2249 struct drm_i915_private
*dev_priv
= to_i915(fb
->dev
);
2250 unsigned int cpp
= fb
->format
->cpp
[color_plane
];
2252 WARN_ON(new_offset
> old_offset
);
2254 if (!is_surface_linear(fb
->modifier
, color_plane
)) {
2255 unsigned int tile_size
, tile_width
, tile_height
;
2256 unsigned int pitch_tiles
;
2258 tile_size
= intel_tile_size(dev_priv
);
2259 intel_tile_dims(fb
, color_plane
, &tile_width
, &tile_height
);
2261 if (drm_rotation_90_or_270(rotation
)) {
2262 pitch_tiles
= pitch
/ tile_height
;
2263 swap(tile_width
, tile_height
);
2265 pitch_tiles
= pitch
/ (tile_width
* cpp
);
2268 intel_adjust_tile_offset(x
, y
, tile_width
, tile_height
,
2269 tile_size
, pitch_tiles
,
2270 old_offset
, new_offset
);
2272 old_offset
+= *y
* pitch
+ *x
* cpp
;
2274 *y
= (old_offset
- new_offset
) / pitch
;
2275 *x
= ((old_offset
- new_offset
) - *y
* pitch
) / cpp
;
2282 * Adjust the tile offset by moving the difference into
2285 static u32
intel_plane_adjust_aligned_offset(int *x
, int *y
,
2286 const struct intel_plane_state
*state
,
2288 u32 old_offset
, u32 new_offset
)
2290 return intel_adjust_aligned_offset(x
, y
, state
->base
.fb
, color_plane
,
2291 state
->base
.rotation
,
2292 state
->color_plane
[color_plane
].stride
,
2293 old_offset
, new_offset
);
2297 * Computes the aligned offset to the base tile and adjusts
2298 * x, y. bytes per pixel is assumed to be a power-of-two.
2300 * In the 90/270 rotated case, x and y are assumed
2301 * to be already rotated to match the rotated GTT view, and
2302 * pitch is the tile_height aligned framebuffer height.
2304 * This function is used when computing the derived information
2305 * under intel_framebuffer, so using any of that information
2306 * here is not allowed. Anything under drm_framebuffer can be
2307 * used. This is why the user has to pass in the pitch since it
2308 * is specified in the rotated orientation.
2310 static u32
intel_compute_aligned_offset(struct drm_i915_private
*dev_priv
,
2312 const struct drm_framebuffer
*fb
,
2315 unsigned int rotation
,
2318 unsigned int cpp
= fb
->format
->cpp
[color_plane
];
2319 u32 offset
, offset_aligned
;
2324 if (!is_surface_linear(fb
->modifier
, color_plane
)) {
2325 unsigned int tile_size
, tile_width
, tile_height
;
2326 unsigned int tile_rows
, tiles
, pitch_tiles
;
2328 tile_size
= intel_tile_size(dev_priv
);
2329 intel_tile_dims(fb
, color_plane
, &tile_width
, &tile_height
);
2331 if (drm_rotation_90_or_270(rotation
)) {
2332 pitch_tiles
= pitch
/ tile_height
;
2333 swap(tile_width
, tile_height
);
2335 pitch_tiles
= pitch
/ (tile_width
* cpp
);
2338 tile_rows
= *y
/ tile_height
;
2341 tiles
= *x
/ tile_width
;
2344 offset
= (tile_rows
* pitch_tiles
+ tiles
) * tile_size
;
2345 offset_aligned
= offset
& ~alignment
;
2347 intel_adjust_tile_offset(x
, y
, tile_width
, tile_height
,
2348 tile_size
, pitch_tiles
,
2349 offset
, offset_aligned
);
2351 offset
= *y
* pitch
+ *x
* cpp
;
2352 offset_aligned
= offset
& ~alignment
;
2354 *y
= (offset
& alignment
) / pitch
;
2355 *x
= ((offset
& alignment
) - *y
* pitch
) / cpp
;
2358 return offset_aligned
;
2361 static u32
intel_plane_compute_aligned_offset(int *x
, int *y
,
2362 const struct intel_plane_state
*state
,
2365 struct intel_plane
*intel_plane
= to_intel_plane(state
->base
.plane
);
2366 struct drm_i915_private
*dev_priv
= to_i915(intel_plane
->base
.dev
);
2367 const struct drm_framebuffer
*fb
= state
->base
.fb
;
2368 unsigned int rotation
= state
->base
.rotation
;
2369 int pitch
= state
->color_plane
[color_plane
].stride
;
2372 if (intel_plane
->id
== PLANE_CURSOR
)
2373 alignment
= intel_cursor_alignment(dev_priv
);
2375 alignment
= intel_surf_alignment(fb
, color_plane
);
2377 return intel_compute_aligned_offset(dev_priv
, x
, y
, fb
, color_plane
,
2378 pitch
, rotation
, alignment
);
2381 /* Convert the fb->offset[] into x/y offsets */
2382 static int intel_fb_offset_to_xy(int *x
, int *y
,
2383 const struct drm_framebuffer
*fb
,
2386 struct drm_i915_private
*dev_priv
= to_i915(fb
->dev
);
2387 unsigned int height
;
2389 if (fb
->modifier
!= DRM_FORMAT_MOD_LINEAR
&&
2390 fb
->offsets
[color_plane
] % intel_tile_size(dev_priv
)) {
2391 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2392 fb
->offsets
[color_plane
], color_plane
);
2396 height
= drm_framebuffer_plane_height(fb
->height
, fb
, color_plane
);
2397 height
= ALIGN(height
, intel_tile_height(fb
, color_plane
));
2399 /* Catch potential overflows early */
2400 if (add_overflows_t(u32
, mul_u32_u32(height
, fb
->pitches
[color_plane
]),
2401 fb
->offsets
[color_plane
])) {
2402 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2403 fb
->offsets
[color_plane
], fb
->pitches
[color_plane
],
2411 intel_adjust_aligned_offset(x
, y
,
2412 fb
, color_plane
, DRM_MODE_ROTATE_0
,
2413 fb
->pitches
[color_plane
],
2414 fb
->offsets
[color_plane
], 0);
2419 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier
)
2421 switch (fb_modifier
) {
2422 case I915_FORMAT_MOD_X_TILED
:
2423 return I915_TILING_X
;
2424 case I915_FORMAT_MOD_Y_TILED
:
2425 case I915_FORMAT_MOD_Y_TILED_CCS
:
2426 return I915_TILING_Y
;
2428 return I915_TILING_NONE
;
2433 * From the Sky Lake PRM:
2434 * "The Color Control Surface (CCS) contains the compression status of
2435 * the cache-line pairs. The compression state of the cache-line pair
2436 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2437 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2438 * cache-line-pairs. CCS is always Y tiled."
2440 * Since cache line pairs refers to horizontally adjacent cache lines,
2441 * each cache line in the CCS corresponds to an area of 32x16 cache
2442 * lines on the main surface. Since each pixel is 4 bytes, this gives
2443 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2446 static const struct drm_format_info ccs_formats
[] = {
2447 { .format
= DRM_FORMAT_XRGB8888
, .depth
= 24, .num_planes
= 2, .cpp
= { 4, 1, }, .hsub
= 8, .vsub
= 16, },
2448 { .format
= DRM_FORMAT_XBGR8888
, .depth
= 24, .num_planes
= 2, .cpp
= { 4, 1, }, .hsub
= 8, .vsub
= 16, },
2449 { .format
= DRM_FORMAT_ARGB8888
, .depth
= 32, .num_planes
= 2, .cpp
= { 4, 1, }, .hsub
= 8, .vsub
= 16, },
2450 { .format
= DRM_FORMAT_ABGR8888
, .depth
= 32, .num_planes
= 2, .cpp
= { 4, 1, }, .hsub
= 8, .vsub
= 16, },
2453 static const struct drm_format_info
*
2454 lookup_format_info(const struct drm_format_info formats
[],
2455 int num_formats
, u32 format
)
2459 for (i
= 0; i
< num_formats
; i
++) {
2460 if (formats
[i
].format
== format
)
2467 static const struct drm_format_info
*
2468 intel_get_format_info(const struct drm_mode_fb_cmd2
*cmd
)
2470 switch (cmd
->modifier
[0]) {
2471 case I915_FORMAT_MOD_Y_TILED_CCS
:
2472 case I915_FORMAT_MOD_Yf_TILED_CCS
:
2473 return lookup_format_info(ccs_formats
,
2474 ARRAY_SIZE(ccs_formats
),
2481 bool is_ccs_modifier(u64 modifier
)
2483 return modifier
== I915_FORMAT_MOD_Y_TILED_CCS
||
2484 modifier
== I915_FORMAT_MOD_Yf_TILED_CCS
;
2488 intel_fill_fb_info(struct drm_i915_private
*dev_priv
,
2489 struct drm_framebuffer
*fb
)
2491 struct intel_framebuffer
*intel_fb
= to_intel_framebuffer(fb
);
2492 struct intel_rotation_info
*rot_info
= &intel_fb
->rot_info
;
2493 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
2494 u32 gtt_offset_rotated
= 0;
2495 unsigned int max_size
= 0;
2496 int i
, num_planes
= fb
->format
->num_planes
;
2497 unsigned int tile_size
= intel_tile_size(dev_priv
);
2499 for (i
= 0; i
< num_planes
; i
++) {
2500 unsigned int width
, height
;
2501 unsigned int cpp
, size
;
2506 cpp
= fb
->format
->cpp
[i
];
2507 width
= drm_framebuffer_plane_width(fb
->width
, fb
, i
);
2508 height
= drm_framebuffer_plane_height(fb
->height
, fb
, i
);
2510 ret
= intel_fb_offset_to_xy(&x
, &y
, fb
, i
);
2512 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2517 if (is_ccs_modifier(fb
->modifier
) && i
== 1) {
2518 int hsub
= fb
->format
->hsub
;
2519 int vsub
= fb
->format
->vsub
;
2520 int tile_width
, tile_height
;
2524 intel_tile_dims(fb
, i
, &tile_width
, &tile_height
);
2526 tile_height
*= vsub
;
2528 ccs_x
= (x
* hsub
) % tile_width
;
2529 ccs_y
= (y
* vsub
) % tile_height
;
2530 main_x
= intel_fb
->normal
[0].x
% tile_width
;
2531 main_y
= intel_fb
->normal
[0].y
% tile_height
;
2534 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2535 * x/y offsets must match between CCS and the main surface.
2537 if (main_x
!= ccs_x
|| main_y
!= ccs_y
) {
2538 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2541 intel_fb
->normal
[0].x
,
2542 intel_fb
->normal
[0].y
,
2549 * The fence (if used) is aligned to the start of the object
2550 * so having the framebuffer wrap around across the edge of the
2551 * fenced region doesn't really work. We have no API to configure
2552 * the fence start offset within the object (nor could we probably
2553 * on gen2/3). So it's just easier if we just require that the
2554 * fb layout agrees with the fence layout. We already check that the
2555 * fb stride matches the fence stride elsewhere.
2557 if (i
== 0 && i915_gem_object_is_tiled(obj
) &&
2558 (x
+ width
) * cpp
> fb
->pitches
[i
]) {
2559 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2565 * First pixel of the framebuffer from
2566 * the start of the normal gtt mapping.
2568 intel_fb
->normal
[i
].x
= x
;
2569 intel_fb
->normal
[i
].y
= y
;
2571 offset
= intel_compute_aligned_offset(dev_priv
, &x
, &y
, fb
, i
,
2575 offset
/= tile_size
;
2577 if (!is_surface_linear(fb
->modifier
, i
)) {
2578 unsigned int tile_width
, tile_height
;
2579 unsigned int pitch_tiles
;
2582 intel_tile_dims(fb
, i
, &tile_width
, &tile_height
);
2584 rot_info
->plane
[i
].offset
= offset
;
2585 rot_info
->plane
[i
].stride
= DIV_ROUND_UP(fb
->pitches
[i
], tile_width
* cpp
);
2586 rot_info
->plane
[i
].width
= DIV_ROUND_UP(x
+ width
, tile_width
);
2587 rot_info
->plane
[i
].height
= DIV_ROUND_UP(y
+ height
, tile_height
);
2589 intel_fb
->rotated
[i
].pitch
=
2590 rot_info
->plane
[i
].height
* tile_height
;
2592 /* how many tiles does this plane need */
2593 size
= rot_info
->plane
[i
].stride
* rot_info
->plane
[i
].height
;
2595 * If the plane isn't horizontally tile aligned,
2596 * we need one more tile.
2601 /* rotate the x/y offsets to match the GTT view */
2607 rot_info
->plane
[i
].width
* tile_width
,
2608 rot_info
->plane
[i
].height
* tile_height
,
2609 DRM_MODE_ROTATE_270
);
2613 /* rotate the tile dimensions to match the GTT view */
2614 pitch_tiles
= intel_fb
->rotated
[i
].pitch
/ tile_height
;
2615 swap(tile_width
, tile_height
);
2618 * We only keep the x/y offsets, so push all of the
2619 * gtt offset into the x/y offsets.
2621 intel_adjust_tile_offset(&x
, &y
,
2622 tile_width
, tile_height
,
2623 tile_size
, pitch_tiles
,
2624 gtt_offset_rotated
* tile_size
, 0);
2626 gtt_offset_rotated
+= rot_info
->plane
[i
].width
* rot_info
->plane
[i
].height
;
2629 * First pixel of the framebuffer from
2630 * the start of the rotated gtt mapping.
2632 intel_fb
->rotated
[i
].x
= x
;
2633 intel_fb
->rotated
[i
].y
= y
;
2635 size
= DIV_ROUND_UP((y
+ height
) * fb
->pitches
[i
] +
2636 x
* cpp
, tile_size
);
2639 /* how many tiles in total needed in the bo */
2640 max_size
= max(max_size
, offset
+ size
);
2643 if (mul_u32_u32(max_size
, tile_size
) > obj
->base
.size
) {
2644 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2645 mul_u32_u32(max_size
, tile_size
), obj
->base
.size
);
2652 static int i9xx_format_to_fourcc(int format
)
2655 case DISPPLANE_8BPP
:
2656 return DRM_FORMAT_C8
;
2657 case DISPPLANE_BGRX555
:
2658 return DRM_FORMAT_XRGB1555
;
2659 case DISPPLANE_BGRX565
:
2660 return DRM_FORMAT_RGB565
;
2662 case DISPPLANE_BGRX888
:
2663 return DRM_FORMAT_XRGB8888
;
2664 case DISPPLANE_RGBX888
:
2665 return DRM_FORMAT_XBGR8888
;
2666 case DISPPLANE_BGRX101010
:
2667 return DRM_FORMAT_XRGB2101010
;
2668 case DISPPLANE_RGBX101010
:
2669 return DRM_FORMAT_XBGR2101010
;
2673 int skl_format_to_fourcc(int format
, bool rgb_order
, bool alpha
)
2676 case PLANE_CTL_FORMAT_RGB_565
:
2677 return DRM_FORMAT_RGB565
;
2678 case PLANE_CTL_FORMAT_NV12
:
2679 return DRM_FORMAT_NV12
;
2681 case PLANE_CTL_FORMAT_XRGB_8888
:
2684 return DRM_FORMAT_ABGR8888
;
2686 return DRM_FORMAT_XBGR8888
;
2689 return DRM_FORMAT_ARGB8888
;
2691 return DRM_FORMAT_XRGB8888
;
2693 case PLANE_CTL_FORMAT_XRGB_2101010
:
2695 return DRM_FORMAT_XBGR2101010
;
2697 return DRM_FORMAT_XRGB2101010
;
2702 intel_alloc_initial_plane_obj(struct intel_crtc
*crtc
,
2703 struct intel_initial_plane_config
*plane_config
)
2705 struct drm_device
*dev
= crtc
->base
.dev
;
2706 struct drm_i915_private
*dev_priv
= to_i915(dev
);
2707 struct drm_i915_gem_object
*obj
= NULL
;
2708 struct drm_mode_fb_cmd2 mode_cmd
= { 0 };
2709 struct drm_framebuffer
*fb
= &plane_config
->fb
->base
;
2710 u32 base_aligned
= round_down(plane_config
->base
, PAGE_SIZE
);
2711 u32 size_aligned
= round_up(plane_config
->base
+ plane_config
->size
,
2714 size_aligned
-= base_aligned
;
2716 if (plane_config
->size
== 0)
2719 /* If the FB is too big, just don't use it since fbdev is not very
2720 * important and we should probably use that space with FBC or other
2722 if (size_aligned
* 2 > dev_priv
->stolen_usable_size
)
2725 switch (fb
->modifier
) {
2726 case DRM_FORMAT_MOD_LINEAR
:
2727 case I915_FORMAT_MOD_X_TILED
:
2728 case I915_FORMAT_MOD_Y_TILED
:
2731 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2736 mutex_lock(&dev
->struct_mutex
);
2737 obj
= i915_gem_object_create_stolen_for_preallocated(dev_priv
,
2741 mutex_unlock(&dev
->struct_mutex
);
2745 switch (plane_config
->tiling
) {
2746 case I915_TILING_NONE
:
2750 obj
->tiling_and_stride
= fb
->pitches
[0] | plane_config
->tiling
;
2753 MISSING_CASE(plane_config
->tiling
);
2757 mode_cmd
.pixel_format
= fb
->format
->format
;
2758 mode_cmd
.width
= fb
->width
;
2759 mode_cmd
.height
= fb
->height
;
2760 mode_cmd
.pitches
[0] = fb
->pitches
[0];
2761 mode_cmd
.modifier
[0] = fb
->modifier
;
2762 mode_cmd
.flags
= DRM_MODE_FB_MODIFIERS
;
2764 if (intel_framebuffer_init(to_intel_framebuffer(fb
), obj
, &mode_cmd
)) {
2765 DRM_DEBUG_KMS("intel fb init failed\n");
2770 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj
);
2774 i915_gem_object_put(obj
);
2779 intel_set_plane_visible(struct intel_crtc_state
*crtc_state
,
2780 struct intel_plane_state
*plane_state
,
2783 struct intel_plane
*plane
= to_intel_plane(plane_state
->base
.plane
);
2785 plane_state
->base
.visible
= visible
;
2788 crtc_state
->base
.plane_mask
|= drm_plane_mask(&plane
->base
);
2790 crtc_state
->base
.plane_mask
&= ~drm_plane_mask(&plane
->base
);
2793 static void fixup_active_planes(struct intel_crtc_state
*crtc_state
)
2795 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->base
.crtc
->dev
);
2796 struct drm_plane
*plane
;
2799 * Active_planes aliases if multiple "primary" or cursor planes
2800 * have been used on the same (or wrong) pipe. plane_mask uses
2801 * unique ids, hence we can use that to reconstruct active_planes.
2803 crtc_state
->active_planes
= 0;
2805 drm_for_each_plane_mask(plane
, &dev_priv
->drm
,
2806 crtc_state
->base
.plane_mask
)
2807 crtc_state
->active_planes
|= BIT(to_intel_plane(plane
)->id
);
2810 static void intel_plane_disable_noatomic(struct intel_crtc
*crtc
,
2811 struct intel_plane
*plane
)
2813 struct intel_crtc_state
*crtc_state
=
2814 to_intel_crtc_state(crtc
->base
.state
);
2815 struct intel_plane_state
*plane_state
=
2816 to_intel_plane_state(plane
->base
.state
);
2818 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2819 plane
->base
.base
.id
, plane
->base
.name
,
2820 crtc
->base
.base
.id
, crtc
->base
.name
);
2822 intel_set_plane_visible(crtc_state
, plane_state
, false);
2823 fixup_active_planes(crtc_state
);
2825 if (plane
->id
== PLANE_PRIMARY
)
2826 intel_pre_disable_primary_noatomic(&crtc
->base
);
2828 trace_intel_disable_plane(&plane
->base
, crtc
);
2829 plane
->disable_plane(plane
, crtc_state
);
2833 intel_find_initial_plane_obj(struct intel_crtc
*intel_crtc
,
2834 struct intel_initial_plane_config
*plane_config
)
2836 struct drm_device
*dev
= intel_crtc
->base
.dev
;
2837 struct drm_i915_private
*dev_priv
= to_i915(dev
);
2839 struct drm_i915_gem_object
*obj
;
2840 struct drm_plane
*primary
= intel_crtc
->base
.primary
;
2841 struct drm_plane_state
*plane_state
= primary
->state
;
2842 struct intel_plane
*intel_plane
= to_intel_plane(primary
);
2843 struct intel_plane_state
*intel_state
=
2844 to_intel_plane_state(plane_state
);
2845 struct drm_framebuffer
*fb
;
2847 if (!plane_config
->fb
)
2850 if (intel_alloc_initial_plane_obj(intel_crtc
, plane_config
)) {
2851 fb
= &plane_config
->fb
->base
;
2855 kfree(plane_config
->fb
);
2858 * Failed to alloc the obj, check to see if we should share
2859 * an fb with another CRTC instead
2861 for_each_crtc(dev
, c
) {
2862 struct intel_plane_state
*state
;
2864 if (c
== &intel_crtc
->base
)
2867 if (!to_intel_crtc(c
)->active
)
2870 state
= to_intel_plane_state(c
->primary
->state
);
2874 if (intel_plane_ggtt_offset(state
) == plane_config
->base
) {
2875 fb
= state
->base
.fb
;
2876 drm_framebuffer_get(fb
);
2882 * We've failed to reconstruct the BIOS FB. Current display state
2883 * indicates that the primary plane is visible, but has a NULL FB,
2884 * which will lead to problems later if we don't fix it up. The
2885 * simplest solution is to just disable the primary plane now and
2886 * pretend the BIOS never had it enabled.
2888 intel_plane_disable_noatomic(intel_crtc
, intel_plane
);
2893 intel_state
->base
.rotation
= plane_config
->rotation
;
2894 intel_fill_fb_ggtt_view(&intel_state
->view
, fb
,
2895 intel_state
->base
.rotation
);
2896 intel_state
->color_plane
[0].stride
=
2897 intel_fb_pitch(fb
, 0, intel_state
->base
.rotation
);
2899 mutex_lock(&dev
->struct_mutex
);
2901 intel_pin_and_fence_fb_obj(fb
,
2903 intel_plane_uses_fence(intel_state
),
2904 &intel_state
->flags
);
2905 mutex_unlock(&dev
->struct_mutex
);
2906 if (IS_ERR(intel_state
->vma
)) {
2907 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2908 intel_crtc
->pipe
, PTR_ERR(intel_state
->vma
));
2910 intel_state
->vma
= NULL
;
2911 drm_framebuffer_put(fb
);
2915 obj
= intel_fb_obj(fb
);
2916 intel_fb_obj_flush(obj
, ORIGIN_DIRTYFB
);
2918 plane_state
->src_x
= 0;
2919 plane_state
->src_y
= 0;
2920 plane_state
->src_w
= fb
->width
<< 16;
2921 plane_state
->src_h
= fb
->height
<< 16;
2923 plane_state
->crtc_x
= 0;
2924 plane_state
->crtc_y
= 0;
2925 plane_state
->crtc_w
= fb
->width
;
2926 plane_state
->crtc_h
= fb
->height
;
2928 intel_state
->base
.src
= drm_plane_state_src(plane_state
);
2929 intel_state
->base
.dst
= drm_plane_state_dest(plane_state
);
2931 if (i915_gem_object_is_tiled(obj
))
2932 dev_priv
->preserve_bios_swizzle
= true;
2934 plane_state
->fb
= fb
;
2935 plane_state
->crtc
= &intel_crtc
->base
;
2937 atomic_or(to_intel_plane(primary
)->frontbuffer_bit
,
2938 &obj
->frontbuffer_bits
);
2941 static int skl_max_plane_width(const struct drm_framebuffer
*fb
,
2943 unsigned int rotation
)
2945 int cpp
= fb
->format
->cpp
[color_plane
];
2947 switch (fb
->modifier
) {
2948 case DRM_FORMAT_MOD_LINEAR
:
2949 case I915_FORMAT_MOD_X_TILED
:
2962 case I915_FORMAT_MOD_Y_TILED_CCS
:
2963 case I915_FORMAT_MOD_Yf_TILED_CCS
:
2964 /* FIXME AUX plane? */
2965 case I915_FORMAT_MOD_Y_TILED
:
2966 case I915_FORMAT_MOD_Yf_TILED
:
2981 MISSING_CASE(fb
->modifier
);
2987 static bool skl_check_main_ccs_coordinates(struct intel_plane_state
*plane_state
,
2988 int main_x
, int main_y
, u32 main_offset
)
2990 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
2991 int hsub
= fb
->format
->hsub
;
2992 int vsub
= fb
->format
->vsub
;
2993 int aux_x
= plane_state
->color_plane
[1].x
;
2994 int aux_y
= plane_state
->color_plane
[1].y
;
2995 u32 aux_offset
= plane_state
->color_plane
[1].offset
;
2996 u32 alignment
= intel_surf_alignment(fb
, 1);
2998 while (aux_offset
>= main_offset
&& aux_y
<= main_y
) {
3001 if (aux_x
== main_x
&& aux_y
== main_y
)
3004 if (aux_offset
== 0)
3009 aux_offset
= intel_plane_adjust_aligned_offset(&x
, &y
, plane_state
, 1,
3010 aux_offset
, aux_offset
- alignment
);
3011 aux_x
= x
* hsub
+ aux_x
% hsub
;
3012 aux_y
= y
* vsub
+ aux_y
% vsub
;
3015 if (aux_x
!= main_x
|| aux_y
!= main_y
)
3018 plane_state
->color_plane
[1].offset
= aux_offset
;
3019 plane_state
->color_plane
[1].x
= aux_x
;
3020 plane_state
->color_plane
[1].y
= aux_y
;
3025 static int skl_check_main_surface(struct intel_plane_state
*plane_state
)
3027 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3028 unsigned int rotation
= plane_state
->base
.rotation
;
3029 int x
= plane_state
->base
.src
.x1
>> 16;
3030 int y
= plane_state
->base
.src
.y1
>> 16;
3031 int w
= drm_rect_width(&plane_state
->base
.src
) >> 16;
3032 int h
= drm_rect_height(&plane_state
->base
.src
) >> 16;
3033 int max_width
= skl_max_plane_width(fb
, 0, rotation
);
3034 int max_height
= 4096;
3035 u32 alignment
, offset
, aux_offset
= plane_state
->color_plane
[1].offset
;
3037 if (w
> max_width
|| h
> max_height
) {
3038 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3039 w
, h
, max_width
, max_height
);
3043 intel_add_fb_offsets(&x
, &y
, plane_state
, 0);
3044 offset
= intel_plane_compute_aligned_offset(&x
, &y
, plane_state
, 0);
3045 alignment
= intel_surf_alignment(fb
, 0);
3048 * AUX surface offset is specified as the distance from the
3049 * main surface offset, and it must be non-negative. Make
3050 * sure that is what we will get.
3052 if (offset
> aux_offset
)
3053 offset
= intel_plane_adjust_aligned_offset(&x
, &y
, plane_state
, 0,
3054 offset
, aux_offset
& ~(alignment
- 1));
3057 * When using an X-tiled surface, the plane blows up
3058 * if the x offset + width exceed the stride.
3060 * TODO: linear and Y-tiled seem fine, Yf untested,
3062 if (fb
->modifier
== I915_FORMAT_MOD_X_TILED
) {
3063 int cpp
= fb
->format
->cpp
[0];
3065 while ((x
+ w
) * cpp
> plane_state
->color_plane
[0].stride
) {
3067 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3071 offset
= intel_plane_adjust_aligned_offset(&x
, &y
, plane_state
, 0,
3072 offset
, offset
- alignment
);
3077 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3078 * they match with the main surface x/y offsets.
3080 if (is_ccs_modifier(fb
->modifier
)) {
3081 while (!skl_check_main_ccs_coordinates(plane_state
, x
, y
, offset
)) {
3085 offset
= intel_plane_adjust_aligned_offset(&x
, &y
, plane_state
, 0,
3086 offset
, offset
- alignment
);
3089 if (x
!= plane_state
->color_plane
[1].x
|| y
!= plane_state
->color_plane
[1].y
) {
3090 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3095 plane_state
->color_plane
[0].offset
= offset
;
3096 plane_state
->color_plane
[0].x
= x
;
3097 plane_state
->color_plane
[0].y
= y
;
3102 static int skl_check_nv12_aux_surface(struct intel_plane_state
*plane_state
)
3104 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3105 unsigned int rotation
= plane_state
->base
.rotation
;
3106 int max_width
= skl_max_plane_width(fb
, 1, rotation
);
3107 int max_height
= 4096;
3108 int x
= plane_state
->base
.src
.x1
>> 17;
3109 int y
= plane_state
->base
.src
.y1
>> 17;
3110 int w
= drm_rect_width(&plane_state
->base
.src
) >> 17;
3111 int h
= drm_rect_height(&plane_state
->base
.src
) >> 17;
3114 intel_add_fb_offsets(&x
, &y
, plane_state
, 1);
3115 offset
= intel_plane_compute_aligned_offset(&x
, &y
, plane_state
, 1);
3117 /* FIXME not quite sure how/if these apply to the chroma plane */
3118 if (w
> max_width
|| h
> max_height
) {
3119 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3120 w
, h
, max_width
, max_height
);
3124 plane_state
->color_plane
[1].offset
= offset
;
3125 plane_state
->color_plane
[1].x
= x
;
3126 plane_state
->color_plane
[1].y
= y
;
3131 static int skl_check_ccs_aux_surface(struct intel_plane_state
*plane_state
)
3133 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3134 int src_x
= plane_state
->base
.src
.x1
>> 16;
3135 int src_y
= plane_state
->base
.src
.y1
>> 16;
3136 int hsub
= fb
->format
->hsub
;
3137 int vsub
= fb
->format
->vsub
;
3138 int x
= src_x
/ hsub
;
3139 int y
= src_y
/ vsub
;
3142 intel_add_fb_offsets(&x
, &y
, plane_state
, 1);
3143 offset
= intel_plane_compute_aligned_offset(&x
, &y
, plane_state
, 1);
3145 plane_state
->color_plane
[1].offset
= offset
;
3146 plane_state
->color_plane
[1].x
= x
* hsub
+ src_x
% hsub
;
3147 plane_state
->color_plane
[1].y
= y
* vsub
+ src_y
% vsub
;
3152 int skl_check_plane_surface(struct intel_plane_state
*plane_state
)
3154 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3155 unsigned int rotation
= plane_state
->base
.rotation
;
3158 intel_fill_fb_ggtt_view(&plane_state
->view
, fb
, rotation
);
3159 plane_state
->color_plane
[0].stride
= intel_fb_pitch(fb
, 0, rotation
);
3160 plane_state
->color_plane
[1].stride
= intel_fb_pitch(fb
, 1, rotation
);
3162 ret
= intel_plane_check_stride(plane_state
);
3166 if (!plane_state
->base
.visible
)
3169 /* Rotate src coordinates to match rotated GTT view */
3170 if (drm_rotation_90_or_270(rotation
))
3171 drm_rect_rotate(&plane_state
->base
.src
,
3172 fb
->width
<< 16, fb
->height
<< 16,
3173 DRM_MODE_ROTATE_270
);
3176 * Handle the AUX surface first since
3177 * the main surface setup depends on it.
3179 if (fb
->format
->format
== DRM_FORMAT_NV12
) {
3180 ret
= skl_check_nv12_aux_surface(plane_state
);
3183 } else if (is_ccs_modifier(fb
->modifier
)) {
3184 ret
= skl_check_ccs_aux_surface(plane_state
);
3188 plane_state
->color_plane
[1].offset
= ~0xfff;
3189 plane_state
->color_plane
[1].x
= 0;
3190 plane_state
->color_plane
[1].y
= 0;
3193 ret
= skl_check_main_surface(plane_state
);
3201 i9xx_plane_max_stride(struct intel_plane
*plane
,
3202 u32 pixel_format
, u64 modifier
,
3203 unsigned int rotation
)
3205 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
3207 if (!HAS_GMCH(dev_priv
)) {
3209 } else if (INTEL_GEN(dev_priv
) >= 4) {
3210 if (modifier
== I915_FORMAT_MOD_X_TILED
)
3214 } else if (INTEL_GEN(dev_priv
) >= 3) {
3215 if (modifier
== I915_FORMAT_MOD_X_TILED
)
3220 if (plane
->i9xx_plane
== PLANE_C
)
3227 static u32
i9xx_plane_ctl_crtc(const struct intel_crtc_state
*crtc_state
)
3229 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
3230 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
3233 dspcntr
|= DISPPLANE_GAMMA_ENABLE
;
3235 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
3236 dspcntr
|= DISPPLANE_PIPE_CSC_ENABLE
;
3238 if (INTEL_GEN(dev_priv
) < 5)
3239 dspcntr
|= DISPPLANE_SEL_PIPE(crtc
->pipe
);
3244 static u32
i9xx_plane_ctl(const struct intel_crtc_state
*crtc_state
,
3245 const struct intel_plane_state
*plane_state
)
3247 struct drm_i915_private
*dev_priv
=
3248 to_i915(plane_state
->base
.plane
->dev
);
3249 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3250 unsigned int rotation
= plane_state
->base
.rotation
;
3253 dspcntr
= DISPLAY_PLANE_ENABLE
;
3255 if (IS_G4X(dev_priv
) || IS_GEN(dev_priv
, 5) ||
3256 IS_GEN(dev_priv
, 6) || IS_IVYBRIDGE(dev_priv
))
3257 dspcntr
|= DISPPLANE_TRICKLE_FEED_DISABLE
;
3259 switch (fb
->format
->format
) {
3261 dspcntr
|= DISPPLANE_8BPP
;
3263 case DRM_FORMAT_XRGB1555
:
3264 dspcntr
|= DISPPLANE_BGRX555
;
3266 case DRM_FORMAT_RGB565
:
3267 dspcntr
|= DISPPLANE_BGRX565
;
3269 case DRM_FORMAT_XRGB8888
:
3270 dspcntr
|= DISPPLANE_BGRX888
;
3272 case DRM_FORMAT_XBGR8888
:
3273 dspcntr
|= DISPPLANE_RGBX888
;
3275 case DRM_FORMAT_XRGB2101010
:
3276 dspcntr
|= DISPPLANE_BGRX101010
;
3278 case DRM_FORMAT_XBGR2101010
:
3279 dspcntr
|= DISPPLANE_RGBX101010
;
3282 MISSING_CASE(fb
->format
->format
);
3286 if (INTEL_GEN(dev_priv
) >= 4 &&
3287 fb
->modifier
== I915_FORMAT_MOD_X_TILED
)
3288 dspcntr
|= DISPPLANE_TILED
;
3290 if (rotation
& DRM_MODE_ROTATE_180
)
3291 dspcntr
|= DISPPLANE_ROTATE_180
;
3293 if (rotation
& DRM_MODE_REFLECT_X
)
3294 dspcntr
|= DISPPLANE_MIRROR
;
3299 int i9xx_check_plane_surface(struct intel_plane_state
*plane_state
)
3301 struct drm_i915_private
*dev_priv
=
3302 to_i915(plane_state
->base
.plane
->dev
);
3303 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3304 unsigned int rotation
= plane_state
->base
.rotation
;
3305 int src_x
= plane_state
->base
.src
.x1
>> 16;
3306 int src_y
= plane_state
->base
.src
.y1
>> 16;
3310 intel_fill_fb_ggtt_view(&plane_state
->view
, fb
, rotation
);
3311 plane_state
->color_plane
[0].stride
= intel_fb_pitch(fb
, 0, rotation
);
3313 ret
= intel_plane_check_stride(plane_state
);
3317 intel_add_fb_offsets(&src_x
, &src_y
, plane_state
, 0);
3319 if (INTEL_GEN(dev_priv
) >= 4)
3320 offset
= intel_plane_compute_aligned_offset(&src_x
, &src_y
,
3325 /* HSW/BDW do this automagically in hardware */
3326 if (!IS_HASWELL(dev_priv
) && !IS_BROADWELL(dev_priv
)) {
3327 int src_w
= drm_rect_width(&plane_state
->base
.src
) >> 16;
3328 int src_h
= drm_rect_height(&plane_state
->base
.src
) >> 16;
3330 if (rotation
& DRM_MODE_ROTATE_180
) {
3333 } else if (rotation
& DRM_MODE_REFLECT_X
) {
3338 plane_state
->color_plane
[0].offset
= offset
;
3339 plane_state
->color_plane
[0].x
= src_x
;
3340 plane_state
->color_plane
[0].y
= src_y
;
3346 i9xx_plane_check(struct intel_crtc_state
*crtc_state
,
3347 struct intel_plane_state
*plane_state
)
3351 ret
= chv_plane_check_rotation(plane_state
);
3355 ret
= drm_atomic_helper_check_plane_state(&plane_state
->base
,
3357 DRM_PLANE_HELPER_NO_SCALING
,
3358 DRM_PLANE_HELPER_NO_SCALING
,
3363 if (!plane_state
->base
.visible
)
3366 ret
= intel_plane_check_src_coordinates(plane_state
);
3370 ret
= i9xx_check_plane_surface(plane_state
);
3374 plane_state
->ctl
= i9xx_plane_ctl(crtc_state
, plane_state
);
3379 static void i9xx_update_plane(struct intel_plane
*plane
,
3380 const struct intel_crtc_state
*crtc_state
,
3381 const struct intel_plane_state
*plane_state
)
3383 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
3384 enum i9xx_plane_id i9xx_plane
= plane
->i9xx_plane
;
3386 int x
= plane_state
->color_plane
[0].x
;
3387 int y
= plane_state
->color_plane
[0].y
;
3388 unsigned long irqflags
;
3392 dspcntr
= plane_state
->ctl
| i9xx_plane_ctl_crtc(crtc_state
);
3394 linear_offset
= intel_fb_xy_to_linear(x
, y
, plane_state
, 0);
3396 if (INTEL_GEN(dev_priv
) >= 4)
3397 dspaddr_offset
= plane_state
->color_plane
[0].offset
;
3399 dspaddr_offset
= linear_offset
;
3401 spin_lock_irqsave(&dev_priv
->uncore
.lock
, irqflags
);
3403 I915_WRITE_FW(DSPSTRIDE(i9xx_plane
), plane_state
->color_plane
[0].stride
);
3405 if (INTEL_GEN(dev_priv
) < 4) {
3406 /* pipesrc and dspsize control the size that is scaled from,
3407 * which should always be the user's requested size.
3409 I915_WRITE_FW(DSPPOS(i9xx_plane
), 0);
3410 I915_WRITE_FW(DSPSIZE(i9xx_plane
),
3411 ((crtc_state
->pipe_src_h
- 1) << 16) |
3412 (crtc_state
->pipe_src_w
- 1));
3413 } else if (IS_CHERRYVIEW(dev_priv
) && i9xx_plane
== PLANE_B
) {
3414 I915_WRITE_FW(PRIMPOS(i9xx_plane
), 0);
3415 I915_WRITE_FW(PRIMSIZE(i9xx_plane
),
3416 ((crtc_state
->pipe_src_h
- 1) << 16) |
3417 (crtc_state
->pipe_src_w
- 1));
3418 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane
), 0);
3421 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) {
3422 I915_WRITE_FW(DSPOFFSET(i9xx_plane
), (y
<< 16) | x
);
3423 } else if (INTEL_GEN(dev_priv
) >= 4) {
3424 I915_WRITE_FW(DSPLINOFF(i9xx_plane
), linear_offset
);
3425 I915_WRITE_FW(DSPTILEOFF(i9xx_plane
), (y
<< 16) | x
);
3429 * The control register self-arms if the plane was previously
3430 * disabled. Try to make the plane enable atomic by writing
3431 * the control register just before the surface register.
3433 I915_WRITE_FW(DSPCNTR(i9xx_plane
), dspcntr
);
3434 if (INTEL_GEN(dev_priv
) >= 4)
3435 I915_WRITE_FW(DSPSURF(i9xx_plane
),
3436 intel_plane_ggtt_offset(plane_state
) +
3439 I915_WRITE_FW(DSPADDR(i9xx_plane
),
3440 intel_plane_ggtt_offset(plane_state
) +
3443 spin_unlock_irqrestore(&dev_priv
->uncore
.lock
, irqflags
);
3446 static void i9xx_disable_plane(struct intel_plane
*plane
,
3447 const struct intel_crtc_state
*crtc_state
)
3449 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
3450 enum i9xx_plane_id i9xx_plane
= plane
->i9xx_plane
;
3451 unsigned long irqflags
;
3455 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3456 * enable on ilk+ affect the pipe bottom color as
3457 * well, so we must configure them even if the plane
3460 * On pre-g4x there is no way to gamma correct the
3461 * pipe bottom color but we'll keep on doing this
3464 dspcntr
= i9xx_plane_ctl_crtc(crtc_state
);
3466 spin_lock_irqsave(&dev_priv
->uncore
.lock
, irqflags
);
3468 I915_WRITE_FW(DSPCNTR(i9xx_plane
), dspcntr
);
3469 if (INTEL_GEN(dev_priv
) >= 4)
3470 I915_WRITE_FW(DSPSURF(i9xx_plane
), 0);
3472 I915_WRITE_FW(DSPADDR(i9xx_plane
), 0);
3474 spin_unlock_irqrestore(&dev_priv
->uncore
.lock
, irqflags
);
3477 static bool i9xx_plane_get_hw_state(struct intel_plane
*plane
,
3480 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
3481 enum intel_display_power_domain power_domain
;
3482 enum i9xx_plane_id i9xx_plane
= plane
->i9xx_plane
;
3483 intel_wakeref_t wakeref
;
3488 * Not 100% correct for planes that can move between pipes,
3489 * but that's only the case for gen2-4 which don't have any
3490 * display power wells.
3492 power_domain
= POWER_DOMAIN_PIPE(plane
->pipe
);
3493 wakeref
= intel_display_power_get_if_enabled(dev_priv
, power_domain
);
3497 val
= I915_READ(DSPCNTR(i9xx_plane
));
3499 ret
= val
& DISPLAY_PLANE_ENABLE
;
3501 if (INTEL_GEN(dev_priv
) >= 5)
3502 *pipe
= plane
->pipe
;
3504 *pipe
= (val
& DISPPLANE_SEL_PIPE_MASK
) >>
3505 DISPPLANE_SEL_PIPE_SHIFT
;
3507 intel_display_power_put(dev_priv
, power_domain
, wakeref
);
3513 intel_fb_stride_alignment(const struct drm_framebuffer
*fb
, int color_plane
)
3515 if (fb
->modifier
== DRM_FORMAT_MOD_LINEAR
)
3518 return intel_tile_width_bytes(fb
, color_plane
);
3521 static void skl_detach_scaler(struct intel_crtc
*intel_crtc
, int id
)
3523 struct drm_device
*dev
= intel_crtc
->base
.dev
;
3524 struct drm_i915_private
*dev_priv
= to_i915(dev
);
3526 I915_WRITE(SKL_PS_CTRL(intel_crtc
->pipe
, id
), 0);
3527 I915_WRITE(SKL_PS_WIN_POS(intel_crtc
->pipe
, id
), 0);
3528 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc
->pipe
, id
), 0);
3532 * This function detaches (aka. unbinds) unused scalers in hardware
3534 static void skl_detach_scalers(const struct intel_crtc_state
*crtc_state
)
3536 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
3537 const struct intel_crtc_scaler_state
*scaler_state
=
3538 &crtc_state
->scaler_state
;
3541 /* loop through and disable scalers that aren't in use */
3542 for (i
= 0; i
< intel_crtc
->num_scalers
; i
++) {
3543 if (!scaler_state
->scalers
[i
].in_use
)
3544 skl_detach_scaler(intel_crtc
, i
);
3548 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer
*fb
,
3549 int color_plane
, unsigned int rotation
)
3552 * The stride is either expressed as a multiple of 64 bytes chunks for
3553 * linear buffers or in number of tiles for tiled buffers.
3555 if (fb
->modifier
== DRM_FORMAT_MOD_LINEAR
)
3557 else if (drm_rotation_90_or_270(rotation
))
3558 return intel_tile_height(fb
, color_plane
);
3560 return intel_tile_width_bytes(fb
, color_plane
);
3563 u32
skl_plane_stride(const struct intel_plane_state
*plane_state
,
3566 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3567 unsigned int rotation
= plane_state
->base
.rotation
;
3568 u32 stride
= plane_state
->color_plane
[color_plane
].stride
;
3570 if (color_plane
>= fb
->format
->num_planes
)
3573 return stride
/ skl_plane_stride_mult(fb
, color_plane
, rotation
);
3576 static u32
skl_plane_ctl_format(u32 pixel_format
)
3578 switch (pixel_format
) {
3580 return PLANE_CTL_FORMAT_INDEXED
;
3581 case DRM_FORMAT_RGB565
:
3582 return PLANE_CTL_FORMAT_RGB_565
;
3583 case DRM_FORMAT_XBGR8888
:
3584 case DRM_FORMAT_ABGR8888
:
3585 return PLANE_CTL_FORMAT_XRGB_8888
| PLANE_CTL_ORDER_RGBX
;
3586 case DRM_FORMAT_XRGB8888
:
3587 case DRM_FORMAT_ARGB8888
:
3588 return PLANE_CTL_FORMAT_XRGB_8888
;
3589 case DRM_FORMAT_XRGB2101010
:
3590 return PLANE_CTL_FORMAT_XRGB_2101010
;
3591 case DRM_FORMAT_XBGR2101010
:
3592 return PLANE_CTL_ORDER_RGBX
| PLANE_CTL_FORMAT_XRGB_2101010
;
3593 case DRM_FORMAT_YUYV
:
3594 return PLANE_CTL_FORMAT_YUV422
| PLANE_CTL_YUV422_YUYV
;
3595 case DRM_FORMAT_YVYU
:
3596 return PLANE_CTL_FORMAT_YUV422
| PLANE_CTL_YUV422_YVYU
;
3597 case DRM_FORMAT_UYVY
:
3598 return PLANE_CTL_FORMAT_YUV422
| PLANE_CTL_YUV422_UYVY
;
3599 case DRM_FORMAT_VYUY
:
3600 return PLANE_CTL_FORMAT_YUV422
| PLANE_CTL_YUV422_VYUY
;
3601 case DRM_FORMAT_NV12
:
3602 return PLANE_CTL_FORMAT_NV12
;
3604 MISSING_CASE(pixel_format
);
3610 static u32
skl_plane_ctl_alpha(const struct intel_plane_state
*plane_state
)
3612 if (!plane_state
->base
.fb
->format
->has_alpha
)
3613 return PLANE_CTL_ALPHA_DISABLE
;
3615 switch (plane_state
->base
.pixel_blend_mode
) {
3616 case DRM_MODE_BLEND_PIXEL_NONE
:
3617 return PLANE_CTL_ALPHA_DISABLE
;
3618 case DRM_MODE_BLEND_PREMULTI
:
3619 return PLANE_CTL_ALPHA_SW_PREMULTIPLY
;
3620 case DRM_MODE_BLEND_COVERAGE
:
3621 return PLANE_CTL_ALPHA_HW_PREMULTIPLY
;
3623 MISSING_CASE(plane_state
->base
.pixel_blend_mode
);
3624 return PLANE_CTL_ALPHA_DISABLE
;
3628 static u32
glk_plane_color_ctl_alpha(const struct intel_plane_state
*plane_state
)
3630 if (!plane_state
->base
.fb
->format
->has_alpha
)
3631 return PLANE_COLOR_ALPHA_DISABLE
;
3633 switch (plane_state
->base
.pixel_blend_mode
) {
3634 case DRM_MODE_BLEND_PIXEL_NONE
:
3635 return PLANE_COLOR_ALPHA_DISABLE
;
3636 case DRM_MODE_BLEND_PREMULTI
:
3637 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY
;
3638 case DRM_MODE_BLEND_COVERAGE
:
3639 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY
;
3641 MISSING_CASE(plane_state
->base
.pixel_blend_mode
);
3642 return PLANE_COLOR_ALPHA_DISABLE
;
3646 static u32
skl_plane_ctl_tiling(u64 fb_modifier
)
3648 switch (fb_modifier
) {
3649 case DRM_FORMAT_MOD_LINEAR
:
3651 case I915_FORMAT_MOD_X_TILED
:
3652 return PLANE_CTL_TILED_X
;
3653 case I915_FORMAT_MOD_Y_TILED
:
3654 return PLANE_CTL_TILED_Y
;
3655 case I915_FORMAT_MOD_Y_TILED_CCS
:
3656 return PLANE_CTL_TILED_Y
| PLANE_CTL_RENDER_DECOMPRESSION_ENABLE
;
3657 case I915_FORMAT_MOD_Yf_TILED
:
3658 return PLANE_CTL_TILED_YF
;
3659 case I915_FORMAT_MOD_Yf_TILED_CCS
:
3660 return PLANE_CTL_TILED_YF
| PLANE_CTL_RENDER_DECOMPRESSION_ENABLE
;
3662 MISSING_CASE(fb_modifier
);
3668 static u32
skl_plane_ctl_rotate(unsigned int rotate
)
3671 case DRM_MODE_ROTATE_0
:
3674 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
3675 * while i915 HW rotation is clockwise, thats why this swapping.
3677 case DRM_MODE_ROTATE_90
:
3678 return PLANE_CTL_ROTATE_270
;
3679 case DRM_MODE_ROTATE_180
:
3680 return PLANE_CTL_ROTATE_180
;
3681 case DRM_MODE_ROTATE_270
:
3682 return PLANE_CTL_ROTATE_90
;
3684 MISSING_CASE(rotate
);
3690 static u32
cnl_plane_ctl_flip(unsigned int reflect
)
3695 case DRM_MODE_REFLECT_X
:
3696 return PLANE_CTL_FLIP_HORIZONTAL
;
3697 case DRM_MODE_REFLECT_Y
:
3699 MISSING_CASE(reflect
);
3705 u32
skl_plane_ctl_crtc(const struct intel_crtc_state
*crtc_state
)
3707 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->base
.crtc
->dev
);
3710 if (INTEL_GEN(dev_priv
) >= 10 || IS_GEMINILAKE(dev_priv
))
3713 plane_ctl
|= PLANE_CTL_PIPE_GAMMA_ENABLE
;
3714 plane_ctl
|= PLANE_CTL_PIPE_CSC_ENABLE
;
3719 u32
skl_plane_ctl(const struct intel_crtc_state
*crtc_state
,
3720 const struct intel_plane_state
*plane_state
)
3722 struct drm_i915_private
*dev_priv
=
3723 to_i915(plane_state
->base
.plane
->dev
);
3724 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3725 unsigned int rotation
= plane_state
->base
.rotation
;
3726 const struct drm_intel_sprite_colorkey
*key
= &plane_state
->ckey
;
3729 plane_ctl
= PLANE_CTL_ENABLE
;
3731 if (INTEL_GEN(dev_priv
) < 10 && !IS_GEMINILAKE(dev_priv
)) {
3732 plane_ctl
|= skl_plane_ctl_alpha(plane_state
);
3733 plane_ctl
|= PLANE_CTL_PLANE_GAMMA_DISABLE
;
3735 if (plane_state
->base
.color_encoding
== DRM_COLOR_YCBCR_BT709
)
3736 plane_ctl
|= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709
;
3738 if (plane_state
->base
.color_range
== DRM_COLOR_YCBCR_FULL_RANGE
)
3739 plane_ctl
|= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE
;
3742 plane_ctl
|= skl_plane_ctl_format(fb
->format
->format
);
3743 plane_ctl
|= skl_plane_ctl_tiling(fb
->modifier
);
3744 plane_ctl
|= skl_plane_ctl_rotate(rotation
& DRM_MODE_ROTATE_MASK
);
3746 if (INTEL_GEN(dev_priv
) >= 10)
3747 plane_ctl
|= cnl_plane_ctl_flip(rotation
&
3748 DRM_MODE_REFLECT_MASK
);
3750 if (key
->flags
& I915_SET_COLORKEY_DESTINATION
)
3751 plane_ctl
|= PLANE_CTL_KEY_ENABLE_DESTINATION
;
3752 else if (key
->flags
& I915_SET_COLORKEY_SOURCE
)
3753 plane_ctl
|= PLANE_CTL_KEY_ENABLE_SOURCE
;
3758 u32
glk_plane_color_ctl_crtc(const struct intel_crtc_state
*crtc_state
)
3760 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->base
.crtc
->dev
);
3761 u32 plane_color_ctl
= 0;
3763 if (INTEL_GEN(dev_priv
) >= 11)
3764 return plane_color_ctl
;
3766 plane_color_ctl
|= PLANE_COLOR_PIPE_GAMMA_ENABLE
;
3767 plane_color_ctl
|= PLANE_COLOR_PIPE_CSC_ENABLE
;
3769 return plane_color_ctl
;
3772 u32
glk_plane_color_ctl(const struct intel_crtc_state
*crtc_state
,
3773 const struct intel_plane_state
*plane_state
)
3775 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
3776 struct intel_plane
*plane
= to_intel_plane(plane_state
->base
.plane
);
3777 u32 plane_color_ctl
= 0;
3779 plane_color_ctl
|= PLANE_COLOR_PLANE_GAMMA_DISABLE
;
3780 plane_color_ctl
|= glk_plane_color_ctl_alpha(plane_state
);
3782 if (fb
->format
->is_yuv
&& !icl_is_hdr_plane(plane
)) {
3783 if (plane_state
->base
.color_encoding
== DRM_COLOR_YCBCR_BT709
)
3784 plane_color_ctl
|= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709
;
3786 plane_color_ctl
|= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709
;
3788 if (plane_state
->base
.color_range
== DRM_COLOR_YCBCR_FULL_RANGE
)
3789 plane_color_ctl
|= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE
;
3790 } else if (fb
->format
->is_yuv
) {
3791 plane_color_ctl
|= PLANE_COLOR_INPUT_CSC_ENABLE
;
3794 return plane_color_ctl
;
3798 __intel_display_resume(struct drm_device
*dev
,
3799 struct drm_atomic_state
*state
,
3800 struct drm_modeset_acquire_ctx
*ctx
)
3802 struct drm_crtc_state
*crtc_state
;
3803 struct drm_crtc
*crtc
;
3806 intel_modeset_setup_hw_state(dev
, ctx
);
3807 i915_redisable_vga(to_i915(dev
));
3813 * We've duplicated the state, pointers to the old state are invalid.
3815 * Don't attempt to use the old state until we commit the duplicated state.
3817 for_each_new_crtc_in_state(state
, crtc
, crtc_state
, i
) {
3819 * Force recalculation even if we restore
3820 * current state. With fast modeset this may not result
3821 * in a modeset when the state is compatible.
3823 crtc_state
->mode_changed
= true;
3826 /* ignore any reset values/BIOS leftovers in the WM registers */
3827 if (!HAS_GMCH(to_i915(dev
)))
3828 to_intel_atomic_state(state
)->skip_intermediate_wm
= true;
3830 ret
= drm_atomic_helper_commit_duplicated_state(state
, ctx
);
3832 WARN_ON(ret
== -EDEADLK
);
3836 static bool gpu_reset_clobbers_display(struct drm_i915_private
*dev_priv
)
3838 return (INTEL_INFO(dev_priv
)->gpu_reset_clobbers_display
&&
3839 intel_has_gpu_reset(dev_priv
));
3842 void intel_prepare_reset(struct drm_i915_private
*dev_priv
)
3844 struct drm_device
*dev
= &dev_priv
->drm
;
3845 struct drm_modeset_acquire_ctx
*ctx
= &dev_priv
->reset_ctx
;
3846 struct drm_atomic_state
*state
;
3849 /* reset doesn't touch the display */
3850 if (!i915_modparams
.force_reset_modeset_test
&&
3851 !gpu_reset_clobbers_display(dev_priv
))
3854 /* We have a modeset vs reset deadlock, defensively unbreak it. */
3855 set_bit(I915_RESET_MODESET
, &dev_priv
->gpu_error
.flags
);
3856 wake_up_all(&dev_priv
->gpu_error
.wait_queue
);
3858 if (atomic_read(&dev_priv
->gpu_error
.pending_fb_pin
)) {
3859 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
3860 i915_gem_set_wedged(dev_priv
);
3864 * Need mode_config.mutex so that we don't
3865 * trample ongoing ->detect() and whatnot.
3867 mutex_lock(&dev
->mode_config
.mutex
);
3868 drm_modeset_acquire_init(ctx
, 0);
3870 ret
= drm_modeset_lock_all_ctx(dev
, ctx
);
3871 if (ret
!= -EDEADLK
)
3874 drm_modeset_backoff(ctx
);
3877 * Disabling the crtcs gracefully seems nicer. Also the
3878 * g33 docs say we should at least disable all the planes.
3880 state
= drm_atomic_helper_duplicate_state(dev
, ctx
);
3881 if (IS_ERR(state
)) {
3882 ret
= PTR_ERR(state
);
3883 DRM_ERROR("Duplicating state failed with %i\n", ret
);
3887 ret
= drm_atomic_helper_disable_all(dev
, ctx
);
3889 DRM_ERROR("Suspending crtc's failed with %i\n", ret
);
3890 drm_atomic_state_put(state
);
3894 dev_priv
->modeset_restore_state
= state
;
3895 state
->acquire_ctx
= ctx
;
3898 void intel_finish_reset(struct drm_i915_private
*dev_priv
)
3900 struct drm_device
*dev
= &dev_priv
->drm
;
3901 struct drm_modeset_acquire_ctx
*ctx
= &dev_priv
->reset_ctx
;
3902 struct drm_atomic_state
*state
;
3905 /* reset doesn't touch the display */
3906 if (!test_bit(I915_RESET_MODESET
, &dev_priv
->gpu_error
.flags
))
3909 state
= fetch_and_zero(&dev_priv
->modeset_restore_state
);
3913 /* reset doesn't touch the display */
3914 if (!gpu_reset_clobbers_display(dev_priv
)) {
3915 /* for testing only restore the display */
3916 ret
= __intel_display_resume(dev
, state
, ctx
);
3918 DRM_ERROR("Restoring old state failed with %i\n", ret
);
3921 * The display has been reset as well,
3922 * so need a full re-initialization.
3924 intel_runtime_pm_disable_interrupts(dev_priv
);
3925 intel_runtime_pm_enable_interrupts(dev_priv
);
3927 intel_pps_unlock_regs_wa(dev_priv
);
3928 intel_modeset_init_hw(dev
);
3929 intel_init_clock_gating(dev_priv
);
3931 spin_lock_irq(&dev_priv
->irq_lock
);
3932 if (dev_priv
->display
.hpd_irq_setup
)
3933 dev_priv
->display
.hpd_irq_setup(dev_priv
);
3934 spin_unlock_irq(&dev_priv
->irq_lock
);
3936 ret
= __intel_display_resume(dev
, state
, ctx
);
3938 DRM_ERROR("Restoring old state failed with %i\n", ret
);
3940 intel_hpd_init(dev_priv
);
3943 drm_atomic_state_put(state
);
3945 drm_modeset_drop_locks(ctx
);
3946 drm_modeset_acquire_fini(ctx
);
3947 mutex_unlock(&dev
->mode_config
.mutex
);
3949 clear_bit(I915_RESET_MODESET
, &dev_priv
->gpu_error
.flags
);
3952 static void icl_set_pipe_chicken(struct intel_crtc
*crtc
)
3954 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
3955 enum pipe pipe
= crtc
->pipe
;
3958 tmp
= I915_READ(PIPE_CHICKEN(pipe
));
3961 * Display WA #1153: icl
3962 * enable hardware to bypass the alpha math
3963 * and rounding for per-pixel values 00 and 0xff
3965 tmp
|= PER_PIXEL_ALPHA_BYPASS_EN
;
3968 * W/A for underruns with linear/X-tiled with
3971 tmp
|= PM_FILL_MAINTAIN_DBUF_FULLNESS
;
3973 I915_WRITE(PIPE_CHICKEN(pipe
), tmp
);
3976 static void intel_update_pipe_config(const struct intel_crtc_state
*old_crtc_state
,
3977 const struct intel_crtc_state
*new_crtc_state
)
3979 struct intel_crtc
*crtc
= to_intel_crtc(new_crtc_state
->base
.crtc
);
3980 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
3982 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3983 crtc
->base
.mode
= new_crtc_state
->base
.mode
;
3986 * Update pipe size and adjust fitter if needed: the reason for this is
3987 * that in compute_mode_changes we check the native mode (not the pfit
3988 * mode) to see if we can flip rather than do a full mode set. In the
3989 * fastboot case, we'll flip, but if we don't update the pipesrc and
3990 * pfit state, we'll end up with a big fb scanned out into the wrong
3994 I915_WRITE(PIPESRC(crtc
->pipe
),
3995 ((new_crtc_state
->pipe_src_w
- 1) << 16) |
3996 (new_crtc_state
->pipe_src_h
- 1));
3998 /* on skylake this is done by detaching scalers */
3999 if (INTEL_GEN(dev_priv
) >= 9) {
4000 skl_detach_scalers(new_crtc_state
);
4002 if (new_crtc_state
->pch_pfit
.enabled
)
4003 skylake_pfit_enable(new_crtc_state
);
4004 } else if (HAS_PCH_SPLIT(dev_priv
)) {
4005 if (new_crtc_state
->pch_pfit
.enabled
)
4006 ironlake_pfit_enable(new_crtc_state
);
4007 else if (old_crtc_state
->pch_pfit
.enabled
)
4008 ironlake_pfit_disable(old_crtc_state
);
4012 * We don't (yet) allow userspace to control the pipe background color,
4013 * so force it to black, but apply pipe gamma and CSC so that its
4014 * handling will match how we program our planes.
4016 if (INTEL_GEN(dev_priv
) >= 9)
4017 I915_WRITE(SKL_BOTTOM_COLOR(crtc
->pipe
),
4018 SKL_BOTTOM_COLOR_GAMMA_ENABLE
|
4019 SKL_BOTTOM_COLOR_CSC_ENABLE
);
4021 if (INTEL_GEN(dev_priv
) >= 11)
4022 icl_set_pipe_chicken(crtc
);
4025 static void intel_fdi_normal_train(struct intel_crtc
*crtc
)
4027 struct drm_device
*dev
= crtc
->base
.dev
;
4028 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4029 int pipe
= crtc
->pipe
;
4033 /* enable normal train */
4034 reg
= FDI_TX_CTL(pipe
);
4035 temp
= I915_READ(reg
);
4036 if (IS_IVYBRIDGE(dev_priv
)) {
4037 temp
&= ~FDI_LINK_TRAIN_NONE_IVB
;
4038 temp
|= FDI_LINK_TRAIN_NONE_IVB
| FDI_TX_ENHANCE_FRAME_ENABLE
;
4040 temp
&= ~FDI_LINK_TRAIN_NONE
;
4041 temp
|= FDI_LINK_TRAIN_NONE
| FDI_TX_ENHANCE_FRAME_ENABLE
;
4043 I915_WRITE(reg
, temp
);
4045 reg
= FDI_RX_CTL(pipe
);
4046 temp
= I915_READ(reg
);
4047 if (HAS_PCH_CPT(dev_priv
)) {
4048 temp
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT
;
4049 temp
|= FDI_LINK_TRAIN_NORMAL_CPT
;
4051 temp
&= ~FDI_LINK_TRAIN_NONE
;
4052 temp
|= FDI_LINK_TRAIN_NONE
;
4054 I915_WRITE(reg
, temp
| FDI_RX_ENHANCE_FRAME_ENABLE
);
4056 /* wait one idle pattern time */
4060 /* IVB wants error correction enabled */
4061 if (IS_IVYBRIDGE(dev_priv
))
4062 I915_WRITE(reg
, I915_READ(reg
) | FDI_FS_ERRC_ENABLE
|
4063 FDI_FE_ERRC_ENABLE
);
4066 /* The FDI link training functions for ILK/Ibexpeak. */
4067 static void ironlake_fdi_link_train(struct intel_crtc
*crtc
,
4068 const struct intel_crtc_state
*crtc_state
)
4070 struct drm_device
*dev
= crtc
->base
.dev
;
4071 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4072 int pipe
= crtc
->pipe
;
4076 /* FDI needs bits from pipe first */
4077 assert_pipe_enabled(dev_priv
, pipe
);
4079 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4081 reg
= FDI_RX_IMR(pipe
);
4082 temp
= I915_READ(reg
);
4083 temp
&= ~FDI_RX_SYMBOL_LOCK
;
4084 temp
&= ~FDI_RX_BIT_LOCK
;
4085 I915_WRITE(reg
, temp
);
4089 /* enable CPU FDI TX and PCH FDI RX */
4090 reg
= FDI_TX_CTL(pipe
);
4091 temp
= I915_READ(reg
);
4092 temp
&= ~FDI_DP_PORT_WIDTH_MASK
;
4093 temp
|= FDI_DP_PORT_WIDTH(crtc_state
->fdi_lanes
);
4094 temp
&= ~FDI_LINK_TRAIN_NONE
;
4095 temp
|= FDI_LINK_TRAIN_PATTERN_1
;
4096 I915_WRITE(reg
, temp
| FDI_TX_ENABLE
);
4098 reg
= FDI_RX_CTL(pipe
);
4099 temp
= I915_READ(reg
);
4100 temp
&= ~FDI_LINK_TRAIN_NONE
;
4101 temp
|= FDI_LINK_TRAIN_PATTERN_1
;
4102 I915_WRITE(reg
, temp
| FDI_RX_ENABLE
);
4107 /* Ironlake workaround, enable clock pointer after FDI enable*/
4108 I915_WRITE(FDI_RX_CHICKEN(pipe
), FDI_RX_PHASE_SYNC_POINTER_OVR
);
4109 I915_WRITE(FDI_RX_CHICKEN(pipe
), FDI_RX_PHASE_SYNC_POINTER_OVR
|
4110 FDI_RX_PHASE_SYNC_POINTER_EN
);
4112 reg
= FDI_RX_IIR(pipe
);
4113 for (tries
= 0; tries
< 5; tries
++) {
4114 temp
= I915_READ(reg
);
4115 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp
);
4117 if ((temp
& FDI_RX_BIT_LOCK
)) {
4118 DRM_DEBUG_KMS("FDI train 1 done.\n");
4119 I915_WRITE(reg
, temp
| FDI_RX_BIT_LOCK
);
4124 DRM_ERROR("FDI train 1 fail!\n");
4127 reg
= FDI_TX_CTL(pipe
);
4128 temp
= I915_READ(reg
);
4129 temp
&= ~FDI_LINK_TRAIN_NONE
;
4130 temp
|= FDI_LINK_TRAIN_PATTERN_2
;
4131 I915_WRITE(reg
, temp
);
4133 reg
= FDI_RX_CTL(pipe
);
4134 temp
= I915_READ(reg
);
4135 temp
&= ~FDI_LINK_TRAIN_NONE
;
4136 temp
|= FDI_LINK_TRAIN_PATTERN_2
;
4137 I915_WRITE(reg
, temp
);
4142 reg
= FDI_RX_IIR(pipe
);
4143 for (tries
= 0; tries
< 5; tries
++) {
4144 temp
= I915_READ(reg
);
4145 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp
);
4147 if (temp
& FDI_RX_SYMBOL_LOCK
) {
4148 I915_WRITE(reg
, temp
| FDI_RX_SYMBOL_LOCK
);
4149 DRM_DEBUG_KMS("FDI train 2 done.\n");
4154 DRM_ERROR("FDI train 2 fail!\n");
4156 DRM_DEBUG_KMS("FDI train done\n");
4160 static const int snb_b_fdi_train_param
[] = {
4161 FDI_LINK_TRAIN_400MV_0DB_SNB_B
,
4162 FDI_LINK_TRAIN_400MV_6DB_SNB_B
,
4163 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B
,
4164 FDI_LINK_TRAIN_800MV_0DB_SNB_B
,
4167 /* The FDI link training functions for SNB/Cougarpoint. */
4168 static void gen6_fdi_link_train(struct intel_crtc
*crtc
,
4169 const struct intel_crtc_state
*crtc_state
)
4171 struct drm_device
*dev
= crtc
->base
.dev
;
4172 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4173 int pipe
= crtc
->pipe
;
4177 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4179 reg
= FDI_RX_IMR(pipe
);
4180 temp
= I915_READ(reg
);
4181 temp
&= ~FDI_RX_SYMBOL_LOCK
;
4182 temp
&= ~FDI_RX_BIT_LOCK
;
4183 I915_WRITE(reg
, temp
);
4188 /* enable CPU FDI TX and PCH FDI RX */
4189 reg
= FDI_TX_CTL(pipe
);
4190 temp
= I915_READ(reg
);
4191 temp
&= ~FDI_DP_PORT_WIDTH_MASK
;
4192 temp
|= FDI_DP_PORT_WIDTH(crtc_state
->fdi_lanes
);
4193 temp
&= ~FDI_LINK_TRAIN_NONE
;
4194 temp
|= FDI_LINK_TRAIN_PATTERN_1
;
4195 temp
&= ~FDI_LINK_TRAIN_VOL_EMP_MASK
;
4197 temp
|= FDI_LINK_TRAIN_400MV_0DB_SNB_B
;
4198 I915_WRITE(reg
, temp
| FDI_TX_ENABLE
);
4200 I915_WRITE(FDI_RX_MISC(pipe
),
4201 FDI_RX_TP1_TO_TP2_48
| FDI_RX_FDI_DELAY_90
);
4203 reg
= FDI_RX_CTL(pipe
);
4204 temp
= I915_READ(reg
);
4205 if (HAS_PCH_CPT(dev_priv
)) {
4206 temp
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT
;
4207 temp
|= FDI_LINK_TRAIN_PATTERN_1_CPT
;
4209 temp
&= ~FDI_LINK_TRAIN_NONE
;
4210 temp
|= FDI_LINK_TRAIN_PATTERN_1
;
4212 I915_WRITE(reg
, temp
| FDI_RX_ENABLE
);
4217 for (i
= 0; i
< 4; i
++) {
4218 reg
= FDI_TX_CTL(pipe
);
4219 temp
= I915_READ(reg
);
4220 temp
&= ~FDI_LINK_TRAIN_VOL_EMP_MASK
;
4221 temp
|= snb_b_fdi_train_param
[i
];
4222 I915_WRITE(reg
, temp
);
4227 for (retry
= 0; retry
< 5; retry
++) {
4228 reg
= FDI_RX_IIR(pipe
);
4229 temp
= I915_READ(reg
);
4230 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp
);
4231 if (temp
& FDI_RX_BIT_LOCK
) {
4232 I915_WRITE(reg
, temp
| FDI_RX_BIT_LOCK
);
4233 DRM_DEBUG_KMS("FDI train 1 done.\n");
4242 DRM_ERROR("FDI train 1 fail!\n");
4245 reg
= FDI_TX_CTL(pipe
);
4246 temp
= I915_READ(reg
);
4247 temp
&= ~FDI_LINK_TRAIN_NONE
;
4248 temp
|= FDI_LINK_TRAIN_PATTERN_2
;
4249 if (IS_GEN(dev_priv
, 6)) {
4250 temp
&= ~FDI_LINK_TRAIN_VOL_EMP_MASK
;
4252 temp
|= FDI_LINK_TRAIN_400MV_0DB_SNB_B
;
4254 I915_WRITE(reg
, temp
);
4256 reg
= FDI_RX_CTL(pipe
);
4257 temp
= I915_READ(reg
);
4258 if (HAS_PCH_CPT(dev_priv
)) {
4259 temp
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT
;
4260 temp
|= FDI_LINK_TRAIN_PATTERN_2_CPT
;
4262 temp
&= ~FDI_LINK_TRAIN_NONE
;
4263 temp
|= FDI_LINK_TRAIN_PATTERN_2
;
4265 I915_WRITE(reg
, temp
);
4270 for (i
= 0; i
< 4; i
++) {
4271 reg
= FDI_TX_CTL(pipe
);
4272 temp
= I915_READ(reg
);
4273 temp
&= ~FDI_LINK_TRAIN_VOL_EMP_MASK
;
4274 temp
|= snb_b_fdi_train_param
[i
];
4275 I915_WRITE(reg
, temp
);
4280 for (retry
= 0; retry
< 5; retry
++) {
4281 reg
= FDI_RX_IIR(pipe
);
4282 temp
= I915_READ(reg
);
4283 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp
);
4284 if (temp
& FDI_RX_SYMBOL_LOCK
) {
4285 I915_WRITE(reg
, temp
| FDI_RX_SYMBOL_LOCK
);
4286 DRM_DEBUG_KMS("FDI train 2 done.\n");
4295 DRM_ERROR("FDI train 2 fail!\n");
4297 DRM_DEBUG_KMS("FDI train done.\n");
4300 /* Manual link training for Ivy Bridge A0 parts */
4301 static void ivb_manual_fdi_link_train(struct intel_crtc
*crtc
,
4302 const struct intel_crtc_state
*crtc_state
)
4304 struct drm_device
*dev
= crtc
->base
.dev
;
4305 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4306 int pipe
= crtc
->pipe
;
4310 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4312 reg
= FDI_RX_IMR(pipe
);
4313 temp
= I915_READ(reg
);
4314 temp
&= ~FDI_RX_SYMBOL_LOCK
;
4315 temp
&= ~FDI_RX_BIT_LOCK
;
4316 I915_WRITE(reg
, temp
);
4321 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4322 I915_READ(FDI_RX_IIR(pipe
)));
4324 /* Try each vswing and preemphasis setting twice before moving on */
4325 for (j
= 0; j
< ARRAY_SIZE(snb_b_fdi_train_param
) * 2; j
++) {
4326 /* disable first in case we need to retry */
4327 reg
= FDI_TX_CTL(pipe
);
4328 temp
= I915_READ(reg
);
4329 temp
&= ~(FDI_LINK_TRAIN_AUTO
| FDI_LINK_TRAIN_NONE_IVB
);
4330 temp
&= ~FDI_TX_ENABLE
;
4331 I915_WRITE(reg
, temp
);
4333 reg
= FDI_RX_CTL(pipe
);
4334 temp
= I915_READ(reg
);
4335 temp
&= ~FDI_LINK_TRAIN_AUTO
;
4336 temp
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT
;
4337 temp
&= ~FDI_RX_ENABLE
;
4338 I915_WRITE(reg
, temp
);
4340 /* enable CPU FDI TX and PCH FDI RX */
4341 reg
= FDI_TX_CTL(pipe
);
4342 temp
= I915_READ(reg
);
4343 temp
&= ~FDI_DP_PORT_WIDTH_MASK
;
4344 temp
|= FDI_DP_PORT_WIDTH(crtc_state
->fdi_lanes
);
4345 temp
|= FDI_LINK_TRAIN_PATTERN_1_IVB
;
4346 temp
&= ~FDI_LINK_TRAIN_VOL_EMP_MASK
;
4347 temp
|= snb_b_fdi_train_param
[j
/2];
4348 temp
|= FDI_COMPOSITE_SYNC
;
4349 I915_WRITE(reg
, temp
| FDI_TX_ENABLE
);
4351 I915_WRITE(FDI_RX_MISC(pipe
),
4352 FDI_RX_TP1_TO_TP2_48
| FDI_RX_FDI_DELAY_90
);
4354 reg
= FDI_RX_CTL(pipe
);
4355 temp
= I915_READ(reg
);
4356 temp
|= FDI_LINK_TRAIN_PATTERN_1_CPT
;
4357 temp
|= FDI_COMPOSITE_SYNC
;
4358 I915_WRITE(reg
, temp
| FDI_RX_ENABLE
);
4361 udelay(1); /* should be 0.5us */
4363 for (i
= 0; i
< 4; i
++) {
4364 reg
= FDI_RX_IIR(pipe
);
4365 temp
= I915_READ(reg
);
4366 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp
);
4368 if (temp
& FDI_RX_BIT_LOCK
||
4369 (I915_READ(reg
) & FDI_RX_BIT_LOCK
)) {
4370 I915_WRITE(reg
, temp
| FDI_RX_BIT_LOCK
);
4371 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4375 udelay(1); /* should be 0.5us */
4378 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j
/ 2);
4383 reg
= FDI_TX_CTL(pipe
);
4384 temp
= I915_READ(reg
);
4385 temp
&= ~FDI_LINK_TRAIN_NONE_IVB
;
4386 temp
|= FDI_LINK_TRAIN_PATTERN_2_IVB
;
4387 I915_WRITE(reg
, temp
);
4389 reg
= FDI_RX_CTL(pipe
);
4390 temp
= I915_READ(reg
);
4391 temp
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT
;
4392 temp
|= FDI_LINK_TRAIN_PATTERN_2_CPT
;
4393 I915_WRITE(reg
, temp
);
4396 udelay(2); /* should be 1.5us */
4398 for (i
= 0; i
< 4; i
++) {
4399 reg
= FDI_RX_IIR(pipe
);
4400 temp
= I915_READ(reg
);
4401 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp
);
4403 if (temp
& FDI_RX_SYMBOL_LOCK
||
4404 (I915_READ(reg
) & FDI_RX_SYMBOL_LOCK
)) {
4405 I915_WRITE(reg
, temp
| FDI_RX_SYMBOL_LOCK
);
4406 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4410 udelay(2); /* should be 1.5us */
4413 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j
/ 2);
4417 DRM_DEBUG_KMS("FDI train done.\n");
4420 static void ironlake_fdi_pll_enable(const struct intel_crtc_state
*crtc_state
)
4422 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
4423 struct drm_i915_private
*dev_priv
= to_i915(intel_crtc
->base
.dev
);
4424 int pipe
= intel_crtc
->pipe
;
4428 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4429 reg
= FDI_RX_CTL(pipe
);
4430 temp
= I915_READ(reg
);
4431 temp
&= ~(FDI_DP_PORT_WIDTH_MASK
| (0x7 << 16));
4432 temp
|= FDI_DP_PORT_WIDTH(crtc_state
->fdi_lanes
);
4433 temp
|= (I915_READ(PIPECONF(pipe
)) & PIPECONF_BPC_MASK
) << 11;
4434 I915_WRITE(reg
, temp
| FDI_RX_PLL_ENABLE
);
4439 /* Switch from Rawclk to PCDclk */
4440 temp
= I915_READ(reg
);
4441 I915_WRITE(reg
, temp
| FDI_PCDCLK
);
4446 /* Enable CPU FDI TX PLL, always on for Ironlake */
4447 reg
= FDI_TX_CTL(pipe
);
4448 temp
= I915_READ(reg
);
4449 if ((temp
& FDI_TX_PLL_ENABLE
) == 0) {
4450 I915_WRITE(reg
, temp
| FDI_TX_PLL_ENABLE
);
4457 static void ironlake_fdi_pll_disable(struct intel_crtc
*intel_crtc
)
4459 struct drm_device
*dev
= intel_crtc
->base
.dev
;
4460 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4461 int pipe
= intel_crtc
->pipe
;
4465 /* Switch from PCDclk to Rawclk */
4466 reg
= FDI_RX_CTL(pipe
);
4467 temp
= I915_READ(reg
);
4468 I915_WRITE(reg
, temp
& ~FDI_PCDCLK
);
4470 /* Disable CPU FDI TX PLL */
4471 reg
= FDI_TX_CTL(pipe
);
4472 temp
= I915_READ(reg
);
4473 I915_WRITE(reg
, temp
& ~FDI_TX_PLL_ENABLE
);
4478 reg
= FDI_RX_CTL(pipe
);
4479 temp
= I915_READ(reg
);
4480 I915_WRITE(reg
, temp
& ~FDI_RX_PLL_ENABLE
);
4482 /* Wait for the clocks to turn off. */
4487 static void ironlake_fdi_disable(struct drm_crtc
*crtc
)
4489 struct drm_device
*dev
= crtc
->dev
;
4490 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4491 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
4492 int pipe
= intel_crtc
->pipe
;
4496 /* disable CPU FDI tx and PCH FDI rx */
4497 reg
= FDI_TX_CTL(pipe
);
4498 temp
= I915_READ(reg
);
4499 I915_WRITE(reg
, temp
& ~FDI_TX_ENABLE
);
4502 reg
= FDI_RX_CTL(pipe
);
4503 temp
= I915_READ(reg
);
4504 temp
&= ~(0x7 << 16);
4505 temp
|= (I915_READ(PIPECONF(pipe
)) & PIPECONF_BPC_MASK
) << 11;
4506 I915_WRITE(reg
, temp
& ~FDI_RX_ENABLE
);
4511 /* Ironlake workaround, disable clock pointer after downing FDI */
4512 if (HAS_PCH_IBX(dev_priv
))
4513 I915_WRITE(FDI_RX_CHICKEN(pipe
), FDI_RX_PHASE_SYNC_POINTER_OVR
);
4515 /* still set train pattern 1 */
4516 reg
= FDI_TX_CTL(pipe
);
4517 temp
= I915_READ(reg
);
4518 temp
&= ~FDI_LINK_TRAIN_NONE
;
4519 temp
|= FDI_LINK_TRAIN_PATTERN_1
;
4520 I915_WRITE(reg
, temp
);
4522 reg
= FDI_RX_CTL(pipe
);
4523 temp
= I915_READ(reg
);
4524 if (HAS_PCH_CPT(dev_priv
)) {
4525 temp
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT
;
4526 temp
|= FDI_LINK_TRAIN_PATTERN_1_CPT
;
4528 temp
&= ~FDI_LINK_TRAIN_NONE
;
4529 temp
|= FDI_LINK_TRAIN_PATTERN_1
;
4531 /* BPC in FDI rx is consistent with that in PIPECONF */
4532 temp
&= ~(0x07 << 16);
4533 temp
|= (I915_READ(PIPECONF(pipe
)) & PIPECONF_BPC_MASK
) << 11;
4534 I915_WRITE(reg
, temp
);
4540 bool intel_has_pending_fb_unpin(struct drm_i915_private
*dev_priv
)
4542 struct drm_crtc
*crtc
;
4545 drm_for_each_crtc(crtc
, &dev_priv
->drm
) {
4546 struct drm_crtc_commit
*commit
;
4547 spin_lock(&crtc
->commit_lock
);
4548 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4549 struct drm_crtc_commit
, commit_entry
);
4550 cleanup_done
= commit
?
4551 try_wait_for_completion(&commit
->cleanup_done
) : true;
4552 spin_unlock(&crtc
->commit_lock
);
4557 drm_crtc_wait_one_vblank(crtc
);
4565 void lpt_disable_iclkip(struct drm_i915_private
*dev_priv
)
4569 I915_WRITE(PIXCLK_GATE
, PIXCLK_GATE_GATE
);
4571 mutex_lock(&dev_priv
->sb_lock
);
4573 temp
= intel_sbi_read(dev_priv
, SBI_SSCCTL6
, SBI_ICLK
);
4574 temp
|= SBI_SSCCTL_DISABLE
;
4575 intel_sbi_write(dev_priv
, SBI_SSCCTL6
, temp
, SBI_ICLK
);
4577 mutex_unlock(&dev_priv
->sb_lock
);
/*
 * lpt_program_iclkip - program the iCLKIP clock to the pipe's pixel clock.
 * Disables iCLKIP, computes divider/phase-increment settings from
 * adjusted_mode.crtc_clock, programs SSCDIVINTPHASE6/SSCAUXDIV6 over the
 * sideband bus, re-enables the modulator, and ungates the pixel clock.
 * NOTE(review): extract is missing lines (loop body tail, "temp"
 * declaration, braces); code kept byte-identical.
 */
4580 /* Program iCLKIP clock to the desired frequency */
4581 static void lpt_program_iclkip(const struct intel_crtc_state
*crtc_state
)
4583 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
4584 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
4585 int clock
= crtc_state
->base
.adjusted_mode
.crtc_clock
;
4586 u32 divsel
, phaseinc
, auxdiv
, phasedir
= 0;
4589 lpt_disable_iclkip(dev_priv
);
4591 /* The iCLK virtual clock root frequency is in MHz,
4592 * but the adjusted_mode->crtc_clock is in KHz. To get the
4593 * divisors, it is necessary to divide one by another, so we
4594 * convert the virtual clock precision to KHz here for higher
4597 for (auxdiv
= 0; auxdiv
< 2; auxdiv
++) {
4598 u32 iclk_virtual_root_freq
= 172800 * 1000;
4599 u32 iclk_pi_range
= 64;
4600 u32 desired_divisor
;
4602 desired_divisor
= DIV_ROUND_CLOSEST(iclk_virtual_root_freq
,
4604 divsel
= (desired_divisor
/ iclk_pi_range
) - 2;
4605 phaseinc
= desired_divisor
% iclk_pi_range
;
4608 * Near 20MHz is a corner case which is
4609 * out of range for the 7-bit divisor
4615 /* This should not happen with any sane values */
4616 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel
) &
4617 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK
);
4618 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir
) &
4619 ~SBI_SSCDIVINTPHASE_INCVAL_MASK
);
4621 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
/* All sideband register updates below are under sb_lock. */
4628 mutex_lock(&dev_priv
->sb_lock
);
4630 /* Program SSCDIVINTPHASE6 */
4631 temp
= intel_sbi_read(dev_priv
, SBI_SSCDIVINTPHASE6
, SBI_ICLK
);
4632 temp
&= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK
;
4633 temp
|= SBI_SSCDIVINTPHASE_DIVSEL(divsel
);
4634 temp
&= ~SBI_SSCDIVINTPHASE_INCVAL_MASK
;
4635 temp
|= SBI_SSCDIVINTPHASE_INCVAL(phaseinc
);
4636 temp
|= SBI_SSCDIVINTPHASE_DIR(phasedir
);
4637 temp
|= SBI_SSCDIVINTPHASE_PROPAGATE
;
4638 intel_sbi_write(dev_priv
, SBI_SSCDIVINTPHASE6
, temp
, SBI_ICLK
);
4640 /* Program SSCAUXDIV */
4641 temp
= intel_sbi_read(dev_priv
, SBI_SSCAUXDIV6
, SBI_ICLK
);
4642 temp
&= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4643 temp
|= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv
);
4644 intel_sbi_write(dev_priv
, SBI_SSCAUXDIV6
, temp
, SBI_ICLK
);
4646 /* Enable modulator and associated divider */
4647 temp
= intel_sbi_read(dev_priv
, SBI_SSCCTL6
, SBI_ICLK
);
4648 temp
&= ~SBI_SSCCTL_DISABLE
;
4649 intel_sbi_write(dev_priv
, SBI_SSCCTL6
, temp
, SBI_ICLK
);
4651 mutex_unlock(&dev_priv
->sb_lock
);
4653 /* Wait for initialization time */
4656 I915_WRITE(PIXCLK_GATE
, PIXCLK_GATE_UNGATE
);
/*
 * lpt_get_iclkip - read back the currently programmed iCLKIP frequency.
 * Inverse of lpt_program_iclkip(): reads divsel/phaseinc/auxdiv from the
 * sideband registers and reconstructs the clock in kHz. Returns 0 (via
 * the dropped early-return paths) when the clock is gated or disabled.
 * NOTE(review): extract is missing braces, the "temp" declaration and the
 * early "return 0;" lines; code kept byte-identical.
 */
4659 int lpt_get_iclkip(struct drm_i915_private
*dev_priv
)
4661 u32 divsel
, phaseinc
, auxdiv
;
4662 u32 iclk_virtual_root_freq
= 172800 * 1000;
4663 u32 iclk_pi_range
= 64;
4664 u32 desired_divisor
;
4667 if ((I915_READ(PIXCLK_GATE
) & PIXCLK_GATE_UNGATE
) == 0)
4670 mutex_lock(&dev_priv
->sb_lock
);
4672 temp
= intel_sbi_read(dev_priv
, SBI_SSCCTL6
, SBI_ICLK
);
4673 if (temp
& SBI_SSCCTL_DISABLE
) {
4674 mutex_unlock(&dev_priv
->sb_lock
);
4678 temp
= intel_sbi_read(dev_priv
, SBI_SSCDIVINTPHASE6
, SBI_ICLK
);
4679 divsel
= (temp
& SBI_SSCDIVINTPHASE_DIVSEL_MASK
) >>
4680 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT
;
4681 phaseinc
= (temp
& SBI_SSCDIVINTPHASE_INCVAL_MASK
) >>
4682 SBI_SSCDIVINTPHASE_INCVAL_SHIFT
;
4684 temp
= intel_sbi_read(dev_priv
, SBI_SSCAUXDIV6
, SBI_ICLK
);
4685 auxdiv
= (temp
& SBI_SSCAUXDIV_FINALDIV2SEL_MASK
) >>
4686 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT
;
4688 mutex_unlock(&dev_priv
->sb_lock
);
/* Mirror of the divisor decomposition done when programming. */
4690 desired_divisor
= (divsel
+ 2) * iclk_pi_range
+ phaseinc
;
4692 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq
,
4693 desired_divisor
<< auxdiv
);
/*
 * ironlake_pch_transcoder_set_timings - copy CPU transcoder timings to
 * the PCH transcoder: H/V total, blank, sync and vsyncshift registers
 * are read from the CPU transcoder and written to the matching
 * PCH_TRANS_* registers for @pch_transcoder.
 */
4696 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state
*crtc_state
,
4697 enum pipe pch_transcoder
)
4699 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
4700 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
4701 enum transcoder cpu_transcoder
= crtc_state
->cpu_transcoder
;
4703 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder
),
4704 I915_READ(HTOTAL(cpu_transcoder
)));
4705 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder
),
4706 I915_READ(HBLANK(cpu_transcoder
)));
4707 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder
),
4708 I915_READ(HSYNC(cpu_transcoder
)));
4710 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder
),
4711 I915_READ(VTOTAL(cpu_transcoder
)));
4712 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder
),
4713 I915_READ(VBLANK(cpu_transcoder
)));
4714 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder
),
4715 I915_READ(VSYNC(cpu_transcoder
)));
4716 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder
),
4717 I915_READ(VSYNCSHIFT(cpu_transcoder
)));
/*
 * cpt_set_fdi_bc_bifurcation - toggle FDI B/C lane bifurcation in
 * SOUTH_CHICKEN1. No-op when the bit already matches @enable; warns if
 * FDI RX is still enabled on pipe B or C while flipping the bit.
 * NOTE(review): extract dropped the early return and the if/else lines
 * around the set/clear pair; code kept byte-identical.
 */
4720 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private
*dev_priv
, bool enable
)
4724 temp
= I915_READ(SOUTH_CHICKEN1
);
4725 if (!!(temp
& FDI_BC_BIFURCATION_SELECT
) == enable
)
4728 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B
)) & FDI_RX_ENABLE
);
4729 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C
)) & FDI_RX_ENABLE
);
4731 temp
&= ~FDI_BC_BIFURCATION_SELECT
;
4733 temp
|= FDI_BC_BIFURCATION_SELECT
;
4735 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable
? "en" : "dis");
4736 I915_WRITE(SOUTH_CHICKEN1
, temp
);
4737 POSTING_READ(SOUTH_CHICKEN1
);
/*
 * ivybridge_update_fdi_bc_bifurcation - decide FDI B/C bifurcation per
 * pipe: pipe B needs bifurcation only when using more than 2 FDI lanes,
 * pipe C always needs it (switch cases dropped by the extract).
 */
4740 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state
*crtc_state
)
4742 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
4743 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
4745 switch (crtc
->pipe
) {
4749 if (crtc_state
->fdi_lanes
> 2)
4750 cpt_set_fdi_bc_bifurcation(dev_priv
, false);
4752 cpt_set_fdi_bc_bifurcation(dev_priv
, true);
4756 cpt_set_fdi_bc_bifurcation(dev_priv
, true);
/*
 * Returns the single encoder driven by this CRTC in the new atomic
 * state, found by scanning connector states; WARNs if the count is not
 * exactly one. (Loop-counter declaration and return dropped by extract.)
 */
4765 * Finds the encoder associated with the given CRTC. This can only be
4766 * used when we know that the CRTC isn't feeding multiple encoders!
4768 static struct intel_encoder
*
4769 intel_get_crtc_new_encoder(const struct intel_atomic_state
*state
,
4770 const struct intel_crtc_state
*crtc_state
)
4772 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
4773 const struct drm_connector_state
*connector_state
;
4774 const struct drm_connector
*connector
;
4775 struct intel_encoder
*encoder
= NULL
;
4776 int num_encoders
= 0;
4779 for_each_new_connector_in_state(&state
->base
, connector
, connector_state
, i
) {
4780 if (connector_state
->crtc
!= &crtc
->base
)
4783 encoder
= to_intel_encoder(connector_state
->best_encoder
);
4787 WARN(num_encoders
!= 1, "%d encoders for pipe %c\n",
4788 num_encoders
, pipe_name(crtc
->pipe
));
/*
 * ironlake_pch_enable - bring up all PCH resources for a PCH port:
 * FDI training, DPLL clock selection (CPT), shared DPLL enable, PCH
 * transcoder timings, and TRANS_DP_CTL for PCH DP, then enables the
 * PCH transcoder.
 * NOTE(review): extract dropped braces and local declarations
 * ("temp"/"sel"/"port"); code kept byte-identical.
 */
4794 * Enable PCH resources required for PCH ports:
4796 * - FDI training & RX/TX
4797 * - update transcoder timings
4798 * - DP transcoding bits
4801 static void ironlake_pch_enable(const struct intel_atomic_state
*state
,
4802 const struct intel_crtc_state
*crtc_state
)
4804 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
4805 struct drm_device
*dev
= crtc
->base
.dev
;
4806 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4807 int pipe
= crtc
->pipe
;
4810 assert_pch_transcoder_disabled(dev_priv
, pipe
);
4812 if (IS_IVYBRIDGE(dev_priv
))
4813 ivybridge_update_fdi_bc_bifurcation(crtc_state
);
4815 /* Write the TU size bits before fdi link training, so that error
4816 * detection works. */
4817 I915_WRITE(FDI_RX_TUSIZE1(pipe
),
4818 I915_READ(PIPE_DATA_M1(pipe
)) & TU_SIZE_MASK
);
4820 /* For PCH output, training FDI link */
4821 dev_priv
->display
.fdi_link_train(crtc
, crtc_state
);
4823 /* We need to program the right clock selection before writing the pixel
4824 * multiplier into the DPLL. */
4825 if (HAS_PCH_CPT(dev_priv
)) {
4828 temp
= I915_READ(PCH_DPLL_SEL
);
4829 temp
|= TRANS_DPLL_ENABLE(pipe
);
4830 sel
= TRANS_DPLLB_SEL(pipe
);
4831 if (crtc_state
->shared_dpll
==
4832 intel_get_shared_dpll_by_id(dev_priv
, DPLL_ID_PCH_PLL_B
))
4836 I915_WRITE(PCH_DPLL_SEL
, temp
);
4839 /* XXX: pch pll's can be enabled any time before we enable the PCH
4840 * transcoder, and we actually should do this to not upset any PCH
4841 * transcoder that already use the clock when we share it.
4843 * Note that enable_shared_dpll tries to do the right thing, but
4844 * get_shared_dpll unconditionally resets the pll - we need that to have
4845 * the right LVDS enable sequence. */
4846 intel_enable_shared_dpll(crtc_state
);
4848 /* set transcoder timing, panel must allow it */
4849 assert_panel_unlocked(dev_priv
, pipe
);
4850 ironlake_pch_transcoder_set_timings(crtc_state
, pipe
);
4852 intel_fdi_normal_train(crtc
);
4854 /* For PCH DP, enable TRANS_DP_CTL */
4855 if (HAS_PCH_CPT(dev_priv
) &&
4856 intel_crtc_has_dp_encoder(crtc_state
)) {
4857 const struct drm_display_mode
*adjusted_mode
=
4858 &crtc_state
->base
.adjusted_mode
;
4859 u32 bpc
= (I915_READ(PIPECONF(pipe
)) & PIPECONF_BPC_MASK
) >> 5;
4860 i915_reg_t reg
= TRANS_DP_CTL(pipe
);
4863 temp
= I915_READ(reg
);
4864 temp
&= ~(TRANS_DP_PORT_SEL_MASK
|
4865 TRANS_DP_SYNC_MASK
|
4867 temp
|= TRANS_DP_OUTPUT_ENABLE
;
4868 temp
|= bpc
<< 9; /* same format but at 11:9 */
4870 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
4871 temp
|= TRANS_DP_HSYNC_ACTIVE_HIGH
;
4872 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
4873 temp
|= TRANS_DP_VSYNC_ACTIVE_HIGH
;
4875 port
= intel_get_crtc_new_encoder(state
, crtc_state
)->port
;
4876 WARN_ON(port
< PORT_B
|| port
> PORT_D
);
4877 temp
|= TRANS_DP_PORT_SEL(port
);
4879 I915_WRITE(reg
, temp
);
4882 ironlake_enable_pch_transcoder(crtc_state
);
/*
 * lpt_pch_enable - LPT variant of PCH enable: programs iCLKIP, copies
 * transcoder timings to the (single) PCH transcoder at PIPE_A, then
 * enables the PCH transcoder for the given CPU transcoder.
 */
4885 static void lpt_pch_enable(const struct intel_atomic_state
*state
,
4886 const struct intel_crtc_state
*crtc_state
)
4888 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
4889 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
4890 enum transcoder cpu_transcoder
= crtc_state
->cpu_transcoder
;
4892 assert_pch_transcoder_disabled(dev_priv
, PIPE_A
);
4894 lpt_program_iclkip(crtc_state
);
4896 /* Set transcoder timing. */
4897 ironlake_pch_transcoder_set_timings(crtc_state
, PIPE_A
);
4899 lpt_enable_pch_transcoder(dev_priv
, cpu_transcoder
);
/*
 * cpt_verify_modeset - sanity-check that the pipe scanline counter
 * (PIPEDSL) is advancing after a modeset; retries once and logs an
 * error if the pipe appears stuck.
 * NOTE(review): "temp" declaration dropped by extract.
 */
4902 static void cpt_verify_modeset(struct drm_device
*dev
, int pipe
)
4904 struct drm_i915_private
*dev_priv
= to_i915(dev
);
4905 i915_reg_t dslreg
= PIPEDSL(pipe
);
4908 temp
= I915_READ(dslreg
);
4910 if (wait_for(I915_READ(dslreg
) != temp
, 5)) {
4911 if (wait_for(I915_READ(dslreg
) != temp
, 5))
4912 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe
));
/*
 * skl_scaler_calc_phase - compute the scaler initial phase register
 * value (16.16 fixed point, returned packed with the trip bit) from the
 * chroma subsampling factor, the scale factor, and chroma cositing.
 * NOTE(review): the if-conditions guarding the cositing/trip branches
 * were dropped by the extract; code kept byte-identical.
 */
4917 * The hardware phase 0.0 refers to the center of the pixel.
4918 * We want to start from the top/left edge which is phase
4919 * -0.5. That matches how the hardware calculates the scaling
4920 * factors (from top-left of the first pixel to bottom-right
4921 * of the last pixel, as opposed to the pixel centers).
4923 * For 4:2:0 subsampled chroma planes we obviously have to
4924 * adjust that so that the chroma sample position lands in
4927 * Note that for packed YCbCr 4:2:2 formats there is no way to
4928 * control chroma siting. The hardware simply replicates the
4929 * chroma samples for both of the luma samples, and thus we don't
4930 * actually get the expected MPEG2 chroma siting convention :(
4931 * The same behaviour is observed on pre-SKL platforms as well.
4933 * Theory behind the formula (note that we ignore sub-pixel
4934 * source coordinates):
4935 * s = source sample position
4936 * d = destination sample position
4941 * | | 1.5 (initial phase)
4949 * | -0.375 (initial phase)
4956 u16
skl_scaler_calc_phase(int sub
, int scale
, bool chroma_cosited
)
4958 int phase
= -0x8000;
4962 phase
+= (sub
- 1) * 0x8000 / sub
;
4964 phase
+= scale
/ (2 * sub
);
4967 * Hardware initial phase limited to [-0.5:1.5].
4968 * Since the max hardware scale factor is 3.0, we
4969 * should never actually exceed 1.0 here.
4971 WARN_ON(phase
< -0x8000 || phase
> 0x18000);
4974 phase
= 0x10000 + phase
;
4976 trip
= PS_PHASE_TRIP
;
4978 return ((phase
>> 2) & PS_PHASE_MASK
) | trip
;
/*
 * skl_update_scaler - stage (or release) a hardware scaler for a
 * plane/crtc user in crtc_state->scaler_state. Validates IF-ID mode,
 * NV12 minimum sizes and the per-generation min/max scaler limits,
 * then marks @scaler_user in scaler_users. Returns 0 or a negative
 * error on the (dropped) return paths.
 * NOTE(review): extract dropped return statements and braces; code
 * kept byte-identical.
 */
4982 skl_update_scaler(struct intel_crtc_state
*crtc_state
, bool force_detach
,
4983 unsigned int scaler_user
, int *scaler_id
,
4984 int src_w
, int src_h
, int dst_w
, int dst_h
,
4985 const struct drm_format_info
*format
, bool need_scaler
)
4987 struct intel_crtc_scaler_state
*scaler_state
=
4988 &crtc_state
->scaler_state
;
4989 struct intel_crtc
*intel_crtc
=
4990 to_intel_crtc(crtc_state
->base
.crtc
);
4991 struct drm_i915_private
*dev_priv
= to_i915(intel_crtc
->base
.dev
);
4992 const struct drm_display_mode
*adjusted_mode
=
4993 &crtc_state
->base
.adjusted_mode
;
4996 * Src coordinates are already rotated by 270 degrees for
4997 * the 90/270 degree plane rotation cases (to match the
4998 * GTT mapping), hence no need to account for rotation here.
5000 if (src_w
!= dst_w
|| src_h
!= dst_h
)
5004 * Scaling/fitting not supported in IF-ID mode in GEN9+
5005 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5006 * Once NV12 is enabled, handle it here while allocating scaler
5009 if (INTEL_GEN(dev_priv
) >= 9 && crtc_state
->base
.enable
&&
5010 need_scaler
&& adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
) {
5011 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5016 * if plane is being disabled or scaler is no more required or force detach
5017 * - free scaler bound to this plane/crtc
5018 * - in order to do this, update crtc->scaler_usage
5020 * Here scaler state in crtc_state is set free so that
5021 * scaler can be assigned to other user. Actual register
5022 * update to free the scaler is done in plane/panel-fit programming.
5023 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5025 if (force_detach
|| !need_scaler
) {
5026 if (*scaler_id
>= 0) {
5027 scaler_state
->scaler_users
&= ~(1 << scaler_user
);
5028 scaler_state
->scalers
[*scaler_id
].in_use
= 0;
5030 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5031 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5032 intel_crtc
->pipe
, scaler_user
, *scaler_id
,
5033 scaler_state
->scaler_users
);
5039 if (format
&& format
->format
== DRM_FORMAT_NV12
&&
5040 (src_h
< SKL_MIN_YUV_420_SRC_H
|| src_w
< SKL_MIN_YUV_420_SRC_W
)) {
5041 DRM_DEBUG_KMS("NV12: src dimensions not met\n");
/* Range check: gen11 (ICL) has larger max scaler limits than SKL. */
5046 if (src_w
< SKL_MIN_SRC_W
|| src_h
< SKL_MIN_SRC_H
||
5047 dst_w
< SKL_MIN_DST_W
|| dst_h
< SKL_MIN_DST_H
||
5048 (IS_GEN(dev_priv
, 11) &&
5049 (src_w
> ICL_MAX_SRC_W
|| src_h
> ICL_MAX_SRC_H
||
5050 dst_w
> ICL_MAX_DST_W
|| dst_h
> ICL_MAX_DST_H
)) ||
5051 (!IS_GEN(dev_priv
, 11) &&
5052 (src_w
> SKL_MAX_SRC_W
|| src_h
> SKL_MAX_SRC_H
||
5053 dst_w
> SKL_MAX_DST_W
|| dst_h
> SKL_MAX_DST_H
))) {
5054 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5055 "size is out of scaler range\n",
5056 intel_crtc
->pipe
, scaler_user
, src_w
, src_h
, dst_w
, dst_h
);
5060 /* mark this plane as a scaler user in crtc_state */
5061 scaler_state
->scaler_users
|= (1 << scaler_user
);
5062 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5063 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5064 intel_crtc
->pipe
, scaler_user
, src_w
, src_h
, dst_w
, dst_h
,
5065 scaler_state
->scaler_users
);
/*
 * Stages the pipe (panel-fit) scaler for the CRTC by delegating to
 * skl_update_scaler() with SKL_CRTC_INDEX; YCbCr 4:2:0 output forces
 * need_scaler (branch body dropped by the extract).
 */
5071 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5073 * @state: crtc's scaler state
5076 * 0 - scaler_usage updated successfully
5077 * error - requested scaling cannot be supported or other error condition
5079 int skl_update_scaler_crtc(struct intel_crtc_state
*state
)
5081 const struct drm_display_mode
*adjusted_mode
= &state
->base
.adjusted_mode
;
5082 bool need_scaler
= false;
5084 if (state
->output_format
== INTEL_OUTPUT_FORMAT_YCBCR420
)
5087 return skl_update_scaler(state
, !state
->base
.active
, SKL_CRTC_INDEX
,
5088 &state
->scaler_state
.scaler_id
,
5089 state
->pipe_src_w
, state
->pipe_src_h
,
5090 adjusted_mode
->crtc_hdisplay
,
5091 adjusted_mode
->crtc_vdisplay
, NULL
, need_scaler
);
/*
 * Stages a scaler for a plane: delegates to skl_update_scaler() with the
 * plane's src/dst rectangles, then rejects color-keyed scaling and
 * unsupported pixel formats. "ret" declaration and return paths were
 * dropped by the extract; code kept byte-identical.
 */
5095 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5096 * @crtc_state: crtc's scaler state
5097 * @plane_state: atomic plane state to update
5100 * 0 - scaler_usage updated successfully
5101 * error - requested scaling cannot be supported or other error condition
5103 static int skl_update_scaler_plane(struct intel_crtc_state
*crtc_state
,
5104 struct intel_plane_state
*plane_state
)
5106 struct intel_plane
*intel_plane
=
5107 to_intel_plane(plane_state
->base
.plane
);
5108 struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
5110 bool force_detach
= !fb
|| !plane_state
->base
.visible
;
5111 bool need_scaler
= false;
5113 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5114 if (!icl_is_hdr_plane(intel_plane
) &&
5115 fb
&& fb
->format
->format
== DRM_FORMAT_NV12
)
5118 ret
= skl_update_scaler(crtc_state
, force_detach
,
5119 drm_plane_index(&intel_plane
->base
),
5120 &plane_state
->scaler_id
,
5121 drm_rect_width(&plane_state
->base
.src
) >> 16,
5122 drm_rect_height(&plane_state
->base
.src
) >> 16,
5123 drm_rect_width(&plane_state
->base
.dst
),
5124 drm_rect_height(&plane_state
->base
.dst
),
5125 fb
? fb
->format
: NULL
, need_scaler
);
5127 if (ret
|| plane_state
->scaler_id
< 0)
5130 /* check colorkey */
5131 if (plane_state
->ckey
.flags
) {
5132 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5133 intel_plane
->base
.base
.id
,
5134 intel_plane
->base
.name
);
5138 /* Check src format */
5139 switch (fb
->format
->format
) {
5140 case DRM_FORMAT_RGB565
:
5141 case DRM_FORMAT_XBGR8888
:
5142 case DRM_FORMAT_XRGB8888
:
5143 case DRM_FORMAT_ABGR8888
:
5144 case DRM_FORMAT_ARGB8888
:
5145 case DRM_FORMAT_XRGB2101010
:
5146 case DRM_FORMAT_XBGR2101010
:
5147 case DRM_FORMAT_YUYV
:
5148 case DRM_FORMAT_YVYU
:
5149 case DRM_FORMAT_UYVY
:
5150 case DRM_FORMAT_VYUY
:
5151 case DRM_FORMAT_NV12
:
5154 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5155 intel_plane
->base
.base
.id
, intel_plane
->base
.name
,
5156 fb
->base
.id
, fb
->format
->format
);
/*
 * skylake_scaler_disable - detach every hardware scaler on this CRTC.
 * (Loop counter declaration dropped by the extract.)
 */
5163 static void skylake_scaler_disable(struct intel_crtc
*crtc
)
5167 for (i
= 0; i
< crtc
->num_scalers
; i
++)
5168 skl_detach_scaler(crtc
, i
);
/*
 * skylake_pfit_enable - program the SKL pipe scaler (panel fitter):
 * computes 16.16 h/v scale factors from pipe source size vs. pfit
 * window size, derives the initial phases via skl_scaler_calc_phase(),
 * and writes SKL_PS_CTRL/VPHASE/HPHASE/WIN_POS/WIN_SZ for the assigned
 * scaler id. ("id" declaration dropped by the extract.)
 */
5171 static void skylake_pfit_enable(const struct intel_crtc_state
*crtc_state
)
5173 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
5174 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5175 enum pipe pipe
= crtc
->pipe
;
5176 const struct intel_crtc_scaler_state
*scaler_state
=
5177 &crtc_state
->scaler_state
;
5179 if (crtc_state
->pch_pfit
.enabled
) {
5180 u16 uv_rgb_hphase
, uv_rgb_vphase
;
5181 int pfit_w
, pfit_h
, hscale
, vscale
;
5184 if (WARN_ON(crtc_state
->scaler_state
.scaler_id
< 0))
5187 pfit_w
= (crtc_state
->pch_pfit
.size
>> 16) & 0xFFFF;
5188 pfit_h
= crtc_state
->pch_pfit
.size
& 0xFFFF;
5190 hscale
= (crtc_state
->pipe_src_w
<< 16) / pfit_w
;
5191 vscale
= (crtc_state
->pipe_src_h
<< 16) / pfit_h
;
5193 uv_rgb_hphase
= skl_scaler_calc_phase(1, hscale
, false);
5194 uv_rgb_vphase
= skl_scaler_calc_phase(1, vscale
, false);
5196 id
= scaler_state
->scaler_id
;
5197 I915_WRITE(SKL_PS_CTRL(pipe
, id
), PS_SCALER_EN
|
5198 PS_FILTER_MEDIUM
| scaler_state
->scalers
[id
].mode
);
5199 I915_WRITE_FW(SKL_PS_VPHASE(pipe
, id
),
5200 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase
));
5201 I915_WRITE_FW(SKL_PS_HPHASE(pipe
, id
),
5202 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase
));
5203 I915_WRITE(SKL_PS_WIN_POS(pipe
, id
), crtc_state
->pch_pfit
.pos
);
5204 I915_WRITE(SKL_PS_WIN_SZ(pipe
, id
), crtc_state
->pch_pfit
.size
);
/*
 * ironlake_pfit_enable - enable the ILK-style panel fitter: writes
 * PF_CTL (with the IVB/HSW pipe-select variant where required) plus the
 * fitter window position and size from crtc_state->pch_pfit.
 */
5208 static void ironlake_pfit_enable(const struct intel_crtc_state
*crtc_state
)
5210 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
5211 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5212 int pipe
= crtc
->pipe
;
5214 if (crtc_state
->pch_pfit
.enabled
) {
5215 /* Force use of hard-coded filter coefficients
5216 * as some pre-programmed values are broken,
5219 if (IS_IVYBRIDGE(dev_priv
) || IS_HASWELL(dev_priv
))
5220 I915_WRITE(PF_CTL(pipe
), PF_ENABLE
| PF_FILTER_MED_3x3
|
5221 PF_PIPE_SEL_IVB(pipe
));
5223 I915_WRITE(PF_CTL(pipe
), PF_ENABLE
| PF_FILTER_MED_3x3
);
5224 I915_WRITE(PF_WIN_POS(pipe
), crtc_state
->pch_pfit
.pos
);
5225 I915_WRITE(PF_WIN_SZ(pipe
), crtc_state
->pch_pfit
.size
);
/*
 * hsw_enable_ips - enable Intermediate Pixel Storage. On BDW this goes
 * through the pcode mailbox (under pcu_lock); otherwise IPS_CTL is
 * written directly and we wait for the enable bit to latch at vblank.
 * Requires at least one non-cursor plane active (WARN below).
 */
5229 void hsw_enable_ips(const struct intel_crtc_state
*crtc_state
)
5231 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
5232 struct drm_device
*dev
= crtc
->base
.dev
;
5233 struct drm_i915_private
*dev_priv
= to_i915(dev
);
5235 if (!crtc_state
->ips_enabled
)
5239 * We can only enable IPS after we enable a plane and wait for a vblank
5240 * This function is called from post_plane_update, which is run after
5243 WARN_ON(!(crtc_state
->active_planes
& ~BIT(PLANE_CURSOR
)));
5245 if (IS_BROADWELL(dev_priv
)) {
5246 mutex_lock(&dev_priv
->pcu_lock
);
5247 WARN_ON(sandybridge_pcode_write(dev_priv
, DISPLAY_IPS_CONTROL
,
5248 IPS_ENABLE
| IPS_PCODE_CONTROL
));
5249 mutex_unlock(&dev_priv
->pcu_lock
);
5250 /* Quoting Art Runyan: "its not safe to expect any particular
5251 * value in IPS_CTL bit 31 after enabling IPS through the
5252 * mailbox." Moreover, the mailbox may return a bogus state,
5253 * so we need to just enable it and continue on.
5256 I915_WRITE(IPS_CTL
, IPS_ENABLE
);
5257 /* The bit only becomes 1 in the next vblank, so this wait here
5258 * is essentially intel_wait_for_vblank. If we don't have this
5259 * and don't wait for vblanks until the end of crtc_enable, then
5260 * the HW state readout code will complain that the expected
5261 * IPS_CTL value is not the one we read. */
5262 if (intel_wait_for_register(dev_priv
,
5263 IPS_CTL
, IPS_ENABLE
, IPS_ENABLE
,
5265 DRM_ERROR("Timed out waiting for IPS enable\n");
/*
 * hsw_disable_ips - disable IPS; BDW via the pcode mailbox (waiting up
 * to the extended timeout for IPS_CTL to clear), other platforms by
 * clearing IPS_CTL directly, then wait one vblank before the plane may
 * be disabled.
 */
5269 void hsw_disable_ips(const struct intel_crtc_state
*crtc_state
)
5271 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
5272 struct drm_device
*dev
= crtc
->base
.dev
;
5273 struct drm_i915_private
*dev_priv
= to_i915(dev
);
5275 if (!crtc_state
->ips_enabled
)
5278 if (IS_BROADWELL(dev_priv
)) {
5279 mutex_lock(&dev_priv
->pcu_lock
);
5280 WARN_ON(sandybridge_pcode_write(dev_priv
, DISPLAY_IPS_CONTROL
, 0));
5281 mutex_unlock(&dev_priv
->pcu_lock
);
5283 * Wait for PCODE to finish disabling IPS. The BSpec specified
5284 * 42ms timeout value leads to occasional timeouts so use 100ms
5287 if (intel_wait_for_register(dev_priv
,
5288 IPS_CTL
, IPS_ENABLE
, 0,
5290 DRM_ERROR("Timed out waiting for IPS disable\n");
5292 I915_WRITE(IPS_CTL
, 0);
5293 POSTING_READ(IPS_CTL
);
5296 /* We need to wait for a vblank before we can disable the plane. */
5297 intel_wait_for_vblank(dev_priv
, crtc
->pipe
);
/*
 * intel_crtc_dpms_overlay_disable - switch off the legacy video overlay
 * attached to this CRTC (if any), under dev->struct_mutex. Userspace is
 * expected to re-enable it after the modeset.
 */
5300 static void intel_crtc_dpms_overlay_disable(struct intel_crtc
*intel_crtc
)
5302 if (intel_crtc
->overlay
) {
5303 struct drm_device
*dev
= intel_crtc
->base
.dev
;
5305 mutex_lock(&dev
->struct_mutex
);
5306 (void) intel_overlay_switch_off(intel_crtc
->overlay
);
5307 mutex_unlock(&dev
->struct_mutex
);
5310 /* Let userspace switch the overlay on again. In most cases userspace
5311 * has to recompute where to put it anyway.
/*
 * Post-primary-plane-enable bookkeeping: (re)arm gen2 FIFO underrun
 * reporting and manually poll for CPU/PCH underruns.
 */
5316 * intel_post_enable_primary - Perform operations after enabling primary plane
5317 * @crtc: the CRTC whose primary plane was just enabled
5318 * @new_crtc_state: the enabling state
5320 * Performs potentially sleeping operations that must be done after the primary
5321 * plane is enabled, such as updating FBC and IPS. Note that this may be
5322 * called due to an explicit primary plane update, or due to an implicit
5323 * re-enable that is caused when a sprite plane is updated to no longer
5324 * completely hide the primary plane.
5327 intel_post_enable_primary(struct drm_crtc
*crtc
,
5328 const struct intel_crtc_state
*new_crtc_state
)
5330 struct drm_device
*dev
= crtc
->dev
;
5331 struct drm_i915_private
*dev_priv
= to_i915(dev
);
5332 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5333 int pipe
= intel_crtc
->pipe
;
5336 * Gen2 reports pipe underruns whenever all planes are disabled.
5337 * So don't enable underrun reporting before at least some planes
5339 * FIXME: Need to fix the logic to work when we turn off all planes
5340 * but leave the pipe running.
5342 if (IS_GEN(dev_priv
, 2))
5343 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, true);
5345 /* Underruns don't always raise interrupts, so check manually. */
5346 intel_check_cpu_fifo_underruns(dev_priv
);
5347 intel_check_pch_fifo_underruns(dev_priv
);
/*
 * Non-atomic pre-disable path for the primary plane: turn off gen2
 * underrun reporting, disable IPS, and on GMCH platforms drop memory
 * self-refresh and wait a vblank so the plane disable actually lands.
 */
5350 /* FIXME get rid of this and use pre_plane_update */
5352 intel_pre_disable_primary_noatomic(struct drm_crtc
*crtc
)
5354 struct drm_device
*dev
= crtc
->dev
;
5355 struct drm_i915_private
*dev_priv
= to_i915(dev
);
5356 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5357 int pipe
= intel_crtc
->pipe
;
5360 * Gen2 reports pipe underruns whenever all planes are disabled.
5361 * So disable underrun reporting before all the planes get disabled.
5363 if (IS_GEN(dev_priv
, 2))
5364 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, false);
5366 hsw_disable_ips(to_intel_crtc_state(crtc
->state
));
5369 * Vblank time updates from the shadow to live plane control register
5370 * are blocked if the memory self-refresh mode is active at that
5371 * moment. So to make sure the plane gets truly disabled, disable
5372 * first the self-refresh mode. The self-refresh enable bit in turn
5373 * will be checked/applied by the HW only at the next frame start
5374 * event which is after the vblank start event, so we need to have a
5375 * wait-for-vblank between disabling the plane and the pipe.
5377 if (HAS_GMCH(dev_priv
) &&
5378 intel_set_memory_cxsr(dev_priv
, false))
5379 intel_wait_for_vblank(dev_priv
, pipe
);
/*
 * hsw_pre_update_disable_ips - decide whether IPS must be disabled
 * before this plane update: always across a modeset or before split-
 * gamma LUT programming on HSW, or when the new state turns IPS off.
 * (The "return false/true" lines for the early branches were dropped
 * by the extract.)
 */
5382 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state
*old_crtc_state
,
5383 const struct intel_crtc_state
*new_crtc_state
)
5385 struct intel_crtc
*crtc
= to_intel_crtc(new_crtc_state
->base
.crtc
);
5386 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5388 if (!old_crtc_state
->ips_enabled
)
5391 if (needs_modeset(&new_crtc_state
->base
))
5395 * Workaround : Do not read or write the pipe palette/gamma data while
5396 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5398 * Disable IPS before we program the LUT.
5400 if (IS_HASWELL(dev_priv
) &&
5401 (new_crtc_state
->base
.color_mgmt_changed
||
5402 new_crtc_state
->update_pipe
) &&
5403 new_crtc_state
->gamma_mode
== GAMMA_MODE_MODE_SPLIT
)
5406 return !new_crtc_state
->ips_enabled
;
/*
 * hsw_post_update_enable_ips - decide whether IPS must be (re)enabled
 * after this plane update: after a modeset, after HSW split-gamma LUT
 * programming, on the first fastset (inherited mode, since BDW IPS
 * state can't be read out), or when the old state had IPS off.
 */
5409 static bool hsw_post_update_enable_ips(const struct intel_crtc_state
*old_crtc_state
,
5410 const struct intel_crtc_state
*new_crtc_state
)
5412 struct intel_crtc
*crtc
= to_intel_crtc(new_crtc_state
->base
.crtc
);
5413 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5415 if (!new_crtc_state
->ips_enabled
)
5418 if (needs_modeset(&new_crtc_state
->base
))
5422 * Workaround : Do not read or write the pipe palette/gamma data while
5423 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5425 * Re-enable IPS after the LUT has been programmed.
5427 if (IS_HASWELL(dev_priv
) &&
5428 (new_crtc_state
->base
.color_mgmt_changed
||
5429 new_crtc_state
->update_pipe
) &&
5430 new_crtc_state
->gamma_mode
== GAMMA_MODE_MODE_SPLIT
)
5434 * We can't read out IPS on broadwell, assume the worst and
5435 * forcibly enable IPS on the first fastset.
5437 if (new_crtc_state
->update_pipe
&&
5438 old_crtc_state
->base
.adjusted_mode
.private_flags
& I915_MODE_FLAG_INHERITED
)
5441 return !old_crtc_state
->ips_enabled
;
/*
 * needs_nv12_wa - true when Display WA #0827 applies: NV12 planes
 * active on a gen9 (non-GLK) platform. (Return statements dropped by
 * the extract.)
 */
5444 static bool needs_nv12_wa(struct drm_i915_private
*dev_priv
,
5445 const struct intel_crtc_state
*crtc_state
)
5447 if (!crtc_state
->nv12_planes
)
5450 /* WA Display #0827: Gen9:all */
5451 if (IS_GEN(dev_priv
, 9) && !IS_GEMINILAKE(dev_priv
))
/*
 * intel_post_plane_update - post-commit CRTC work: frontbuffer flip
 * notification, post-vblank watermark update, IPS re-enable, FBC
 * post-update, primary-plane post-enable, and clearing the NV12
 * clock-gating workaround when it is no longer needed.
 */
5457 static void intel_post_plane_update(struct intel_crtc_state
*old_crtc_state
)
5459 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
5460 struct drm_device
*dev
= crtc
->base
.dev
;
5461 struct drm_i915_private
*dev_priv
= to_i915(dev
);
5462 struct drm_atomic_state
*old_state
= old_crtc_state
->base
.state
;
5463 struct intel_crtc_state
*pipe_config
=
5464 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state
),
5466 struct drm_plane
*primary
= crtc
->base
.primary
;
5467 struct drm_plane_state
*old_primary_state
=
5468 drm_atomic_get_old_plane_state(old_state
, primary
);
5470 intel_frontbuffer_flip(to_i915(crtc
->base
.dev
), pipe_config
->fb_bits
);
5472 if (pipe_config
->update_wm_post
&& pipe_config
->base
.active
)
5473 intel_update_watermarks(crtc
);
5475 if (hsw_post_update_enable_ips(old_crtc_state
, pipe_config
))
5476 hsw_enable_ips(pipe_config
);
5478 if (old_primary_state
) {
5479 struct drm_plane_state
*new_primary_state
=
5480 drm_atomic_get_new_plane_state(old_state
, primary
);
5482 intel_fbc_post_update(crtc
);
5484 if (new_primary_state
->visible
&&
5485 (needs_modeset(&pipe_config
->base
) ||
5486 !old_primary_state
->visible
))
5487 intel_post_enable_primary(&crtc
->base
, pipe_config
);
5490 /* Display WA 827 */
5491 if (needs_nv12_wa(dev_priv
, old_crtc_state
) &&
5492 !needs_nv12_wa(dev_priv
, pipe_config
)) {
5493 skl_wa_clkgate(dev_priv
, crtc
->pipe
, false);
/*
 * intel_pre_plane_update - pre-commit CRTC work, the mirror of
 * intel_post_plane_update(): IPS pre-disable, FBC pre-update, gen2
 * underrun-reporting disable, NV12 WA enable, cxsr disable + vblank
 * wait (GMCH), the IVB sprite-scaling LP-watermark workaround, and
 * intermediate watermark programming for non-modeset updates.
 */
5497 static void intel_pre_plane_update(struct intel_crtc_state
*old_crtc_state
,
5498 struct intel_crtc_state
*pipe_config
)
5500 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
5501 struct drm_device
*dev
= crtc
->base
.dev
;
5502 struct drm_i915_private
*dev_priv
= to_i915(dev
);
5503 struct drm_atomic_state
*old_state
= old_crtc_state
->base
.state
;
5504 struct drm_plane
*primary
= crtc
->base
.primary
;
5505 struct drm_plane_state
*old_primary_state
=
5506 drm_atomic_get_old_plane_state(old_state
, primary
);
5507 bool modeset
= needs_modeset(&pipe_config
->base
);
5508 struct intel_atomic_state
*old_intel_state
=
5509 to_intel_atomic_state(old_state
);
5511 if (hsw_pre_update_disable_ips(old_crtc_state
, pipe_config
))
5512 hsw_disable_ips(old_crtc_state
);
5514 if (old_primary_state
) {
5515 struct intel_plane_state
*new_primary_state
=
5516 intel_atomic_get_new_plane_state(old_intel_state
,
5517 to_intel_plane(primary
));
5519 intel_fbc_pre_update(crtc
, pipe_config
, new_primary_state
);
5521 * Gen2 reports pipe underruns whenever all planes are disabled.
5522 * So disable underrun reporting before all the planes get disabled.
5524 if (IS_GEN(dev_priv
, 2) && old_primary_state
->visible
&&
5525 (modeset
|| !new_primary_state
->base
.visible
))
5526 intel_set_cpu_fifo_underrun_reporting(dev_priv
, crtc
->pipe
, false);
5529 /* Display WA 827 */
5530 if (!needs_nv12_wa(dev_priv
, old_crtc_state
) &&
5531 needs_nv12_wa(dev_priv
, pipe_config
)) {
5532 skl_wa_clkgate(dev_priv
, crtc
->pipe
, true);
5536 * Vblank time updates from the shadow to live plane control register
5537 * are blocked if the memory self-refresh mode is active at that
5538 * moment. So to make sure the plane gets truly disabled, disable
5539 * first the self-refresh mode. The self-refresh enable bit in turn
5540 * will be checked/applied by the HW only at the next frame start
5541 * event which is after the vblank start event, so we need to have a
5542 * wait-for-vblank between disabling the plane and the pipe.
5544 if (HAS_GMCH(dev_priv
) && old_crtc_state
->base
.active
&&
5545 pipe_config
->disable_cxsr
&& intel_set_memory_cxsr(dev_priv
, false))
5546 intel_wait_for_vblank(dev_priv
, crtc
->pipe
);
5549 * IVB workaround: must disable low power watermarks for at least
5550 * one frame before enabling scaling. LP watermarks can be re-enabled
5551 * when scaling is disabled.
5553 * WaCxSRDisabledForSpriteScaling:ivb
5555 if (pipe_config
->disable_lp_wm
&& ilk_disable_lp_wm(dev
) &&
5556 old_crtc_state
->base
.active
)
5557 intel_wait_for_vblank(dev_priv
, crtc
->pipe
);
5560 * If we're doing a modeset, we're done. No need to do any pre-vblank
5561 * watermark programming here.
5563 if (needs_modeset(&pipe_config
->base
))
5567 * For platforms that support atomic watermarks, program the
5568 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5569 * will be the intermediate values that are safe for both pre- and
5570 * post- vblank; when vblank happens, the 'active' values will be set
5571 * to the final 'target' values and we'll do this again to get the
5572 * optimal watermarks. For gen9+ platforms, the values we program here
5573 * will be the final target values which will get automatically latched
5574 * at vblank time; no further programming will be necessary.
5576 * If a platform hasn't been transitioned to atomic watermarks yet,
5577 * we'll continue to update watermarks the old way, if flags tell
5580 if (dev_priv
->display
.initial_watermarks
!= NULL
)
5581 dev_priv
->display
.initial_watermarks(old_intel_state
,
5583 else if (pipe_config
->update_wm_pre
)
5584 intel_update_watermarks(crtc
);
/*
 * intel_crtc_disable_planes - disable every plane of this CRTC that is
 * flagged in new_crtc_state->update_planes (after switching off the
 * legacy overlay), collecting frontbuffer bits of previously-visible
 * planes for a single frontbuffer-flip notification.
 * ("i" declaration dropped by the extract.)
 */
5587 static void intel_crtc_disable_planes(struct intel_atomic_state
*state
,
5588 struct intel_crtc
*crtc
)
5590 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5591 const struct intel_crtc_state
*new_crtc_state
=
5592 intel_atomic_get_new_crtc_state(state
, crtc
);
5593 unsigned int update_mask
= new_crtc_state
->update_planes
;
5594 const struct intel_plane_state
*old_plane_state
;
5595 struct intel_plane
*plane
;
5596 unsigned fb_bits
= 0;
5599 intel_crtc_dpms_overlay_disable(crtc
);
5601 for_each_old_intel_plane_in_state(state
, plane
, old_plane_state
, i
) {
5602 if (crtc
->pipe
!= plane
->pipe
||
5603 !(update_mask
& BIT(plane
->id
)))
5606 plane
->disable_plane(plane
, new_crtc_state
);
5608 if (old_plane_state
->base
.visible
)
5609 fb_bits
|= plane
->frontbuffer_bit
;
5612 intel_frontbuffer_flip(dev_priv
, fb_bits
);
5615 static void intel_encoders_pre_pll_enable(struct drm_crtc
*crtc
,
5616 struct intel_crtc_state
*crtc_state
,
5617 struct drm_atomic_state
*old_state
)
5619 struct drm_connector_state
*conn_state
;
5620 struct drm_connector
*conn
;
5623 for_each_new_connector_in_state(old_state
, conn
, conn_state
, i
) {
5624 struct intel_encoder
*encoder
=
5625 to_intel_encoder(conn_state
->best_encoder
);
5627 if (conn_state
->crtc
!= crtc
)
5630 if (encoder
->pre_pll_enable
)
5631 encoder
->pre_pll_enable(encoder
, crtc_state
, conn_state
);
5635 static void intel_encoders_pre_enable(struct drm_crtc
*crtc
,
5636 struct intel_crtc_state
*crtc_state
,
5637 struct drm_atomic_state
*old_state
)
5639 struct drm_connector_state
*conn_state
;
5640 struct drm_connector
*conn
;
5643 for_each_new_connector_in_state(old_state
, conn
, conn_state
, i
) {
5644 struct intel_encoder
*encoder
=
5645 to_intel_encoder(conn_state
->best_encoder
);
5647 if (conn_state
->crtc
!= crtc
)
5650 if (encoder
->pre_enable
)
5651 encoder
->pre_enable(encoder
, crtc_state
, conn_state
);
5655 static void intel_encoders_enable(struct drm_crtc
*crtc
,
5656 struct intel_crtc_state
*crtc_state
,
5657 struct drm_atomic_state
*old_state
)
5659 struct drm_connector_state
*conn_state
;
5660 struct drm_connector
*conn
;
5663 for_each_new_connector_in_state(old_state
, conn
, conn_state
, i
) {
5664 struct intel_encoder
*encoder
=
5665 to_intel_encoder(conn_state
->best_encoder
);
5667 if (conn_state
->crtc
!= crtc
)
5670 if (encoder
->enable
)
5671 encoder
->enable(encoder
, crtc_state
, conn_state
);
5672 intel_opregion_notify_encoder(encoder
, true);
5676 static void intel_encoders_disable(struct drm_crtc
*crtc
,
5677 struct intel_crtc_state
*old_crtc_state
,
5678 struct drm_atomic_state
*old_state
)
5680 struct drm_connector_state
*old_conn_state
;
5681 struct drm_connector
*conn
;
5684 for_each_old_connector_in_state(old_state
, conn
, old_conn_state
, i
) {
5685 struct intel_encoder
*encoder
=
5686 to_intel_encoder(old_conn_state
->best_encoder
);
5688 if (old_conn_state
->crtc
!= crtc
)
5691 intel_opregion_notify_encoder(encoder
, false);
5692 if (encoder
->disable
)
5693 encoder
->disable(encoder
, old_crtc_state
, old_conn_state
);
5697 static void intel_encoders_post_disable(struct drm_crtc
*crtc
,
5698 struct intel_crtc_state
*old_crtc_state
,
5699 struct drm_atomic_state
*old_state
)
5701 struct drm_connector_state
*old_conn_state
;
5702 struct drm_connector
*conn
;
5705 for_each_old_connector_in_state(old_state
, conn
, old_conn_state
, i
) {
5706 struct intel_encoder
*encoder
=
5707 to_intel_encoder(old_conn_state
->best_encoder
);
5709 if (old_conn_state
->crtc
!= crtc
)
5712 if (encoder
->post_disable
)
5713 encoder
->post_disable(encoder
, old_crtc_state
, old_conn_state
);
5717 static void intel_encoders_post_pll_disable(struct drm_crtc
*crtc
,
5718 struct intel_crtc_state
*old_crtc_state
,
5719 struct drm_atomic_state
*old_state
)
5721 struct drm_connector_state
*old_conn_state
;
5722 struct drm_connector
*conn
;
5725 for_each_old_connector_in_state(old_state
, conn
, old_conn_state
, i
) {
5726 struct intel_encoder
*encoder
=
5727 to_intel_encoder(old_conn_state
->best_encoder
);
5729 if (old_conn_state
->crtc
!= crtc
)
5732 if (encoder
->post_pll_disable
)
5733 encoder
->post_pll_disable(encoder
, old_crtc_state
, old_conn_state
);
5737 static void intel_encoders_update_pipe(struct drm_crtc
*crtc
,
5738 struct intel_crtc_state
*crtc_state
,
5739 struct drm_atomic_state
*old_state
)
5741 struct drm_connector_state
*conn_state
;
5742 struct drm_connector
*conn
;
5745 for_each_new_connector_in_state(old_state
, conn
, conn_state
, i
) {
5746 struct intel_encoder
*encoder
=
5747 to_intel_encoder(conn_state
->best_encoder
);
5749 if (conn_state
->crtc
!= crtc
)
5752 if (encoder
->update_pipe
)
5753 encoder
->update_pipe(encoder
, crtc_state
, conn_state
);
5757 static void ironlake_crtc_enable(struct intel_crtc_state
*pipe_config
,
5758 struct drm_atomic_state
*old_state
)
5760 struct drm_crtc
*crtc
= pipe_config
->base
.crtc
;
5761 struct drm_device
*dev
= crtc
->dev
;
5762 struct drm_i915_private
*dev_priv
= to_i915(dev
);
5763 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5764 int pipe
= intel_crtc
->pipe
;
5765 struct intel_atomic_state
*old_intel_state
=
5766 to_intel_atomic_state(old_state
);
5768 if (WARN_ON(intel_crtc
->active
))
5772 * Sometimes spurious CPU pipe underruns happen during FDI
5773 * training, at least with VGA+HDMI cloning. Suppress them.
5775 * On ILK we get an occasional spurious CPU pipe underruns
5776 * between eDP port A enable and vdd enable. Also PCH port
5777 * enable seems to result in the occasional CPU pipe underrun.
5779 * Spurious PCH underruns also occur during PCH enabling.
5781 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, false);
5782 intel_set_pch_fifo_underrun_reporting(dev_priv
, pipe
, false);
5784 if (pipe_config
->has_pch_encoder
)
5785 intel_prepare_shared_dpll(pipe_config
);
5787 if (intel_crtc_has_dp_encoder(pipe_config
))
5788 intel_dp_set_m_n(pipe_config
, M1_N1
);
5790 intel_set_pipe_timings(pipe_config
);
5791 intel_set_pipe_src_size(pipe_config
);
5793 if (pipe_config
->has_pch_encoder
) {
5794 intel_cpu_transcoder_set_m_n(pipe_config
,
5795 &pipe_config
->fdi_m_n
, NULL
);
5798 ironlake_set_pipeconf(pipe_config
);
5800 intel_crtc
->active
= true;
5802 intel_encoders_pre_enable(crtc
, pipe_config
, old_state
);
5804 if (pipe_config
->has_pch_encoder
) {
5805 /* Note: FDI PLL enabling _must_ be done before we enable the
5806 * cpu pipes, hence this is separate from all the other fdi/pch
5808 ironlake_fdi_pll_enable(pipe_config
);
5810 assert_fdi_tx_disabled(dev_priv
, pipe
);
5811 assert_fdi_rx_disabled(dev_priv
, pipe
);
5814 ironlake_pfit_enable(pipe_config
);
5817 * On ILK+ LUT must be loaded before the pipe is running but with
5820 intel_color_load_luts(pipe_config
);
5821 intel_color_commit(pipe_config
);
5823 if (dev_priv
->display
.initial_watermarks
!= NULL
)
5824 dev_priv
->display
.initial_watermarks(old_intel_state
, pipe_config
);
5825 intel_enable_pipe(pipe_config
);
5827 if (pipe_config
->has_pch_encoder
)
5828 ironlake_pch_enable(old_intel_state
, pipe_config
);
5830 assert_vblank_disabled(crtc
);
5831 intel_crtc_vblank_on(pipe_config
);
5833 intel_encoders_enable(crtc
, pipe_config
, old_state
);
5835 if (HAS_PCH_CPT(dev_priv
))
5836 cpt_verify_modeset(dev
, intel_crtc
->pipe
);
5839 * Must wait for vblank to avoid spurious PCH FIFO underruns.
5840 * And a second vblank wait is needed at least on ILK with
5841 * some interlaced HDMI modes. Let's do the double wait always
5842 * in case there are more corner cases we don't know about.
5844 if (pipe_config
->has_pch_encoder
) {
5845 intel_wait_for_vblank(dev_priv
, pipe
);
5846 intel_wait_for_vblank(dev_priv
, pipe
);
5848 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, true);
5849 intel_set_pch_fifo_underrun_reporting(dev_priv
, pipe
, true);
5852 /* IPS only exists on ULT machines and is tied to pipe A. */
5853 static bool hsw_crtc_supports_ips(struct intel_crtc
*crtc
)
5855 return HAS_IPS(to_i915(crtc
->base
.dev
)) && crtc
->pipe
== PIPE_A
;
5858 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private
*dev_priv
,
5859 enum pipe pipe
, bool apply
)
5861 u32 val
= I915_READ(CLKGATE_DIS_PSL(pipe
));
5862 u32 mask
= DPF_GATING_DIS
| DPF_RAM_GATING_DIS
| DPFR_GATING_DIS
;
5869 I915_WRITE(CLKGATE_DIS_PSL(pipe
), val
);
5872 static void icl_pipe_mbus_enable(struct intel_crtc
*crtc
)
5874 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5875 enum pipe pipe
= crtc
->pipe
;
5878 val
= MBUS_DBOX_A_CREDIT(2);
5879 val
|= MBUS_DBOX_BW_CREDIT(1);
5880 val
|= MBUS_DBOX_B_CREDIT(8);
5882 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe
), val
);
5885 static void haswell_crtc_enable(struct intel_crtc_state
*pipe_config
,
5886 struct drm_atomic_state
*old_state
)
5888 struct drm_crtc
*crtc
= pipe_config
->base
.crtc
;
5889 struct drm_i915_private
*dev_priv
= to_i915(crtc
->dev
);
5890 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5891 int pipe
= intel_crtc
->pipe
, hsw_workaround_pipe
;
5892 enum transcoder cpu_transcoder
= pipe_config
->cpu_transcoder
;
5893 struct intel_atomic_state
*old_intel_state
=
5894 to_intel_atomic_state(old_state
);
5895 bool psl_clkgate_wa
;
5897 if (WARN_ON(intel_crtc
->active
))
5900 intel_encoders_pre_pll_enable(crtc
, pipe_config
, old_state
);
5902 if (pipe_config
->shared_dpll
)
5903 intel_enable_shared_dpll(pipe_config
);
5905 intel_encoders_pre_enable(crtc
, pipe_config
, old_state
);
5907 if (intel_crtc_has_dp_encoder(pipe_config
))
5908 intel_dp_set_m_n(pipe_config
, M1_N1
);
5910 if (!transcoder_is_dsi(cpu_transcoder
))
5911 intel_set_pipe_timings(pipe_config
);
5913 intel_set_pipe_src_size(pipe_config
);
5915 if (cpu_transcoder
!= TRANSCODER_EDP
&&
5916 !transcoder_is_dsi(cpu_transcoder
)) {
5917 I915_WRITE(PIPE_MULT(cpu_transcoder
),
5918 pipe_config
->pixel_multiplier
- 1);
5921 if (pipe_config
->has_pch_encoder
) {
5922 intel_cpu_transcoder_set_m_n(pipe_config
,
5923 &pipe_config
->fdi_m_n
, NULL
);
5926 if (!transcoder_is_dsi(cpu_transcoder
))
5927 haswell_set_pipeconf(pipe_config
);
5929 haswell_set_pipemisc(pipe_config
);
5931 intel_crtc
->active
= true;
5933 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
5934 psl_clkgate_wa
= (IS_GEMINILAKE(dev_priv
) || IS_CANNONLAKE(dev_priv
)) &&
5935 pipe_config
->pch_pfit
.enabled
;
5937 glk_pipe_scaler_clock_gating_wa(dev_priv
, pipe
, true);
5939 if (INTEL_GEN(dev_priv
) >= 9)
5940 skylake_pfit_enable(pipe_config
);
5942 ironlake_pfit_enable(pipe_config
);
5945 * On ILK+ LUT must be loaded before the pipe is running but with
5948 intel_color_load_luts(pipe_config
);
5949 intel_color_commit(pipe_config
);
5951 if (INTEL_GEN(dev_priv
) >= 11)
5952 icl_set_pipe_chicken(intel_crtc
);
5954 intel_ddi_set_pipe_settings(pipe_config
);
5955 if (!transcoder_is_dsi(cpu_transcoder
))
5956 intel_ddi_enable_transcoder_func(pipe_config
);
5958 if (dev_priv
->display
.initial_watermarks
!= NULL
)
5959 dev_priv
->display
.initial_watermarks(old_intel_state
, pipe_config
);
5961 if (INTEL_GEN(dev_priv
) >= 11)
5962 icl_pipe_mbus_enable(intel_crtc
);
5964 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
5965 if (!transcoder_is_dsi(cpu_transcoder
))
5966 intel_enable_pipe(pipe_config
);
5968 if (pipe_config
->has_pch_encoder
)
5969 lpt_pch_enable(old_intel_state
, pipe_config
);
5971 if (intel_crtc_has_type(pipe_config
, INTEL_OUTPUT_DP_MST
))
5972 intel_ddi_set_vc_payload_alloc(pipe_config
, true);
5974 assert_vblank_disabled(crtc
);
5975 intel_crtc_vblank_on(pipe_config
);
5977 intel_encoders_enable(crtc
, pipe_config
, old_state
);
5979 if (psl_clkgate_wa
) {
5980 intel_wait_for_vblank(dev_priv
, pipe
);
5981 glk_pipe_scaler_clock_gating_wa(dev_priv
, pipe
, false);
5984 /* If we change the relative order between pipe/planes enabling, we need
5985 * to change the workaround. */
5986 hsw_workaround_pipe
= pipe_config
->hsw_workaround_pipe
;
5987 if (IS_HASWELL(dev_priv
) && hsw_workaround_pipe
!= INVALID_PIPE
) {
5988 intel_wait_for_vblank(dev_priv
, hsw_workaround_pipe
);
5989 intel_wait_for_vblank(dev_priv
, hsw_workaround_pipe
);
5993 static void ironlake_pfit_disable(const struct intel_crtc_state
*old_crtc_state
)
5995 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
5996 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
5997 enum pipe pipe
= crtc
->pipe
;
5999 /* To avoid upsetting the power well on haswell only disable the pfit if
6000 * it's in use. The hw state code will make sure we get this right. */
6001 if (old_crtc_state
->pch_pfit
.enabled
) {
6002 I915_WRITE(PF_CTL(pipe
), 0);
6003 I915_WRITE(PF_WIN_POS(pipe
), 0);
6004 I915_WRITE(PF_WIN_SZ(pipe
), 0);
6008 static void ironlake_crtc_disable(struct intel_crtc_state
*old_crtc_state
,
6009 struct drm_atomic_state
*old_state
)
6011 struct drm_crtc
*crtc
= old_crtc_state
->base
.crtc
;
6012 struct drm_device
*dev
= crtc
->dev
;
6013 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6014 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6015 int pipe
= intel_crtc
->pipe
;
6018 * Sometimes spurious CPU pipe underruns happen when the
6019 * pipe is already disabled, but FDI RX/TX is still enabled.
6020 * Happens at least with VGA+HDMI cloning. Suppress them.
6022 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, false);
6023 intel_set_pch_fifo_underrun_reporting(dev_priv
, pipe
, false);
6025 intel_encoders_disable(crtc
, old_crtc_state
, old_state
);
6027 drm_crtc_vblank_off(crtc
);
6028 assert_vblank_disabled(crtc
);
6030 intel_disable_pipe(old_crtc_state
);
6032 ironlake_pfit_disable(old_crtc_state
);
6034 if (old_crtc_state
->has_pch_encoder
)
6035 ironlake_fdi_disable(crtc
);
6037 intel_encoders_post_disable(crtc
, old_crtc_state
, old_state
);
6039 if (old_crtc_state
->has_pch_encoder
) {
6040 ironlake_disable_pch_transcoder(dev_priv
, pipe
);
6042 if (HAS_PCH_CPT(dev_priv
)) {
6046 /* disable TRANS_DP_CTL */
6047 reg
= TRANS_DP_CTL(pipe
);
6048 temp
= I915_READ(reg
);
6049 temp
&= ~(TRANS_DP_OUTPUT_ENABLE
|
6050 TRANS_DP_PORT_SEL_MASK
);
6051 temp
|= TRANS_DP_PORT_SEL_NONE
;
6052 I915_WRITE(reg
, temp
);
6054 /* disable DPLL_SEL */
6055 temp
= I915_READ(PCH_DPLL_SEL
);
6056 temp
&= ~(TRANS_DPLL_ENABLE(pipe
) | TRANS_DPLLB_SEL(pipe
));
6057 I915_WRITE(PCH_DPLL_SEL
, temp
);
6060 ironlake_fdi_pll_disable(intel_crtc
);
6063 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, true);
6064 intel_set_pch_fifo_underrun_reporting(dev_priv
, pipe
, true);
6067 static void haswell_crtc_disable(struct intel_crtc_state
*old_crtc_state
,
6068 struct drm_atomic_state
*old_state
)
6070 struct drm_crtc
*crtc
= old_crtc_state
->base
.crtc
;
6071 struct drm_i915_private
*dev_priv
= to_i915(crtc
->dev
);
6072 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6073 enum transcoder cpu_transcoder
= old_crtc_state
->cpu_transcoder
;
6075 intel_encoders_disable(crtc
, old_crtc_state
, old_state
);
6077 drm_crtc_vblank_off(crtc
);
6078 assert_vblank_disabled(crtc
);
6080 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6081 if (!transcoder_is_dsi(cpu_transcoder
))
6082 intel_disable_pipe(old_crtc_state
);
6084 if (intel_crtc_has_type(old_crtc_state
, INTEL_OUTPUT_DP_MST
))
6085 intel_ddi_set_vc_payload_alloc(old_crtc_state
, false);
6087 if (!transcoder_is_dsi(cpu_transcoder
))
6088 intel_ddi_disable_transcoder_func(old_crtc_state
);
6090 intel_dsc_disable(old_crtc_state
);
6092 if (INTEL_GEN(dev_priv
) >= 9)
6093 skylake_scaler_disable(intel_crtc
);
6095 ironlake_pfit_disable(old_crtc_state
);
6097 intel_encoders_post_disable(crtc
, old_crtc_state
, old_state
);
6099 intel_encoders_post_pll_disable(crtc
, old_crtc_state
, old_state
);
6102 static void i9xx_pfit_enable(const struct intel_crtc_state
*crtc_state
)
6104 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
6105 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
6107 if (!crtc_state
->gmch_pfit
.control
)
6111 * The panel fitter should only be adjusted whilst the pipe is disabled,
6112 * according to register description and PRM.
6114 WARN_ON(I915_READ(PFIT_CONTROL
) & PFIT_ENABLE
);
6115 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
6117 I915_WRITE(PFIT_PGM_RATIOS
, crtc_state
->gmch_pfit
.pgm_ratios
);
6118 I915_WRITE(PFIT_CONTROL
, crtc_state
->gmch_pfit
.control
);
6120 /* Border color in case we don't scale up to the full screen. Black by
6121 * default, change to something else for debugging. */
6122 I915_WRITE(BCLRPAT(crtc
->pipe
), 0);
6125 bool intel_port_is_combophy(struct drm_i915_private
*dev_priv
, enum port port
)
6127 if (port
== PORT_NONE
)
6130 if (IS_ICELAKE(dev_priv
))
6131 return port
<= PORT_B
;
6136 bool intel_port_is_tc(struct drm_i915_private
*dev_priv
, enum port port
)
6138 if (IS_ICELAKE(dev_priv
))
6139 return port
>= PORT_C
&& port
<= PORT_F
;
6144 enum tc_port
intel_port_to_tc(struct drm_i915_private
*dev_priv
, enum port port
)
6146 if (!intel_port_is_tc(dev_priv
, port
))
6147 return PORT_TC_NONE
;
6149 return port
- PORT_C
;
6152 enum intel_display_power_domain
intel_port_to_power_domain(enum port port
)
6156 return POWER_DOMAIN_PORT_DDI_A_LANES
;
6158 return POWER_DOMAIN_PORT_DDI_B_LANES
;
6160 return POWER_DOMAIN_PORT_DDI_C_LANES
;
6162 return POWER_DOMAIN_PORT_DDI_D_LANES
;
6164 return POWER_DOMAIN_PORT_DDI_E_LANES
;
6166 return POWER_DOMAIN_PORT_DDI_F_LANES
;
6169 return POWER_DOMAIN_PORT_OTHER
;
6173 enum intel_display_power_domain
6174 intel_aux_power_domain(struct intel_digital_port
*dig_port
)
6176 switch (dig_port
->aux_ch
) {
6178 return POWER_DOMAIN_AUX_A
;
6180 return POWER_DOMAIN_AUX_B
;
6182 return POWER_DOMAIN_AUX_C
;
6184 return POWER_DOMAIN_AUX_D
;
6186 return POWER_DOMAIN_AUX_E
;
6188 return POWER_DOMAIN_AUX_F
;
6190 MISSING_CASE(dig_port
->aux_ch
);
6191 return POWER_DOMAIN_AUX_A
;
6195 static u64
get_crtc_power_domains(struct drm_crtc
*crtc
,
6196 struct intel_crtc_state
*crtc_state
)
6198 struct drm_device
*dev
= crtc
->dev
;
6199 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6200 struct drm_encoder
*encoder
;
6201 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6202 enum pipe pipe
= intel_crtc
->pipe
;
6204 enum transcoder transcoder
= crtc_state
->cpu_transcoder
;
6206 if (!crtc_state
->base
.active
)
6209 mask
= BIT_ULL(POWER_DOMAIN_PIPE(pipe
));
6210 mask
|= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder
));
6211 if (crtc_state
->pch_pfit
.enabled
||
6212 crtc_state
->pch_pfit
.force_thru
)
6213 mask
|= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe
));
6215 drm_for_each_encoder_mask(encoder
, dev
, crtc_state
->base
.encoder_mask
) {
6216 struct intel_encoder
*intel_encoder
= to_intel_encoder(encoder
);
6218 mask
|= BIT_ULL(intel_encoder
->power_domain
);
6221 if (HAS_DDI(dev_priv
) && crtc_state
->has_audio
)
6222 mask
|= BIT_ULL(POWER_DOMAIN_AUDIO
);
6224 if (crtc_state
->shared_dpll
)
6225 mask
|= BIT_ULL(POWER_DOMAIN_PLLS
);
6231 modeset_get_crtc_power_domains(struct drm_crtc
*crtc
,
6232 struct intel_crtc_state
*crtc_state
)
6234 struct drm_i915_private
*dev_priv
= to_i915(crtc
->dev
);
6235 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6236 enum intel_display_power_domain domain
;
6237 u64 domains
, new_domains
, old_domains
;
6239 old_domains
= intel_crtc
->enabled_power_domains
;
6240 intel_crtc
->enabled_power_domains
= new_domains
=
6241 get_crtc_power_domains(crtc
, crtc_state
);
6243 domains
= new_domains
& ~old_domains
;
6245 for_each_power_domain(domain
, domains
)
6246 intel_display_power_get(dev_priv
, domain
);
6248 return old_domains
& ~new_domains
;
6251 static void modeset_put_power_domains(struct drm_i915_private
*dev_priv
,
6254 enum intel_display_power_domain domain
;
6256 for_each_power_domain(domain
, domains
)
6257 intel_display_power_put_unchecked(dev_priv
, domain
);
6260 static void valleyview_crtc_enable(struct intel_crtc_state
*pipe_config
,
6261 struct drm_atomic_state
*old_state
)
6263 struct intel_atomic_state
*old_intel_state
=
6264 to_intel_atomic_state(old_state
);
6265 struct drm_crtc
*crtc
= pipe_config
->base
.crtc
;
6266 struct drm_device
*dev
= crtc
->dev
;
6267 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6268 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6269 int pipe
= intel_crtc
->pipe
;
6271 if (WARN_ON(intel_crtc
->active
))
6274 if (intel_crtc_has_dp_encoder(pipe_config
))
6275 intel_dp_set_m_n(pipe_config
, M1_N1
);
6277 intel_set_pipe_timings(pipe_config
);
6278 intel_set_pipe_src_size(pipe_config
);
6280 if (IS_CHERRYVIEW(dev_priv
) && pipe
== PIPE_B
) {
6281 I915_WRITE(CHV_BLEND(pipe
), CHV_BLEND_LEGACY
);
6282 I915_WRITE(CHV_CANVAS(pipe
), 0);
6285 i9xx_set_pipeconf(pipe_config
);
6287 intel_crtc
->active
= true;
6289 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, true);
6291 intel_encoders_pre_pll_enable(crtc
, pipe_config
, old_state
);
6293 if (IS_CHERRYVIEW(dev_priv
)) {
6294 chv_prepare_pll(intel_crtc
, pipe_config
);
6295 chv_enable_pll(intel_crtc
, pipe_config
);
6297 vlv_prepare_pll(intel_crtc
, pipe_config
);
6298 vlv_enable_pll(intel_crtc
, pipe_config
);
6301 intel_encoders_pre_enable(crtc
, pipe_config
, old_state
);
6303 i9xx_pfit_enable(pipe_config
);
6305 intel_color_load_luts(pipe_config
);
6306 intel_color_commit(pipe_config
);
6308 dev_priv
->display
.initial_watermarks(old_intel_state
,
6310 intel_enable_pipe(pipe_config
);
6312 assert_vblank_disabled(crtc
);
6313 intel_crtc_vblank_on(pipe_config
);
6315 intel_encoders_enable(crtc
, pipe_config
, old_state
);
6318 static void i9xx_set_pll_dividers(const struct intel_crtc_state
*crtc_state
)
6320 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
6321 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
6323 I915_WRITE(FP0(crtc
->pipe
), crtc_state
->dpll_hw_state
.fp0
);
6324 I915_WRITE(FP1(crtc
->pipe
), crtc_state
->dpll_hw_state
.fp1
);
6327 static void i9xx_crtc_enable(struct intel_crtc_state
*pipe_config
,
6328 struct drm_atomic_state
*old_state
)
6330 struct intel_atomic_state
*old_intel_state
=
6331 to_intel_atomic_state(old_state
);
6332 struct drm_crtc
*crtc
= pipe_config
->base
.crtc
;
6333 struct drm_device
*dev
= crtc
->dev
;
6334 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6335 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6336 enum pipe pipe
= intel_crtc
->pipe
;
6338 if (WARN_ON(intel_crtc
->active
))
6341 i9xx_set_pll_dividers(pipe_config
);
6343 if (intel_crtc_has_dp_encoder(pipe_config
))
6344 intel_dp_set_m_n(pipe_config
, M1_N1
);
6346 intel_set_pipe_timings(pipe_config
);
6347 intel_set_pipe_src_size(pipe_config
);
6349 i9xx_set_pipeconf(pipe_config
);
6351 intel_crtc
->active
= true;
6353 if (!IS_GEN(dev_priv
, 2))
6354 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, true);
6356 intel_encoders_pre_enable(crtc
, pipe_config
, old_state
);
6358 i9xx_enable_pll(intel_crtc
, pipe_config
);
6360 i9xx_pfit_enable(pipe_config
);
6362 intel_color_load_luts(pipe_config
);
6363 intel_color_commit(pipe_config
);
6365 if (dev_priv
->display
.initial_watermarks
!= NULL
)
6366 dev_priv
->display
.initial_watermarks(old_intel_state
,
6369 intel_update_watermarks(intel_crtc
);
6370 intel_enable_pipe(pipe_config
);
6372 assert_vblank_disabled(crtc
);
6373 intel_crtc_vblank_on(pipe_config
);
6375 intel_encoders_enable(crtc
, pipe_config
, old_state
);
6378 static void i9xx_pfit_disable(const struct intel_crtc_state
*old_crtc_state
)
6380 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
6381 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
6383 if (!old_crtc_state
->gmch_pfit
.control
)
6386 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
6388 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6389 I915_READ(PFIT_CONTROL
));
6390 I915_WRITE(PFIT_CONTROL
, 0);
6393 static void i9xx_crtc_disable(struct intel_crtc_state
*old_crtc_state
,
6394 struct drm_atomic_state
*old_state
)
6396 struct drm_crtc
*crtc
= old_crtc_state
->base
.crtc
;
6397 struct drm_device
*dev
= crtc
->dev
;
6398 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6399 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6400 int pipe
= intel_crtc
->pipe
;
6403 * On gen2 planes are double buffered but the pipe isn't, so we must
6404 * wait for planes to fully turn off before disabling the pipe.
6406 if (IS_GEN(dev_priv
, 2))
6407 intel_wait_for_vblank(dev_priv
, pipe
);
6409 intel_encoders_disable(crtc
, old_crtc_state
, old_state
);
6411 drm_crtc_vblank_off(crtc
);
6412 assert_vblank_disabled(crtc
);
6414 intel_disable_pipe(old_crtc_state
);
6416 i9xx_pfit_disable(old_crtc_state
);
6418 intel_encoders_post_disable(crtc
, old_crtc_state
, old_state
);
6420 if (!intel_crtc_has_type(old_crtc_state
, INTEL_OUTPUT_DSI
)) {
6421 if (IS_CHERRYVIEW(dev_priv
))
6422 chv_disable_pll(dev_priv
, pipe
);
6423 else if (IS_VALLEYVIEW(dev_priv
))
6424 vlv_disable_pll(dev_priv
, pipe
);
6426 i9xx_disable_pll(old_crtc_state
);
6429 intel_encoders_post_pll_disable(crtc
, old_crtc_state
, old_state
);
6431 if (!IS_GEN(dev_priv
, 2))
6432 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, false);
6434 if (!dev_priv
->display
.initial_watermarks
)
6435 intel_update_watermarks(intel_crtc
);
6437 /* clock the pipe down to 640x480@60 to potentially save power */
6438 if (IS_I830(dev_priv
))
6439 i830_enable_pipe(dev_priv
, pipe
);
6442 static void intel_crtc_disable_noatomic(struct drm_crtc
*crtc
,
6443 struct drm_modeset_acquire_ctx
*ctx
)
6445 struct intel_encoder
*encoder
;
6446 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6447 struct drm_i915_private
*dev_priv
= to_i915(crtc
->dev
);
6448 enum intel_display_power_domain domain
;
6449 struct intel_plane
*plane
;
6451 struct drm_atomic_state
*state
;
6452 struct intel_crtc_state
*crtc_state
;
6455 if (!intel_crtc
->active
)
6458 for_each_intel_plane_on_crtc(&dev_priv
->drm
, intel_crtc
, plane
) {
6459 const struct intel_plane_state
*plane_state
=
6460 to_intel_plane_state(plane
->base
.state
);
6462 if (plane_state
->base
.visible
)
6463 intel_plane_disable_noatomic(intel_crtc
, plane
);
6466 state
= drm_atomic_state_alloc(crtc
->dev
);
6468 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6469 crtc
->base
.id
, crtc
->name
);
6473 state
->acquire_ctx
= ctx
;
6475 /* Everything's already locked, -EDEADLK can't happen. */
6476 crtc_state
= intel_atomic_get_crtc_state(state
, intel_crtc
);
6477 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
6479 WARN_ON(IS_ERR(crtc_state
) || ret
);
6481 dev_priv
->display
.crtc_disable(crtc_state
, state
);
6483 drm_atomic_state_put(state
);
6485 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6486 crtc
->base
.id
, crtc
->name
);
6488 WARN_ON(drm_atomic_set_mode_for_crtc(crtc
->state
, NULL
) < 0);
6489 crtc
->state
->active
= false;
6490 intel_crtc
->active
= false;
6491 crtc
->enabled
= false;
6492 crtc
->state
->connector_mask
= 0;
6493 crtc
->state
->encoder_mask
= 0;
6495 for_each_encoder_on_crtc(crtc
->dev
, crtc
, encoder
)
6496 encoder
->base
.crtc
= NULL
;
6498 intel_fbc_disable(intel_crtc
);
6499 intel_update_watermarks(intel_crtc
);
6500 intel_disable_shared_dpll(to_intel_crtc_state(crtc
->state
));
6502 domains
= intel_crtc
->enabled_power_domains
;
6503 for_each_power_domain(domain
, domains
)
6504 intel_display_power_put_unchecked(dev_priv
, domain
);
6505 intel_crtc
->enabled_power_domains
= 0;
6507 dev_priv
->active_crtcs
&= ~(1 << intel_crtc
->pipe
);
6508 dev_priv
->min_cdclk
[intel_crtc
->pipe
] = 0;
6509 dev_priv
->min_voltage_level
[intel_crtc
->pipe
] = 0;
6513 * turn all crtc's off, but do not adjust state
6514 * This has to be paired with a call to intel_modeset_setup_hw_state.
6516 int intel_display_suspend(struct drm_device
*dev
)
6518 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6519 struct drm_atomic_state
*state
;
6522 state
= drm_atomic_helper_suspend(dev
);
6523 ret
= PTR_ERR_OR_ZERO(state
);
6525 DRM_ERROR("Suspending crtc's failed with %i\n", ret
);
6527 dev_priv
->modeset_restore_state
= state
;
/* Generic encoder destroy callback: clean up the DRM core state and free. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
6539 /* Cross check the actual hw state with our own modeset state tracking (and it's
6540 * internal consistency). */
6541 static void intel_connector_verify_state(struct drm_crtc_state
*crtc_state
,
6542 struct drm_connector_state
*conn_state
)
6544 struct intel_connector
*connector
= to_intel_connector(conn_state
->connector
);
6546 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6547 connector
->base
.base
.id
,
6548 connector
->base
.name
);
6550 if (connector
->get_hw_state(connector
)) {
6551 struct intel_encoder
*encoder
= connector
->encoder
;
6553 I915_STATE_WARN(!crtc_state
,
6554 "connector enabled without attached crtc\n");
6559 I915_STATE_WARN(!crtc_state
->active
,
6560 "connector is active, but attached crtc isn't\n");
6562 if (!encoder
|| encoder
->type
== INTEL_OUTPUT_DP_MST
)
6565 I915_STATE_WARN(conn_state
->best_encoder
!= &encoder
->base
,
6566 "atomic encoder doesn't match attached encoder\n");
6568 I915_STATE_WARN(conn_state
->crtc
!= encoder
->base
.crtc
,
6569 "attached encoder crtc differs from connector crtc\n");
6571 I915_STATE_WARN(crtc_state
&& crtc_state
->active
,
6572 "attached crtc is active, but connector isn't\n");
6573 I915_STATE_WARN(!crtc_state
&& conn_state
->best_encoder
,
6574 "best encoder set without crtc!\n");
6578 static int pipe_required_fdi_lanes(struct intel_crtc_state
*crtc_state
)
6580 if (crtc_state
->base
.enable
&& crtc_state
->has_pch_encoder
)
6581 return crtc_state
->fdi_lanes
;
6586 static int ironlake_check_fdi_lanes(struct drm_device
*dev
, enum pipe pipe
,
6587 struct intel_crtc_state
*pipe_config
)
6589 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6590 struct drm_atomic_state
*state
= pipe_config
->base
.state
;
6591 struct intel_crtc
*other_crtc
;
6592 struct intel_crtc_state
*other_crtc_state
;
6594 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6595 pipe_name(pipe
), pipe_config
->fdi_lanes
);
6596 if (pipe_config
->fdi_lanes
> 4) {
6597 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6598 pipe_name(pipe
), pipe_config
->fdi_lanes
);
6602 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) {
6603 if (pipe_config
->fdi_lanes
> 2) {
6604 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6605 pipe_config
->fdi_lanes
);
6612 if (INTEL_INFO(dev_priv
)->num_pipes
== 2)
6615 /* Ivybridge 3 pipe is really complicated */
6620 if (pipe_config
->fdi_lanes
<= 2)
6623 other_crtc
= intel_get_crtc_for_pipe(dev_priv
, PIPE_C
);
6625 intel_atomic_get_crtc_state(state
, other_crtc
);
6626 if (IS_ERR(other_crtc_state
))
6627 return PTR_ERR(other_crtc_state
);
6629 if (pipe_required_fdi_lanes(other_crtc_state
) > 0) {
6630 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6631 pipe_name(pipe
), pipe_config
->fdi_lanes
);
6636 if (pipe_config
->fdi_lanes
> 2) {
6637 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6638 pipe_name(pipe
), pipe_config
->fdi_lanes
);
6642 other_crtc
= intel_get_crtc_for_pipe(dev_priv
, PIPE_B
);
6644 intel_atomic_get_crtc_state(state
, other_crtc
);
6645 if (IS_ERR(other_crtc_state
))
6646 return PTR_ERR(other_crtc_state
);
6648 if (pipe_required_fdi_lanes(other_crtc_state
) > 2) {
6649 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6659 static int ironlake_fdi_compute_config(struct intel_crtc
*intel_crtc
,
6660 struct intel_crtc_state
*pipe_config
)
6662 struct drm_device
*dev
= intel_crtc
->base
.dev
;
6663 const struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
6664 int lane
, link_bw
, fdi_dotclock
, ret
;
6665 bool needs_recompute
= false;
6668 /* FDI is a binary signal running at ~2.7GHz, encoding
6669 * each output octet as 10 bits. The actual frequency
6670 * is stored as a divider into a 100MHz clock, and the
6671 * mode pixel clock is stored in units of 1KHz.
6672 * Hence the bw of each lane in terms of the mode signal
6675 link_bw
= intel_fdi_link_freq(to_i915(dev
), pipe_config
);
6677 fdi_dotclock
= adjusted_mode
->crtc_clock
;
6679 lane
= ironlake_get_lanes_required(fdi_dotclock
, link_bw
,
6680 pipe_config
->pipe_bpp
);
6682 pipe_config
->fdi_lanes
= lane
;
6684 intel_link_compute_m_n(pipe_config
->pipe_bpp
, lane
, fdi_dotclock
,
6685 link_bw
, &pipe_config
->fdi_m_n
, false);
6687 ret
= ironlake_check_fdi_lanes(dev
, intel_crtc
->pipe
, pipe_config
);
6688 if (ret
== -EDEADLK
)
6691 if (ret
== -EINVAL
&& pipe_config
->pipe_bpp
> 6*3) {
6692 pipe_config
->pipe_bpp
-= 2*3;
6693 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6694 pipe_config
->pipe_bpp
);
6695 needs_recompute
= true;
6696 pipe_config
->bw_constrained
= true;
6701 if (needs_recompute
)
6707 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state
*crtc_state
)
6709 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
6710 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
6712 /* IPS only exists on ULT machines and is tied to pipe A. */
6713 if (!hsw_crtc_supports_ips(crtc
))
6716 if (!i915_modparams
.enable_ips
)
6719 if (crtc_state
->pipe_bpp
> 24)
6723 * We compare against max which means we must take
6724 * the increased cdclk requirement into account when
6725 * calculating the new cdclk.
6727 * Should measure whether using a lower cdclk w/o IPS
6729 if (IS_BROADWELL(dev_priv
) &&
6730 crtc_state
->pixel_rate
> dev_priv
->max_cdclk_freq
* 95 / 100)
6736 static bool hsw_compute_ips_config(struct intel_crtc_state
*crtc_state
)
6738 struct drm_i915_private
*dev_priv
=
6739 to_i915(crtc_state
->base
.crtc
->dev
);
6740 struct intel_atomic_state
*intel_state
=
6741 to_intel_atomic_state(crtc_state
->base
.state
);
6743 if (!hsw_crtc_state_ips_capable(crtc_state
))
6746 if (crtc_state
->ips_force_disable
)
6749 /* IPS should be fine as long as at least one plane is enabled. */
6750 if (!(crtc_state
->active_planes
& ~BIT(PLANE_CURSOR
)))
6753 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6754 if (IS_BROADWELL(dev_priv
) &&
6755 crtc_state
->pixel_rate
> intel_state
->cdclk
.logical
.cdclk
* 95 / 100)
6761 static bool intel_crtc_supports_double_wide(const struct intel_crtc
*crtc
)
6763 const struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
6765 /* GDG double wide on either pipe, otherwise pipe A only */
6766 return INTEL_GEN(dev_priv
) < 4 &&
6767 (crtc
->pipe
== PIPE_A
|| IS_I915G(dev_priv
));
6770 static u32
ilk_pipe_pixel_rate(const struct intel_crtc_state
*pipe_config
)
6774 pixel_rate
= pipe_config
->base
.adjusted_mode
.crtc_clock
;
6777 * We only use IF-ID interlacing. If we ever use
6778 * PF-ID we'll need to adjust the pixel_rate here.
6781 if (pipe_config
->pch_pfit
.enabled
) {
6782 u64 pipe_w
, pipe_h
, pfit_w
, pfit_h
;
6783 u32 pfit_size
= pipe_config
->pch_pfit
.size
;
6785 pipe_w
= pipe_config
->pipe_src_w
;
6786 pipe_h
= pipe_config
->pipe_src_h
;
6788 pfit_w
= (pfit_size
>> 16) & 0xFFFF;
6789 pfit_h
= pfit_size
& 0xFFFF;
6790 if (pipe_w
< pfit_w
)
6792 if (pipe_h
< pfit_h
)
6795 if (WARN_ON(!pfit_w
|| !pfit_h
))
6798 pixel_rate
= div_u64((u64
)pixel_rate
* pipe_w
* pipe_h
,
6805 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state
*crtc_state
)
6807 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->base
.crtc
->dev
);
6809 if (HAS_GMCH(dev_priv
))
6810 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6811 crtc_state
->pixel_rate
=
6812 crtc_state
->base
.adjusted_mode
.crtc_clock
;
6814 crtc_state
->pixel_rate
=
6815 ilk_pipe_pixel_rate(crtc_state
);
6818 static int intel_crtc_compute_config(struct intel_crtc
*crtc
,
6819 struct intel_crtc_state
*pipe_config
)
6821 struct drm_device
*dev
= crtc
->base
.dev
;
6822 struct drm_i915_private
*dev_priv
= to_i915(dev
);
6823 const struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
6824 int clock_limit
= dev_priv
->max_dotclk_freq
;
6826 if (INTEL_GEN(dev_priv
) < 4) {
6827 clock_limit
= dev_priv
->max_cdclk_freq
* 9 / 10;
6830 * Enable double wide mode when the dot clock
6831 * is > 90% of the (display) core speed.
6833 if (intel_crtc_supports_double_wide(crtc
) &&
6834 adjusted_mode
->crtc_clock
> clock_limit
) {
6835 clock_limit
= dev_priv
->max_dotclk_freq
;
6836 pipe_config
->double_wide
= true;
6840 if (adjusted_mode
->crtc_clock
> clock_limit
) {
6841 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6842 adjusted_mode
->crtc_clock
, clock_limit
,
6843 yesno(pipe_config
->double_wide
));
6847 if ((pipe_config
->output_format
== INTEL_OUTPUT_FORMAT_YCBCR420
||
6848 pipe_config
->output_format
== INTEL_OUTPUT_FORMAT_YCBCR444
) &&
6849 pipe_config
->base
.ctm
) {
6851 * There is only one pipe CSC unit per pipe, and we need that
6852 * for output conversion from RGB->YCBCR. So if CTM is already
6853 * applied we can't support YCBCR420 output.
6855 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
6860 * Pipe horizontal size must be even in:
6862 * - LVDS dual channel mode
6863 * - Double wide pipe
6865 if (pipe_config
->pipe_src_w
& 1) {
6866 if (pipe_config
->double_wide
) {
6867 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
6871 if (intel_crtc_has_type(pipe_config
, INTEL_OUTPUT_LVDS
) &&
6872 intel_is_dual_link_lvds(dev
)) {
6873 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
6878 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6879 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6881 if ((INTEL_GEN(dev_priv
) > 4 || IS_G4X(dev_priv
)) &&
6882 adjusted_mode
->crtc_hsync_start
== adjusted_mode
->crtc_hdisplay
)
6885 intel_crtc_compute_pixel_rate(pipe_config
);
6887 if (pipe_config
->has_pch_encoder
)
6888 return ironlake_fdi_compute_config(crtc
, pipe_config
);
6894 intel_reduce_m_n_ratio(u32
*num
, u32
*den
)
6896 while (*num
> DATA_LINK_M_N_MASK
||
6897 *den
> DATA_LINK_M_N_MASK
) {
6903 static void compute_m_n(unsigned int m
, unsigned int n
,
6904 u32
*ret_m
, u32
*ret_n
,
6908 * Several DP dongles in particular seem to be fussy about
6909 * too large link M/N values. Give N value as 0x8000 that
6910 * should be acceptable by specific devices. 0x8000 is the
6911 * specified fixed N value for asynchronous clock mode,
6912 * which the devices expect also in synchronous clock mode.
6917 *ret_n
= min_t(unsigned int, roundup_pow_of_two(n
), DATA_LINK_N_MAX
);
6919 *ret_m
= div_u64((u64
)m
* *ret_n
, n
);
6920 intel_reduce_m_n_ratio(ret_m
, ret_n
);
6924 intel_link_compute_m_n(u16 bits_per_pixel
, int nlanes
,
6925 int pixel_clock
, int link_clock
,
6926 struct intel_link_m_n
*m_n
,
6931 compute_m_n(bits_per_pixel
* pixel_clock
,
6932 link_clock
* nlanes
* 8,
6933 &m_n
->gmch_m
, &m_n
->gmch_n
,
6936 compute_m_n(pixel_clock
, link_clock
,
6937 &m_n
->link_m
, &m_n
->link_n
,
6941 static inline bool intel_panel_use_ssc(struct drm_i915_private
*dev_priv
)
6943 if (i915_modparams
.panel_use_ssc
>= 0)
6944 return i915_modparams
.panel_use_ssc
!= 0;
6945 return dev_priv
->vbt
.lvds_use_ssc
6946 && !(dev_priv
->quirks
& QUIRK_LVDS_SSC_DISABLE
);
6949 static u32
pnv_dpll_compute_fp(struct dpll
*dpll
)
6951 return (1 << dpll
->n
) << 16 | dpll
->m2
;
6954 static u32
i9xx_dpll_compute_fp(struct dpll
*dpll
)
6956 return dpll
->n
<< 16 | dpll
->m1
<< 8 | dpll
->m2
;
6959 static void i9xx_update_pll_dividers(struct intel_crtc
*crtc
,
6960 struct intel_crtc_state
*crtc_state
,
6961 struct dpll
*reduced_clock
)
6963 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
6966 if (IS_PINEVIEW(dev_priv
)) {
6967 fp
= pnv_dpll_compute_fp(&crtc_state
->dpll
);
6969 fp2
= pnv_dpll_compute_fp(reduced_clock
);
6971 fp
= i9xx_dpll_compute_fp(&crtc_state
->dpll
);
6973 fp2
= i9xx_dpll_compute_fp(reduced_clock
);
6976 crtc_state
->dpll_hw_state
.fp0
= fp
;
6978 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
) &&
6980 crtc_state
->dpll_hw_state
.fp1
= fp2
;
6982 crtc_state
->dpll_hw_state
.fp1
= fp
;
6986 static void vlv_pllb_recal_opamp(struct drm_i915_private
*dev_priv
, enum pipe
6992 * PLLB opamp always calibrates to max value of 0x3f, force enable it
6993 * and set it to a reasonable value instead.
6995 reg_val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PLL_DW9(1));
6996 reg_val
&= 0xffffff00;
6997 reg_val
|= 0x00000030;
6998 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW9(1), reg_val
);
7000 reg_val
= vlv_dpio_read(dev_priv
, pipe
, VLV_REF_DW13
);
7001 reg_val
&= 0x00ffffff;
7002 reg_val
|= 0x8c000000;
7003 vlv_dpio_write(dev_priv
, pipe
, VLV_REF_DW13
, reg_val
);
7005 reg_val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PLL_DW9(1));
7006 reg_val
&= 0xffffff00;
7007 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW9(1), reg_val
);
7009 reg_val
= vlv_dpio_read(dev_priv
, pipe
, VLV_REF_DW13
);
7010 reg_val
&= 0x00ffffff;
7011 reg_val
|= 0xb0000000;
7012 vlv_dpio_write(dev_priv
, pipe
, VLV_REF_DW13
, reg_val
);
7015 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state
*crtc_state
,
7016 const struct intel_link_m_n
*m_n
)
7018 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
7019 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
7020 enum pipe pipe
= crtc
->pipe
;
7022 I915_WRITE(PCH_TRANS_DATA_M1(pipe
), TU_SIZE(m_n
->tu
) | m_n
->gmch_m
);
7023 I915_WRITE(PCH_TRANS_DATA_N1(pipe
), m_n
->gmch_n
);
7024 I915_WRITE(PCH_TRANS_LINK_M1(pipe
), m_n
->link_m
);
7025 I915_WRITE(PCH_TRANS_LINK_N1(pipe
), m_n
->link_n
);
7028 static bool transcoder_has_m2_n2(struct drm_i915_private
*dev_priv
,
7029 enum transcoder transcoder
)
7031 if (IS_HASWELL(dev_priv
))
7032 return transcoder
== TRANSCODER_EDP
;
7035 * Strictly speaking some registers are available before
7036 * gen7, but we only support DRRS on gen7+
7038 return IS_GEN(dev_priv
, 7) || IS_CHERRYVIEW(dev_priv
);
7041 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state
*crtc_state
,
7042 const struct intel_link_m_n
*m_n
,
7043 const struct intel_link_m_n
*m2_n2
)
7045 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
7046 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
7047 enum pipe pipe
= crtc
->pipe
;
7048 enum transcoder transcoder
= crtc_state
->cpu_transcoder
;
7050 if (INTEL_GEN(dev_priv
) >= 5) {
7051 I915_WRITE(PIPE_DATA_M1(transcoder
), TU_SIZE(m_n
->tu
) | m_n
->gmch_m
);
7052 I915_WRITE(PIPE_DATA_N1(transcoder
), m_n
->gmch_n
);
7053 I915_WRITE(PIPE_LINK_M1(transcoder
), m_n
->link_m
);
7054 I915_WRITE(PIPE_LINK_N1(transcoder
), m_n
->link_n
);
7056 * M2_N2 registers are set only if DRRS is supported
7057 * (to make sure the registers are not unnecessarily accessed).
7059 if (m2_n2
&& crtc_state
->has_drrs
&&
7060 transcoder_has_m2_n2(dev_priv
, transcoder
)) {
7061 I915_WRITE(PIPE_DATA_M2(transcoder
),
7062 TU_SIZE(m2_n2
->tu
) | m2_n2
->gmch_m
);
7063 I915_WRITE(PIPE_DATA_N2(transcoder
), m2_n2
->gmch_n
);
7064 I915_WRITE(PIPE_LINK_M2(transcoder
), m2_n2
->link_m
);
7065 I915_WRITE(PIPE_LINK_N2(transcoder
), m2_n2
->link_n
);
7068 I915_WRITE(PIPE_DATA_M_G4X(pipe
), TU_SIZE(m_n
->tu
) | m_n
->gmch_m
);
7069 I915_WRITE(PIPE_DATA_N_G4X(pipe
), m_n
->gmch_n
);
7070 I915_WRITE(PIPE_LINK_M_G4X(pipe
), m_n
->link_m
);
7071 I915_WRITE(PIPE_LINK_N_G4X(pipe
), m_n
->link_n
);
7075 void intel_dp_set_m_n(const struct intel_crtc_state
*crtc_state
, enum link_m_n_set m_n
)
7077 const struct intel_link_m_n
*dp_m_n
, *dp_m2_n2
= NULL
;
7080 dp_m_n
= &crtc_state
->dp_m_n
;
7081 dp_m2_n2
= &crtc_state
->dp_m2_n2
;
7082 } else if (m_n
== M2_N2
) {
7085 * M2_N2 registers are not supported. Hence m2_n2 divider value
7086 * needs to be programmed into M1_N1.
7088 dp_m_n
= &crtc_state
->dp_m2_n2
;
7090 DRM_ERROR("Unsupported divider value\n");
7094 if (crtc_state
->has_pch_encoder
)
7095 intel_pch_transcoder_set_m_n(crtc_state
, &crtc_state
->dp_m_n
);
7097 intel_cpu_transcoder_set_m_n(crtc_state
, dp_m_n
, dp_m2_n2
);
7100 static void vlv_compute_dpll(struct intel_crtc
*crtc
,
7101 struct intel_crtc_state
*pipe_config
)
7103 pipe_config
->dpll_hw_state
.dpll
= DPLL_INTEGRATED_REF_CLK_VLV
|
7104 DPLL_REF_CLK_ENABLE_VLV
| DPLL_VGA_MODE_DIS
;
7105 if (crtc
->pipe
!= PIPE_A
)
7106 pipe_config
->dpll_hw_state
.dpll
|= DPLL_INTEGRATED_CRI_CLK_VLV
;
7108 /* DPLL not used with DSI, but still need the rest set up */
7109 if (!intel_crtc_has_type(pipe_config
, INTEL_OUTPUT_DSI
))
7110 pipe_config
->dpll_hw_state
.dpll
|= DPLL_VCO_ENABLE
|
7111 DPLL_EXT_BUFFER_ENABLE_VLV
;
7113 pipe_config
->dpll_hw_state
.dpll_md
=
7114 (pipe_config
->pixel_multiplier
- 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT
;
7117 static void chv_compute_dpll(struct intel_crtc
*crtc
,
7118 struct intel_crtc_state
*pipe_config
)
7120 pipe_config
->dpll_hw_state
.dpll
= DPLL_SSC_REF_CLK_CHV
|
7121 DPLL_REF_CLK_ENABLE_VLV
| DPLL_VGA_MODE_DIS
;
7122 if (crtc
->pipe
!= PIPE_A
)
7123 pipe_config
->dpll_hw_state
.dpll
|= DPLL_INTEGRATED_CRI_CLK_VLV
;
7125 /* DPLL not used with DSI, but still need the rest set up */
7126 if (!intel_crtc_has_type(pipe_config
, INTEL_OUTPUT_DSI
))
7127 pipe_config
->dpll_hw_state
.dpll
|= DPLL_VCO_ENABLE
;
7129 pipe_config
->dpll_hw_state
.dpll_md
=
7130 (pipe_config
->pixel_multiplier
- 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT
;
7133 static void vlv_prepare_pll(struct intel_crtc
*crtc
,
7134 const struct intel_crtc_state
*pipe_config
)
7136 struct drm_device
*dev
= crtc
->base
.dev
;
7137 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7138 enum pipe pipe
= crtc
->pipe
;
7140 u32 bestn
, bestm1
, bestm2
, bestp1
, bestp2
;
7141 u32 coreclk
, reg_val
;
7144 I915_WRITE(DPLL(pipe
),
7145 pipe_config
->dpll_hw_state
.dpll
&
7146 ~(DPLL_VCO_ENABLE
| DPLL_EXT_BUFFER_ENABLE_VLV
));
7148 /* No need to actually set up the DPLL with DSI */
7149 if ((pipe_config
->dpll_hw_state
.dpll
& DPLL_VCO_ENABLE
) == 0)
7152 mutex_lock(&dev_priv
->sb_lock
);
7154 bestn
= pipe_config
->dpll
.n
;
7155 bestm1
= pipe_config
->dpll
.m1
;
7156 bestm2
= pipe_config
->dpll
.m2
;
7157 bestp1
= pipe_config
->dpll
.p1
;
7158 bestp2
= pipe_config
->dpll
.p2
;
7160 /* See eDP HDMI DPIO driver vbios notes doc */
7162 /* PLL B needs special handling */
7164 vlv_pllb_recal_opamp(dev_priv
, pipe
);
7166 /* Set up Tx target for periodic Rcomp update */
7167 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW9_BCAST
, 0x0100000f);
7169 /* Disable target IRef on PLL */
7170 reg_val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PLL_DW8(pipe
));
7171 reg_val
&= 0x00ffffff;
7172 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW8(pipe
), reg_val
);
7174 /* Disable fast lock */
7175 vlv_dpio_write(dev_priv
, pipe
, VLV_CMN_DW0
, 0x610);
7177 /* Set idtafcrecal before PLL is enabled */
7178 mdiv
= ((bestm1
<< DPIO_M1DIV_SHIFT
) | (bestm2
& DPIO_M2DIV_MASK
));
7179 mdiv
|= ((bestp1
<< DPIO_P1_SHIFT
) | (bestp2
<< DPIO_P2_SHIFT
));
7180 mdiv
|= ((bestn
<< DPIO_N_SHIFT
));
7181 mdiv
|= (1 << DPIO_K_SHIFT
);
7184 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7185 * but we don't support that).
7186 * Note: don't use the DAC post divider as it seems unstable.
7188 mdiv
|= (DPIO_POST_DIV_HDMIDP
<< DPIO_POST_DIV_SHIFT
);
7189 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW3(pipe
), mdiv
);
7191 mdiv
|= DPIO_ENABLE_CALIBRATION
;
7192 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW3(pipe
), mdiv
);
7194 /* Set HBR and RBR LPF coefficients */
7195 if (pipe_config
->port_clock
== 162000 ||
7196 intel_crtc_has_type(pipe_config
, INTEL_OUTPUT_ANALOG
) ||
7197 intel_crtc_has_type(pipe_config
, INTEL_OUTPUT_HDMI
))
7198 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW10(pipe
),
7201 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW10(pipe
),
7204 if (intel_crtc_has_dp_encoder(pipe_config
)) {
7205 /* Use SSC source */
7207 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW5(pipe
),
7210 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW5(pipe
),
7212 } else { /* HDMI or VGA */
7213 /* Use bend source */
7215 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW5(pipe
),
7218 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW5(pipe
),
7222 coreclk
= vlv_dpio_read(dev_priv
, pipe
, VLV_PLL_DW7(pipe
));
7223 coreclk
= (coreclk
& 0x0000ff00) | 0x01c00000;
7224 if (intel_crtc_has_dp_encoder(pipe_config
))
7225 coreclk
|= 0x01000000;
7226 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW7(pipe
), coreclk
);
7228 vlv_dpio_write(dev_priv
, pipe
, VLV_PLL_DW11(pipe
), 0x87871000);
7229 mutex_unlock(&dev_priv
->sb_lock
);
7232 static void chv_prepare_pll(struct intel_crtc
*crtc
,
7233 const struct intel_crtc_state
*pipe_config
)
7235 struct drm_device
*dev
= crtc
->base
.dev
;
7236 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7237 enum pipe pipe
= crtc
->pipe
;
7238 enum dpio_channel port
= vlv_pipe_to_channel(pipe
);
7239 u32 loopfilter
, tribuf_calcntr
;
7240 u32 bestn
, bestm1
, bestm2
, bestp1
, bestp2
, bestm2_frac
;
7244 /* Enable Refclk and SSC */
7245 I915_WRITE(DPLL(pipe
),
7246 pipe_config
->dpll_hw_state
.dpll
& ~DPLL_VCO_ENABLE
);
7248 /* No need to actually set up the DPLL with DSI */
7249 if ((pipe_config
->dpll_hw_state
.dpll
& DPLL_VCO_ENABLE
) == 0)
7252 bestn
= pipe_config
->dpll
.n
;
7253 bestm2_frac
= pipe_config
->dpll
.m2
& 0x3fffff;
7254 bestm1
= pipe_config
->dpll
.m1
;
7255 bestm2
= pipe_config
->dpll
.m2
>> 22;
7256 bestp1
= pipe_config
->dpll
.p1
;
7257 bestp2
= pipe_config
->dpll
.p2
;
7258 vco
= pipe_config
->dpll
.vco
;
7262 mutex_lock(&dev_priv
->sb_lock
);
7264 /* p1 and p2 divider */
7265 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW13(port
),
7266 5 << DPIO_CHV_S1_DIV_SHIFT
|
7267 bestp1
<< DPIO_CHV_P1_DIV_SHIFT
|
7268 bestp2
<< DPIO_CHV_P2_DIV_SHIFT
|
7269 1 << DPIO_CHV_K_DIV_SHIFT
);
7271 /* Feedback post-divider - m2 */
7272 vlv_dpio_write(dev_priv
, pipe
, CHV_PLL_DW0(port
), bestm2
);
7274 /* Feedback refclk divider - n and m1 */
7275 vlv_dpio_write(dev_priv
, pipe
, CHV_PLL_DW1(port
),
7276 DPIO_CHV_M1_DIV_BY_2
|
7277 1 << DPIO_CHV_N_DIV_SHIFT
);
7279 /* M2 fraction division */
7280 vlv_dpio_write(dev_priv
, pipe
, CHV_PLL_DW2(port
), bestm2_frac
);
7282 /* M2 fraction division enable */
7283 dpio_val
= vlv_dpio_read(dev_priv
, pipe
, CHV_PLL_DW3(port
));
7284 dpio_val
&= ~(DPIO_CHV_FEEDFWD_GAIN_MASK
| DPIO_CHV_FRAC_DIV_EN
);
7285 dpio_val
|= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT
);
7287 dpio_val
|= DPIO_CHV_FRAC_DIV_EN
;
7288 vlv_dpio_write(dev_priv
, pipe
, CHV_PLL_DW3(port
), dpio_val
);
7290 /* Program digital lock detect threshold */
7291 dpio_val
= vlv_dpio_read(dev_priv
, pipe
, CHV_PLL_DW9(port
));
7292 dpio_val
&= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK
|
7293 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE
);
7294 dpio_val
|= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT
);
7296 dpio_val
|= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE
;
7297 vlv_dpio_write(dev_priv
, pipe
, CHV_PLL_DW9(port
), dpio_val
);
7300 if (vco
== 5400000) {
7301 loopfilter
|= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT
);
7302 loopfilter
|= (0x8 << DPIO_CHV_INT_COEFF_SHIFT
);
7303 loopfilter
|= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT
);
7304 tribuf_calcntr
= 0x9;
7305 } else if (vco
<= 6200000) {
7306 loopfilter
|= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT
);
7307 loopfilter
|= (0xB << DPIO_CHV_INT_COEFF_SHIFT
);
7308 loopfilter
|= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT
);
7309 tribuf_calcntr
= 0x9;
7310 } else if (vco
<= 6480000) {
7311 loopfilter
|= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT
);
7312 loopfilter
|= (0x9 << DPIO_CHV_INT_COEFF_SHIFT
);
7313 loopfilter
|= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT
);
7314 tribuf_calcntr
= 0x8;
7316 /* Not supported. Apply the same limits as in the max case */
7317 loopfilter
|= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT
);
7318 loopfilter
|= (0x9 << DPIO_CHV_INT_COEFF_SHIFT
);
7319 loopfilter
|= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT
);
7322 vlv_dpio_write(dev_priv
, pipe
, CHV_PLL_DW6(port
), loopfilter
);
7324 dpio_val
= vlv_dpio_read(dev_priv
, pipe
, CHV_PLL_DW8(port
));
7325 dpio_val
&= ~DPIO_CHV_TDC_TARGET_CNT_MASK
;
7326 dpio_val
|= (tribuf_calcntr
<< DPIO_CHV_TDC_TARGET_CNT_SHIFT
);
7327 vlv_dpio_write(dev_priv
, pipe
, CHV_PLL_DW8(port
), dpio_val
);
7330 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW14(port
),
7331 vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW14(port
)) |
7334 mutex_unlock(&dev_priv
->sb_lock
);
7338 * vlv_force_pll_on - forcibly enable just the PLL
7339 * @dev_priv: i915 private structure
7340 * @pipe: pipe PLL to enable
7341 * @dpll: PLL configuration
7343 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7344 * in cases where we need the PLL enabled even when @pipe is not going to
7347 int vlv_force_pll_on(struct drm_i915_private
*dev_priv
, enum pipe pipe
,
7348 const struct dpll
*dpll
)
7350 struct intel_crtc
*crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
7351 struct intel_crtc_state
*pipe_config
;
7353 pipe_config
= kzalloc(sizeof(*pipe_config
), GFP_KERNEL
);
7357 pipe_config
->base
.crtc
= &crtc
->base
;
7358 pipe_config
->pixel_multiplier
= 1;
7359 pipe_config
->dpll
= *dpll
;
7361 if (IS_CHERRYVIEW(dev_priv
)) {
7362 chv_compute_dpll(crtc
, pipe_config
);
7363 chv_prepare_pll(crtc
, pipe_config
);
7364 chv_enable_pll(crtc
, pipe_config
);
7366 vlv_compute_dpll(crtc
, pipe_config
);
7367 vlv_prepare_pll(crtc
, pipe_config
);
7368 vlv_enable_pll(crtc
, pipe_config
);
7377 * vlv_force_pll_off - forcibly disable just the PLL
7378 * @dev_priv: i915 private structure
7379 * @pipe: pipe PLL to disable
7381 * Disable the PLL for @pipe. To be used in cases where we need
7382 * the PLL enabled even when @pipe is not going to be enabled.
7384 void vlv_force_pll_off(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
7386 if (IS_CHERRYVIEW(dev_priv
))
7387 chv_disable_pll(dev_priv
, pipe
);
7389 vlv_disable_pll(dev_priv
, pipe
);
7392 static void i9xx_compute_dpll(struct intel_crtc
*crtc
,
7393 struct intel_crtc_state
*crtc_state
,
7394 struct dpll
*reduced_clock
)
7396 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
7398 struct dpll
*clock
= &crtc_state
->dpll
;
7400 i9xx_update_pll_dividers(crtc
, crtc_state
, reduced_clock
);
7402 dpll
= DPLL_VGA_MODE_DIS
;
7404 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
))
7405 dpll
|= DPLLB_MODE_LVDS
;
7407 dpll
|= DPLLB_MODE_DAC_SERIAL
;
7409 if (IS_I945G(dev_priv
) || IS_I945GM(dev_priv
) ||
7410 IS_G33(dev_priv
) || IS_PINEVIEW(dev_priv
)) {
7411 dpll
|= (crtc_state
->pixel_multiplier
- 1)
7412 << SDVO_MULTIPLIER_SHIFT_HIRES
;
7415 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_SDVO
) ||
7416 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_HDMI
))
7417 dpll
|= DPLL_SDVO_HIGH_SPEED
;
7419 if (intel_crtc_has_dp_encoder(crtc_state
))
7420 dpll
|= DPLL_SDVO_HIGH_SPEED
;
7422 /* compute bitmask from p1 value */
7423 if (IS_PINEVIEW(dev_priv
))
7424 dpll
|= (1 << (clock
->p1
- 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW
;
7426 dpll
|= (1 << (clock
->p1
- 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT
;
7427 if (IS_G4X(dev_priv
) && reduced_clock
)
7428 dpll
|= (1 << (reduced_clock
->p1
- 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT
;
7430 switch (clock
->p2
) {
7432 dpll
|= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5
;
7435 dpll
|= DPLLB_LVDS_P2_CLOCK_DIV_7
;
7438 dpll
|= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10
;
7441 dpll
|= DPLLB_LVDS_P2_CLOCK_DIV_14
;
7444 if (INTEL_GEN(dev_priv
) >= 4)
7445 dpll
|= (6 << PLL_LOAD_PULSE_PHASE_SHIFT
);
7447 if (crtc_state
->sdvo_tv_clock
)
7448 dpll
|= PLL_REF_INPUT_TVCLKINBC
;
7449 else if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
) &&
7450 intel_panel_use_ssc(dev_priv
))
7451 dpll
|= PLLB_REF_INPUT_SPREADSPECTRUMIN
;
7453 dpll
|= PLL_REF_INPUT_DREFCLK
;
7455 dpll
|= DPLL_VCO_ENABLE
;
7456 crtc_state
->dpll_hw_state
.dpll
= dpll
;
7458 if (INTEL_GEN(dev_priv
) >= 4) {
7459 u32 dpll_md
= (crtc_state
->pixel_multiplier
- 1)
7460 << DPLL_MD_UDI_MULTIPLIER_SHIFT
;
7461 crtc_state
->dpll_hw_state
.dpll_md
= dpll_md
;
7465 static void i8xx_compute_dpll(struct intel_crtc
*crtc
,
7466 struct intel_crtc_state
*crtc_state
,
7467 struct dpll
*reduced_clock
)
7469 struct drm_device
*dev
= crtc
->base
.dev
;
7470 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7472 struct dpll
*clock
= &crtc_state
->dpll
;
7474 i9xx_update_pll_dividers(crtc
, crtc_state
, reduced_clock
);
7476 dpll
= DPLL_VGA_MODE_DIS
;
7478 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
7479 dpll
|= (1 << (clock
->p1
- 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT
;
7482 dpll
|= PLL_P1_DIVIDE_BY_TWO
;
7484 dpll
|= (clock
->p1
- 2) << DPLL_FPA01_P1_POST_DIV_SHIFT
;
7486 dpll
|= PLL_P2_DIVIDE_BY_4
;
7489 if (!IS_I830(dev_priv
) &&
7490 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_DVO
))
7491 dpll
|= DPLL_DVO_2X_MODE
;
7493 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
) &&
7494 intel_panel_use_ssc(dev_priv
))
7495 dpll
|= PLLB_REF_INPUT_SPREADSPECTRUMIN
;
7497 dpll
|= PLL_REF_INPUT_DREFCLK
;
7499 dpll
|= DPLL_VCO_ENABLE
;
7500 crtc_state
->dpll_hw_state
.dpll
= dpll
;
7503 static void intel_set_pipe_timings(const struct intel_crtc_state
*crtc_state
)
7505 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
7506 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
7507 enum pipe pipe
= crtc
->pipe
;
7508 enum transcoder cpu_transcoder
= crtc_state
->cpu_transcoder
;
7509 const struct drm_display_mode
*adjusted_mode
= &crtc_state
->base
.adjusted_mode
;
7510 u32 crtc_vtotal
, crtc_vblank_end
;
7513 /* We need to be careful not to changed the adjusted mode, for otherwise
7514 * the hw state checker will get angry at the mismatch. */
7515 crtc_vtotal
= adjusted_mode
->crtc_vtotal
;
7516 crtc_vblank_end
= adjusted_mode
->crtc_vblank_end
;
7518 if (adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
) {
7519 /* the chip adds 2 halflines automatically */
7521 crtc_vblank_end
-= 1;
7523 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_SDVO
))
7524 vsyncshift
= (adjusted_mode
->crtc_htotal
- 1) / 2;
7526 vsyncshift
= adjusted_mode
->crtc_hsync_start
-
7527 adjusted_mode
->crtc_htotal
/ 2;
7529 vsyncshift
+= adjusted_mode
->crtc_htotal
;
7532 if (INTEL_GEN(dev_priv
) > 3)
7533 I915_WRITE(VSYNCSHIFT(cpu_transcoder
), vsyncshift
);
7535 I915_WRITE(HTOTAL(cpu_transcoder
),
7536 (adjusted_mode
->crtc_hdisplay
- 1) |
7537 ((adjusted_mode
->crtc_htotal
- 1) << 16));
7538 I915_WRITE(HBLANK(cpu_transcoder
),
7539 (adjusted_mode
->crtc_hblank_start
- 1) |
7540 ((adjusted_mode
->crtc_hblank_end
- 1) << 16));
7541 I915_WRITE(HSYNC(cpu_transcoder
),
7542 (adjusted_mode
->crtc_hsync_start
- 1) |
7543 ((adjusted_mode
->crtc_hsync_end
- 1) << 16));
7545 I915_WRITE(VTOTAL(cpu_transcoder
),
7546 (adjusted_mode
->crtc_vdisplay
- 1) |
7547 ((crtc_vtotal
- 1) << 16));
7548 I915_WRITE(VBLANK(cpu_transcoder
),
7549 (adjusted_mode
->crtc_vblank_start
- 1) |
7550 ((crtc_vblank_end
- 1) << 16));
7551 I915_WRITE(VSYNC(cpu_transcoder
),
7552 (adjusted_mode
->crtc_vsync_start
- 1) |
7553 ((adjusted_mode
->crtc_vsync_end
- 1) << 16));
7555 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7556 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7557 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7559 if (IS_HASWELL(dev_priv
) && cpu_transcoder
== TRANSCODER_EDP
&&
7560 (pipe
== PIPE_B
|| pipe
== PIPE_C
))
7561 I915_WRITE(VTOTAL(pipe
), I915_READ(VTOTAL(cpu_transcoder
)));
7565 static void intel_set_pipe_src_size(const struct intel_crtc_state
*crtc_state
)
7567 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
7568 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
7569 enum pipe pipe
= crtc
->pipe
;
7571 /* pipesrc controls the size that is scaled from, which should
7572 * always be the user's requested size.
7574 I915_WRITE(PIPESRC(pipe
),
7575 ((crtc_state
->pipe_src_w
- 1) << 16) |
7576 (crtc_state
->pipe_src_h
- 1));
7579 static void intel_get_pipe_timings(struct intel_crtc
*crtc
,
7580 struct intel_crtc_state
*pipe_config
)
7582 struct drm_device
*dev
= crtc
->base
.dev
;
7583 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7584 enum transcoder cpu_transcoder
= pipe_config
->cpu_transcoder
;
7587 tmp
= I915_READ(HTOTAL(cpu_transcoder
));
7588 pipe_config
->base
.adjusted_mode
.crtc_hdisplay
= (tmp
& 0xffff) + 1;
7589 pipe_config
->base
.adjusted_mode
.crtc_htotal
= ((tmp
>> 16) & 0xffff) + 1;
7590 tmp
= I915_READ(HBLANK(cpu_transcoder
));
7591 pipe_config
->base
.adjusted_mode
.crtc_hblank_start
= (tmp
& 0xffff) + 1;
7592 pipe_config
->base
.adjusted_mode
.crtc_hblank_end
= ((tmp
>> 16) & 0xffff) + 1;
7593 tmp
= I915_READ(HSYNC(cpu_transcoder
));
7594 pipe_config
->base
.adjusted_mode
.crtc_hsync_start
= (tmp
& 0xffff) + 1;
7595 pipe_config
->base
.adjusted_mode
.crtc_hsync_end
= ((tmp
>> 16) & 0xffff) + 1;
7597 tmp
= I915_READ(VTOTAL(cpu_transcoder
));
7598 pipe_config
->base
.adjusted_mode
.crtc_vdisplay
= (tmp
& 0xffff) + 1;
7599 pipe_config
->base
.adjusted_mode
.crtc_vtotal
= ((tmp
>> 16) & 0xffff) + 1;
7600 tmp
= I915_READ(VBLANK(cpu_transcoder
));
7601 pipe_config
->base
.adjusted_mode
.crtc_vblank_start
= (tmp
& 0xffff) + 1;
7602 pipe_config
->base
.adjusted_mode
.crtc_vblank_end
= ((tmp
>> 16) & 0xffff) + 1;
7603 tmp
= I915_READ(VSYNC(cpu_transcoder
));
7604 pipe_config
->base
.adjusted_mode
.crtc_vsync_start
= (tmp
& 0xffff) + 1;
7605 pipe_config
->base
.adjusted_mode
.crtc_vsync_end
= ((tmp
>> 16) & 0xffff) + 1;
7607 if (I915_READ(PIPECONF(cpu_transcoder
)) & PIPECONF_INTERLACE_MASK
) {
7608 pipe_config
->base
.adjusted_mode
.flags
|= DRM_MODE_FLAG_INTERLACE
;
7609 pipe_config
->base
.adjusted_mode
.crtc_vtotal
+= 1;
7610 pipe_config
->base
.adjusted_mode
.crtc_vblank_end
+= 1;
7614 static void intel_get_pipe_src_size(struct intel_crtc
*crtc
,
7615 struct intel_crtc_state
*pipe_config
)
7617 struct drm_device
*dev
= crtc
->base
.dev
;
7618 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7621 tmp
= I915_READ(PIPESRC(crtc
->pipe
));
7622 pipe_config
->pipe_src_h
= (tmp
& 0xffff) + 1;
7623 pipe_config
->pipe_src_w
= ((tmp
>> 16) & 0xffff) + 1;
7625 pipe_config
->base
.mode
.vdisplay
= pipe_config
->pipe_src_h
;
7626 pipe_config
->base
.mode
.hdisplay
= pipe_config
->pipe_src_w
;
7629 void intel_mode_from_pipe_config(struct drm_display_mode
*mode
,
7630 struct intel_crtc_state
*pipe_config
)
7632 mode
->hdisplay
= pipe_config
->base
.adjusted_mode
.crtc_hdisplay
;
7633 mode
->htotal
= pipe_config
->base
.adjusted_mode
.crtc_htotal
;
7634 mode
->hsync_start
= pipe_config
->base
.adjusted_mode
.crtc_hsync_start
;
7635 mode
->hsync_end
= pipe_config
->base
.adjusted_mode
.crtc_hsync_end
;
7637 mode
->vdisplay
= pipe_config
->base
.adjusted_mode
.crtc_vdisplay
;
7638 mode
->vtotal
= pipe_config
->base
.adjusted_mode
.crtc_vtotal
;
7639 mode
->vsync_start
= pipe_config
->base
.adjusted_mode
.crtc_vsync_start
;
7640 mode
->vsync_end
= pipe_config
->base
.adjusted_mode
.crtc_vsync_end
;
7642 mode
->flags
= pipe_config
->base
.adjusted_mode
.flags
;
7643 mode
->type
= DRM_MODE_TYPE_DRIVER
;
7645 mode
->clock
= pipe_config
->base
.adjusted_mode
.crtc_clock
;
7647 mode
->hsync
= drm_mode_hsync(mode
);
7648 mode
->vrefresh
= drm_mode_vrefresh(mode
);
7649 drm_mode_set_name(mode
);
7652 static void i9xx_set_pipeconf(const struct intel_crtc_state
*crtc_state
)
7654 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
7655 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
7660 /* we keep both pipes enabled on 830 */
7661 if (IS_I830(dev_priv
))
7662 pipeconf
|= I915_READ(PIPECONF(crtc
->pipe
)) & PIPECONF_ENABLE
;
7664 if (crtc_state
->double_wide
)
7665 pipeconf
|= PIPECONF_DOUBLE_WIDE
;
7667 /* only g4x and later have fancy bpc/dither controls */
7668 if (IS_G4X(dev_priv
) || IS_VALLEYVIEW(dev_priv
) ||
7669 IS_CHERRYVIEW(dev_priv
)) {
7670 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7671 if (crtc_state
->dither
&& crtc_state
->pipe_bpp
!= 30)
7672 pipeconf
|= PIPECONF_DITHER_EN
|
7673 PIPECONF_DITHER_TYPE_SP
;
7675 switch (crtc_state
->pipe_bpp
) {
7677 pipeconf
|= PIPECONF_6BPC
;
7680 pipeconf
|= PIPECONF_8BPC
;
7683 pipeconf
|= PIPECONF_10BPC
;
7686 /* Case prevented by intel_choose_pipe_bpp_dither. */
7691 if (crtc_state
->base
.adjusted_mode
.flags
& DRM_MODE_FLAG_INTERLACE
) {
7692 if (INTEL_GEN(dev_priv
) < 4 ||
7693 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_SDVO
))
7694 pipeconf
|= PIPECONF_INTERLACE_W_FIELD_INDICATION
;
7696 pipeconf
|= PIPECONF_INTERLACE_W_SYNC_SHIFT
;
7698 pipeconf
|= PIPECONF_PROGRESSIVE
;
7700 if ((IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) &&
7701 crtc_state
->limited_color_range
)
7702 pipeconf
|= PIPECONF_COLOR_RANGE_SELECT
;
7704 I915_WRITE(PIPECONF(crtc
->pipe
), pipeconf
);
7705 POSTING_READ(PIPECONF(crtc
->pipe
));
7708 static int i8xx_crtc_compute_clock(struct intel_crtc
*crtc
,
7709 struct intel_crtc_state
*crtc_state
)
7711 struct drm_device
*dev
= crtc
->base
.dev
;
7712 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7713 const struct intel_limit
*limit
;
7716 memset(&crtc_state
->dpll_hw_state
, 0,
7717 sizeof(crtc_state
->dpll_hw_state
));
7719 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
7720 if (intel_panel_use_ssc(dev_priv
)) {
7721 refclk
= dev_priv
->vbt
.lvds_ssc_freq
;
7722 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk
);
7725 limit
= &intel_limits_i8xx_lvds
;
7726 } else if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_DVO
)) {
7727 limit
= &intel_limits_i8xx_dvo
;
7729 limit
= &intel_limits_i8xx_dac
;
7732 if (!crtc_state
->clock_set
&&
7733 !i9xx_find_best_dpll(limit
, crtc_state
, crtc_state
->port_clock
,
7734 refclk
, NULL
, &crtc_state
->dpll
)) {
7735 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7739 i8xx_compute_dpll(crtc
, crtc_state
, NULL
);
7744 static int g4x_crtc_compute_clock(struct intel_crtc
*crtc
,
7745 struct intel_crtc_state
*crtc_state
)
7747 struct drm_device
*dev
= crtc
->base
.dev
;
7748 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7749 const struct intel_limit
*limit
;
7752 memset(&crtc_state
->dpll_hw_state
, 0,
7753 sizeof(crtc_state
->dpll_hw_state
));
7755 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
7756 if (intel_panel_use_ssc(dev_priv
)) {
7757 refclk
= dev_priv
->vbt
.lvds_ssc_freq
;
7758 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk
);
7761 if (intel_is_dual_link_lvds(dev
))
7762 limit
= &intel_limits_g4x_dual_channel_lvds
;
7764 limit
= &intel_limits_g4x_single_channel_lvds
;
7765 } else if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_HDMI
) ||
7766 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_ANALOG
)) {
7767 limit
= &intel_limits_g4x_hdmi
;
7768 } else if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_SDVO
)) {
7769 limit
= &intel_limits_g4x_sdvo
;
7771 /* The option is for other outputs */
7772 limit
= &intel_limits_i9xx_sdvo
;
7775 if (!crtc_state
->clock_set
&&
7776 !g4x_find_best_dpll(limit
, crtc_state
, crtc_state
->port_clock
,
7777 refclk
, NULL
, &crtc_state
->dpll
)) {
7778 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7782 i9xx_compute_dpll(crtc
, crtc_state
, NULL
);
7787 static int pnv_crtc_compute_clock(struct intel_crtc
*crtc
,
7788 struct intel_crtc_state
*crtc_state
)
7790 struct drm_device
*dev
= crtc
->base
.dev
;
7791 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7792 const struct intel_limit
*limit
;
7795 memset(&crtc_state
->dpll_hw_state
, 0,
7796 sizeof(crtc_state
->dpll_hw_state
));
7798 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
7799 if (intel_panel_use_ssc(dev_priv
)) {
7800 refclk
= dev_priv
->vbt
.lvds_ssc_freq
;
7801 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk
);
7804 limit
= &intel_limits_pineview_lvds
;
7806 limit
= &intel_limits_pineview_sdvo
;
7809 if (!crtc_state
->clock_set
&&
7810 !pnv_find_best_dpll(limit
, crtc_state
, crtc_state
->port_clock
,
7811 refclk
, NULL
, &crtc_state
->dpll
)) {
7812 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7816 i9xx_compute_dpll(crtc
, crtc_state
, NULL
);
7821 static int i9xx_crtc_compute_clock(struct intel_crtc
*crtc
,
7822 struct intel_crtc_state
*crtc_state
)
7824 struct drm_device
*dev
= crtc
->base
.dev
;
7825 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7826 const struct intel_limit
*limit
;
7829 memset(&crtc_state
->dpll_hw_state
, 0,
7830 sizeof(crtc_state
->dpll_hw_state
));
7832 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
7833 if (intel_panel_use_ssc(dev_priv
)) {
7834 refclk
= dev_priv
->vbt
.lvds_ssc_freq
;
7835 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk
);
7838 limit
= &intel_limits_i9xx_lvds
;
7840 limit
= &intel_limits_i9xx_sdvo
;
7843 if (!crtc_state
->clock_set
&&
7844 !i9xx_find_best_dpll(limit
, crtc_state
, crtc_state
->port_clock
,
7845 refclk
, NULL
, &crtc_state
->dpll
)) {
7846 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7850 i9xx_compute_dpll(crtc
, crtc_state
, NULL
);
7855 static int chv_crtc_compute_clock(struct intel_crtc
*crtc
,
7856 struct intel_crtc_state
*crtc_state
)
7858 int refclk
= 100000;
7859 const struct intel_limit
*limit
= &intel_limits_chv
;
7861 memset(&crtc_state
->dpll_hw_state
, 0,
7862 sizeof(crtc_state
->dpll_hw_state
));
7864 if (!crtc_state
->clock_set
&&
7865 !chv_find_best_dpll(limit
, crtc_state
, crtc_state
->port_clock
,
7866 refclk
, NULL
, &crtc_state
->dpll
)) {
7867 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7871 chv_compute_dpll(crtc
, crtc_state
);
7876 static int vlv_crtc_compute_clock(struct intel_crtc
*crtc
,
7877 struct intel_crtc_state
*crtc_state
)
7879 int refclk
= 100000;
7880 const struct intel_limit
*limit
= &intel_limits_vlv
;
7882 memset(&crtc_state
->dpll_hw_state
, 0,
7883 sizeof(crtc_state
->dpll_hw_state
));
7885 if (!crtc_state
->clock_set
&&
7886 !vlv_find_best_dpll(limit
, crtc_state
, crtc_state
->port_clock
,
7887 refclk
, NULL
, &crtc_state
->dpll
)) {
7888 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7892 vlv_compute_dpll(crtc
, crtc_state
);
7897 static void i9xx_get_pfit_config(struct intel_crtc
*crtc
,
7898 struct intel_crtc_state
*pipe_config
)
7900 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
7903 if (INTEL_GEN(dev_priv
) <= 3 &&
7904 (IS_I830(dev_priv
) || !IS_MOBILE(dev_priv
)))
7907 tmp
= I915_READ(PFIT_CONTROL
);
7908 if (!(tmp
& PFIT_ENABLE
))
7911 /* Check whether the pfit is attached to our pipe. */
7912 if (INTEL_GEN(dev_priv
) < 4) {
7913 if (crtc
->pipe
!= PIPE_B
)
7916 if ((tmp
& PFIT_PIPE_MASK
) != (crtc
->pipe
<< PFIT_PIPE_SHIFT
))
7920 pipe_config
->gmch_pfit
.control
= tmp
;
7921 pipe_config
->gmch_pfit
.pgm_ratios
= I915_READ(PFIT_PGM_RATIOS
);
7924 static void vlv_crtc_clock_get(struct intel_crtc
*crtc
,
7925 struct intel_crtc_state
*pipe_config
)
7927 struct drm_device
*dev
= crtc
->base
.dev
;
7928 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7929 int pipe
= pipe_config
->cpu_transcoder
;
7932 int refclk
= 100000;
7934 /* In case of DSI, DPLL will not be used */
7935 if ((pipe_config
->dpll_hw_state
.dpll
& DPLL_VCO_ENABLE
) == 0)
7938 mutex_lock(&dev_priv
->sb_lock
);
7939 mdiv
= vlv_dpio_read(dev_priv
, pipe
, VLV_PLL_DW3(pipe
));
7940 mutex_unlock(&dev_priv
->sb_lock
);
7942 clock
.m1
= (mdiv
>> DPIO_M1DIV_SHIFT
) & 7;
7943 clock
.m2
= mdiv
& DPIO_M2DIV_MASK
;
7944 clock
.n
= (mdiv
>> DPIO_N_SHIFT
) & 0xf;
7945 clock
.p1
= (mdiv
>> DPIO_P1_SHIFT
) & 7;
7946 clock
.p2
= (mdiv
>> DPIO_P2_SHIFT
) & 0x1f;
7948 pipe_config
->port_clock
= vlv_calc_dpll_params(refclk
, &clock
);
7952 i9xx_get_initial_plane_config(struct intel_crtc
*crtc
,
7953 struct intel_initial_plane_config
*plane_config
)
7955 struct drm_device
*dev
= crtc
->base
.dev
;
7956 struct drm_i915_private
*dev_priv
= to_i915(dev
);
7957 struct intel_plane
*plane
= to_intel_plane(crtc
->base
.primary
);
7958 enum i9xx_plane_id i9xx_plane
= plane
->i9xx_plane
;
7960 u32 val
, base
, offset
;
7961 int fourcc
, pixel_format
;
7962 unsigned int aligned_height
;
7963 struct drm_framebuffer
*fb
;
7964 struct intel_framebuffer
*intel_fb
;
7966 if (!plane
->get_hw_state(plane
, &pipe
))
7969 WARN_ON(pipe
!= crtc
->pipe
);
7971 intel_fb
= kzalloc(sizeof(*intel_fb
), GFP_KERNEL
);
7973 DRM_DEBUG_KMS("failed to alloc fb\n");
7977 fb
= &intel_fb
->base
;
7981 val
= I915_READ(DSPCNTR(i9xx_plane
));
7983 if (INTEL_GEN(dev_priv
) >= 4) {
7984 if (val
& DISPPLANE_TILED
) {
7985 plane_config
->tiling
= I915_TILING_X
;
7986 fb
->modifier
= I915_FORMAT_MOD_X_TILED
;
7989 if (val
& DISPPLANE_ROTATE_180
)
7990 plane_config
->rotation
= DRM_MODE_ROTATE_180
;
7993 if (IS_CHERRYVIEW(dev_priv
) && pipe
== PIPE_B
&&
7994 val
& DISPPLANE_MIRROR
)
7995 plane_config
->rotation
|= DRM_MODE_REFLECT_X
;
7997 pixel_format
= val
& DISPPLANE_PIXFORMAT_MASK
;
7998 fourcc
= i9xx_format_to_fourcc(pixel_format
);
7999 fb
->format
= drm_format_info(fourcc
);
8001 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) {
8002 offset
= I915_READ(DSPOFFSET(i9xx_plane
));
8003 base
= I915_READ(DSPSURF(i9xx_plane
)) & 0xfffff000;
8004 } else if (INTEL_GEN(dev_priv
) >= 4) {
8005 if (plane_config
->tiling
)
8006 offset
= I915_READ(DSPTILEOFF(i9xx_plane
));
8008 offset
= I915_READ(DSPLINOFF(i9xx_plane
));
8009 base
= I915_READ(DSPSURF(i9xx_plane
)) & 0xfffff000;
8011 base
= I915_READ(DSPADDR(i9xx_plane
));
8013 plane_config
->base
= base
;
8015 val
= I915_READ(PIPESRC(pipe
));
8016 fb
->width
= ((val
>> 16) & 0xfff) + 1;
8017 fb
->height
= ((val
>> 0) & 0xfff) + 1;
8019 val
= I915_READ(DSPSTRIDE(i9xx_plane
));
8020 fb
->pitches
[0] = val
& 0xffffffc0;
8022 aligned_height
= intel_fb_align_height(fb
, 0, fb
->height
);
8024 plane_config
->size
= fb
->pitches
[0] * aligned_height
;
8026 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8027 crtc
->base
.name
, plane
->base
.name
, fb
->width
, fb
->height
,
8028 fb
->format
->cpp
[0] * 8, base
, fb
->pitches
[0],
8029 plane_config
->size
);
8031 plane_config
->fb
= intel_fb
;
8034 static void chv_crtc_clock_get(struct intel_crtc
*crtc
,
8035 struct intel_crtc_state
*pipe_config
)
8037 struct drm_device
*dev
= crtc
->base
.dev
;
8038 struct drm_i915_private
*dev_priv
= to_i915(dev
);
8039 int pipe
= pipe_config
->cpu_transcoder
;
8040 enum dpio_channel port
= vlv_pipe_to_channel(pipe
);
8042 u32 cmn_dw13
, pll_dw0
, pll_dw1
, pll_dw2
, pll_dw3
;
8043 int refclk
= 100000;
8045 /* In case of DSI, DPLL will not be used */
8046 if ((pipe_config
->dpll_hw_state
.dpll
& DPLL_VCO_ENABLE
) == 0)
8049 mutex_lock(&dev_priv
->sb_lock
);
8050 cmn_dw13
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW13(port
));
8051 pll_dw0
= vlv_dpio_read(dev_priv
, pipe
, CHV_PLL_DW0(port
));
8052 pll_dw1
= vlv_dpio_read(dev_priv
, pipe
, CHV_PLL_DW1(port
));
8053 pll_dw2
= vlv_dpio_read(dev_priv
, pipe
, CHV_PLL_DW2(port
));
8054 pll_dw3
= vlv_dpio_read(dev_priv
, pipe
, CHV_PLL_DW3(port
));
8055 mutex_unlock(&dev_priv
->sb_lock
);
8057 clock
.m1
= (pll_dw1
& 0x7) == DPIO_CHV_M1_DIV_BY_2
? 2 : 0;
8058 clock
.m2
= (pll_dw0
& 0xff) << 22;
8059 if (pll_dw3
& DPIO_CHV_FRAC_DIV_EN
)
8060 clock
.m2
|= pll_dw2
& 0x3fffff;
8061 clock
.n
= (pll_dw1
>> DPIO_CHV_N_DIV_SHIFT
) & 0xf;
8062 clock
.p1
= (cmn_dw13
>> DPIO_CHV_P1_DIV_SHIFT
) & 0x7;
8063 clock
.p2
= (cmn_dw13
>> DPIO_CHV_P2_DIV_SHIFT
) & 0x1f;
8065 pipe_config
->port_clock
= chv_calc_dpll_params(refclk
, &clock
);
8068 static void intel_get_crtc_ycbcr_config(struct intel_crtc
*crtc
,
8069 struct intel_crtc_state
*pipe_config
)
8071 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
8072 enum intel_output_format output
= INTEL_OUTPUT_FORMAT_RGB
;
8074 pipe_config
->lspcon_downsampling
= false;
8076 if (IS_BROADWELL(dev_priv
) || INTEL_GEN(dev_priv
) >= 9) {
8077 u32 tmp
= I915_READ(PIPEMISC(crtc
->pipe
));
8079 if (tmp
& PIPEMISC_OUTPUT_COLORSPACE_YUV
) {
8080 bool ycbcr420_enabled
= tmp
& PIPEMISC_YUV420_ENABLE
;
8081 bool blend
= tmp
& PIPEMISC_YUV420_MODE_FULL_BLEND
;
8083 if (ycbcr420_enabled
) {
8084 /* We support 4:2:0 in full blend mode only */
8086 output
= INTEL_OUTPUT_FORMAT_INVALID
;
8087 else if (!(IS_GEMINILAKE(dev_priv
) ||
8088 INTEL_GEN(dev_priv
) >= 10))
8089 output
= INTEL_OUTPUT_FORMAT_INVALID
;
8091 output
= INTEL_OUTPUT_FORMAT_YCBCR420
;
8094 * Currently there is no interface defined to
8095 * check user preference between RGB/YCBCR444
8096 * or YCBCR420. So the only possible case for
8097 * YCBCR444 usage is driving YCBCR420 output
8098 * with LSPCON, when pipe is configured for
8099 * YCBCR444 output and LSPCON takes care of
8102 pipe_config
->lspcon_downsampling
= true;
8103 output
= INTEL_OUTPUT_FORMAT_YCBCR444
;
8108 pipe_config
->output_format
= output
;
8111 static bool i9xx_get_pipe_config(struct intel_crtc
*crtc
,
8112 struct intel_crtc_state
*pipe_config
)
8114 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
8115 enum intel_display_power_domain power_domain
;
8116 intel_wakeref_t wakeref
;
8120 power_domain
= POWER_DOMAIN_PIPE(crtc
->pipe
);
8121 wakeref
= intel_display_power_get_if_enabled(dev_priv
, power_domain
);
8125 pipe_config
->output_format
= INTEL_OUTPUT_FORMAT_RGB
;
8126 pipe_config
->cpu_transcoder
= (enum transcoder
) crtc
->pipe
;
8127 pipe_config
->shared_dpll
= NULL
;
8131 tmp
= I915_READ(PIPECONF(crtc
->pipe
));
8132 if (!(tmp
& PIPECONF_ENABLE
))
8135 if (IS_G4X(dev_priv
) || IS_VALLEYVIEW(dev_priv
) ||
8136 IS_CHERRYVIEW(dev_priv
)) {
8137 switch (tmp
& PIPECONF_BPC_MASK
) {
8139 pipe_config
->pipe_bpp
= 18;
8142 pipe_config
->pipe_bpp
= 24;
8144 case PIPECONF_10BPC
:
8145 pipe_config
->pipe_bpp
= 30;
8152 if ((IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) &&
8153 (tmp
& PIPECONF_COLOR_RANGE_SELECT
))
8154 pipe_config
->limited_color_range
= true;
8156 if (INTEL_GEN(dev_priv
) < 4)
8157 pipe_config
->double_wide
= tmp
& PIPECONF_DOUBLE_WIDE
;
8159 intel_get_pipe_timings(crtc
, pipe_config
);
8160 intel_get_pipe_src_size(crtc
, pipe_config
);
8162 i9xx_get_pfit_config(crtc
, pipe_config
);
8164 if (INTEL_GEN(dev_priv
) >= 4) {
8165 /* No way to read it out on pipes B and C */
8166 if (IS_CHERRYVIEW(dev_priv
) && crtc
->pipe
!= PIPE_A
)
8167 tmp
= dev_priv
->chv_dpll_md
[crtc
->pipe
];
8169 tmp
= I915_READ(DPLL_MD(crtc
->pipe
));
8170 pipe_config
->pixel_multiplier
=
8171 ((tmp
& DPLL_MD_UDI_MULTIPLIER_MASK
)
8172 >> DPLL_MD_UDI_MULTIPLIER_SHIFT
) + 1;
8173 pipe_config
->dpll_hw_state
.dpll_md
= tmp
;
8174 } else if (IS_I945G(dev_priv
) || IS_I945GM(dev_priv
) ||
8175 IS_G33(dev_priv
) || IS_PINEVIEW(dev_priv
)) {
8176 tmp
= I915_READ(DPLL(crtc
->pipe
));
8177 pipe_config
->pixel_multiplier
=
8178 ((tmp
& SDVO_MULTIPLIER_MASK
)
8179 >> SDVO_MULTIPLIER_SHIFT_HIRES
) + 1;
8181 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8182 * port and will be fixed up in the encoder->get_config
8184 pipe_config
->pixel_multiplier
= 1;
8186 pipe_config
->dpll_hw_state
.dpll
= I915_READ(DPLL(crtc
->pipe
));
8187 if (!IS_VALLEYVIEW(dev_priv
) && !IS_CHERRYVIEW(dev_priv
)) {
8189 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8190 * on 830. Filter it out here so that we don't
8191 * report errors due to that.
8193 if (IS_I830(dev_priv
))
8194 pipe_config
->dpll_hw_state
.dpll
&= ~DPLL_DVO_2X_MODE
;
8196 pipe_config
->dpll_hw_state
.fp0
= I915_READ(FP0(crtc
->pipe
));
8197 pipe_config
->dpll_hw_state
.fp1
= I915_READ(FP1(crtc
->pipe
));
8199 /* Mask out read-only status bits. */
8200 pipe_config
->dpll_hw_state
.dpll
&= ~(DPLL_LOCK_VLV
|
8201 DPLL_PORTC_READY_MASK
|
8202 DPLL_PORTB_READY_MASK
);
8205 if (IS_CHERRYVIEW(dev_priv
))
8206 chv_crtc_clock_get(crtc
, pipe_config
);
8207 else if (IS_VALLEYVIEW(dev_priv
))
8208 vlv_crtc_clock_get(crtc
, pipe_config
);
8210 i9xx_crtc_clock_get(crtc
, pipe_config
);
8213 * Normally the dotclock is filled in by the encoder .get_config()
8214 * but in case the pipe is enabled w/o any ports we need a sane
8217 pipe_config
->base
.adjusted_mode
.crtc_clock
=
8218 pipe_config
->port_clock
/ pipe_config
->pixel_multiplier
;
8223 intel_display_power_put(dev_priv
, power_domain
, wakeref
);
8228 static void ironlake_init_pch_refclk(struct drm_i915_private
*dev_priv
)
8230 struct intel_encoder
*encoder
;
8233 bool has_lvds
= false;
8234 bool has_cpu_edp
= false;
8235 bool has_panel
= false;
8236 bool has_ck505
= false;
8237 bool can_ssc
= false;
8238 bool using_ssc_source
= false;
8240 /* We need to take the global config into account */
8241 for_each_intel_encoder(&dev_priv
->drm
, encoder
) {
8242 switch (encoder
->type
) {
8243 case INTEL_OUTPUT_LVDS
:
8247 case INTEL_OUTPUT_EDP
:
8249 if (encoder
->port
== PORT_A
)
8257 if (HAS_PCH_IBX(dev_priv
)) {
8258 has_ck505
= dev_priv
->vbt
.display_clock_mode
;
8259 can_ssc
= has_ck505
;
8265 /* Check if any DPLLs are using the SSC source */
8266 for (i
= 0; i
< dev_priv
->num_shared_dpll
; i
++) {
8267 u32 temp
= I915_READ(PCH_DPLL(i
));
8269 if (!(temp
& DPLL_VCO_ENABLE
))
8272 if ((temp
& PLL_REF_INPUT_MASK
) ==
8273 PLLB_REF_INPUT_SPREADSPECTRUMIN
) {
8274 using_ssc_source
= true;
8279 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8280 has_panel
, has_lvds
, has_ck505
, using_ssc_source
);
8282 /* Ironlake: try to setup display ref clock before DPLL
8283 * enabling. This is only under driver's control after
8284 * PCH B stepping, previous chipset stepping should be
8285 * ignoring this setting.
8287 val
= I915_READ(PCH_DREF_CONTROL
);
8289 /* As we must carefully and slowly disable/enable each source in turn,
8290 * compute the final state we want first and check if we need to
8291 * make any changes at all.
8294 final
&= ~DREF_NONSPREAD_SOURCE_MASK
;
8296 final
|= DREF_NONSPREAD_CK505_ENABLE
;
8298 final
|= DREF_NONSPREAD_SOURCE_ENABLE
;
8300 final
&= ~DREF_SSC_SOURCE_MASK
;
8301 final
&= ~DREF_CPU_SOURCE_OUTPUT_MASK
;
8302 final
&= ~DREF_SSC1_ENABLE
;
8305 final
|= DREF_SSC_SOURCE_ENABLE
;
8307 if (intel_panel_use_ssc(dev_priv
) && can_ssc
)
8308 final
|= DREF_SSC1_ENABLE
;
8311 if (intel_panel_use_ssc(dev_priv
) && can_ssc
)
8312 final
|= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD
;
8314 final
|= DREF_CPU_SOURCE_OUTPUT_NONSPREAD
;
8316 final
|= DREF_CPU_SOURCE_OUTPUT_DISABLE
;
8317 } else if (using_ssc_source
) {
8318 final
|= DREF_SSC_SOURCE_ENABLE
;
8319 final
|= DREF_SSC1_ENABLE
;
8325 /* Always enable nonspread source */
8326 val
&= ~DREF_NONSPREAD_SOURCE_MASK
;
8329 val
|= DREF_NONSPREAD_CK505_ENABLE
;
8331 val
|= DREF_NONSPREAD_SOURCE_ENABLE
;
8334 val
&= ~DREF_SSC_SOURCE_MASK
;
8335 val
|= DREF_SSC_SOURCE_ENABLE
;
8337 /* SSC must be turned on before enabling the CPU output */
8338 if (intel_panel_use_ssc(dev_priv
) && can_ssc
) {
8339 DRM_DEBUG_KMS("Using SSC on panel\n");
8340 val
|= DREF_SSC1_ENABLE
;
8342 val
&= ~DREF_SSC1_ENABLE
;
8344 /* Get SSC going before enabling the outputs */
8345 I915_WRITE(PCH_DREF_CONTROL
, val
);
8346 POSTING_READ(PCH_DREF_CONTROL
);
8349 val
&= ~DREF_CPU_SOURCE_OUTPUT_MASK
;
8351 /* Enable CPU source on CPU attached eDP */
8353 if (intel_panel_use_ssc(dev_priv
) && can_ssc
) {
8354 DRM_DEBUG_KMS("Using SSC on eDP\n");
8355 val
|= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD
;
8357 val
|= DREF_CPU_SOURCE_OUTPUT_NONSPREAD
;
8359 val
|= DREF_CPU_SOURCE_OUTPUT_DISABLE
;
8361 I915_WRITE(PCH_DREF_CONTROL
, val
);
8362 POSTING_READ(PCH_DREF_CONTROL
);
8365 DRM_DEBUG_KMS("Disabling CPU source output\n");
8367 val
&= ~DREF_CPU_SOURCE_OUTPUT_MASK
;
8369 /* Turn off CPU output */
8370 val
|= DREF_CPU_SOURCE_OUTPUT_DISABLE
;
8372 I915_WRITE(PCH_DREF_CONTROL
, val
);
8373 POSTING_READ(PCH_DREF_CONTROL
);
8376 if (!using_ssc_source
) {
8377 DRM_DEBUG_KMS("Disabling SSC source\n");
8379 /* Turn off the SSC source */
8380 val
&= ~DREF_SSC_SOURCE_MASK
;
8381 val
|= DREF_SSC_SOURCE_DISABLE
;
8384 val
&= ~DREF_SSC1_ENABLE
;
8386 I915_WRITE(PCH_DREF_CONTROL
, val
);
8387 POSTING_READ(PCH_DREF_CONTROL
);
8392 BUG_ON(val
!= final
);
8395 static void lpt_reset_fdi_mphy(struct drm_i915_private
*dev_priv
)
8399 tmp
= I915_READ(SOUTH_CHICKEN2
);
8400 tmp
|= FDI_MPHY_IOSFSB_RESET_CTL
;
8401 I915_WRITE(SOUTH_CHICKEN2
, tmp
);
8403 if (wait_for_us(I915_READ(SOUTH_CHICKEN2
) &
8404 FDI_MPHY_IOSFSB_RESET_STATUS
, 100))
8405 DRM_ERROR("FDI mPHY reset assert timeout\n");
8407 tmp
= I915_READ(SOUTH_CHICKEN2
);
8408 tmp
&= ~FDI_MPHY_IOSFSB_RESET_CTL
;
8409 I915_WRITE(SOUTH_CHICKEN2
, tmp
);
8411 if (wait_for_us((I915_READ(SOUTH_CHICKEN2
) &
8412 FDI_MPHY_IOSFSB_RESET_STATUS
) == 0, 100))
8413 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8416 /* WaMPhyProgramming:hsw */
8417 static void lpt_program_fdi_mphy(struct drm_i915_private
*dev_priv
)
8421 tmp
= intel_sbi_read(dev_priv
, 0x8008, SBI_MPHY
);
8422 tmp
&= ~(0xFF << 24);
8423 tmp
|= (0x12 << 24);
8424 intel_sbi_write(dev_priv
, 0x8008, tmp
, SBI_MPHY
);
8426 tmp
= intel_sbi_read(dev_priv
, 0x2008, SBI_MPHY
);
8428 intel_sbi_write(dev_priv
, 0x2008, tmp
, SBI_MPHY
);
8430 tmp
= intel_sbi_read(dev_priv
, 0x2108, SBI_MPHY
);
8432 intel_sbi_write(dev_priv
, 0x2108, tmp
, SBI_MPHY
);
8434 tmp
= intel_sbi_read(dev_priv
, 0x206C, SBI_MPHY
);
8435 tmp
|= (1 << 24) | (1 << 21) | (1 << 18);
8436 intel_sbi_write(dev_priv
, 0x206C, tmp
, SBI_MPHY
);
8438 tmp
= intel_sbi_read(dev_priv
, 0x216C, SBI_MPHY
);
8439 tmp
|= (1 << 24) | (1 << 21) | (1 << 18);
8440 intel_sbi_write(dev_priv
, 0x216C, tmp
, SBI_MPHY
);
8442 tmp
= intel_sbi_read(dev_priv
, 0x2080, SBI_MPHY
);
8445 intel_sbi_write(dev_priv
, 0x2080, tmp
, SBI_MPHY
);
8447 tmp
= intel_sbi_read(dev_priv
, 0x2180, SBI_MPHY
);
8450 intel_sbi_write(dev_priv
, 0x2180, tmp
, SBI_MPHY
);
8452 tmp
= intel_sbi_read(dev_priv
, 0x208C, SBI_MPHY
);
8455 intel_sbi_write(dev_priv
, 0x208C, tmp
, SBI_MPHY
);
8457 tmp
= intel_sbi_read(dev_priv
, 0x218C, SBI_MPHY
);
8460 intel_sbi_write(dev_priv
, 0x218C, tmp
, SBI_MPHY
);
8462 tmp
= intel_sbi_read(dev_priv
, 0x2098, SBI_MPHY
);
8463 tmp
&= ~(0xFF << 16);
8464 tmp
|= (0x1C << 16);
8465 intel_sbi_write(dev_priv
, 0x2098, tmp
, SBI_MPHY
);
8467 tmp
= intel_sbi_read(dev_priv
, 0x2198, SBI_MPHY
);
8468 tmp
&= ~(0xFF << 16);
8469 tmp
|= (0x1C << 16);
8470 intel_sbi_write(dev_priv
, 0x2198, tmp
, SBI_MPHY
);
8472 tmp
= intel_sbi_read(dev_priv
, 0x20C4, SBI_MPHY
);
8474 intel_sbi_write(dev_priv
, 0x20C4, tmp
, SBI_MPHY
);
8476 tmp
= intel_sbi_read(dev_priv
, 0x21C4, SBI_MPHY
);
8478 intel_sbi_write(dev_priv
, 0x21C4, tmp
, SBI_MPHY
);
8480 tmp
= intel_sbi_read(dev_priv
, 0x20EC, SBI_MPHY
);
8481 tmp
&= ~(0xF << 28);
8483 intel_sbi_write(dev_priv
, 0x20EC, tmp
, SBI_MPHY
);
8485 tmp
= intel_sbi_read(dev_priv
, 0x21EC, SBI_MPHY
);
8486 tmp
&= ~(0xF << 28);
8488 intel_sbi_write(dev_priv
, 0x21EC, tmp
, SBI_MPHY
);
8491 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8492 * Programming" based on the parameters passed:
8493 * - Sequence to enable CLKOUT_DP
8494 * - Sequence to enable CLKOUT_DP without spread
8495 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8497 static void lpt_enable_clkout_dp(struct drm_i915_private
*dev_priv
,
8498 bool with_spread
, bool with_fdi
)
8502 if (WARN(with_fdi
&& !with_spread
, "FDI requires downspread\n"))
8504 if (WARN(HAS_PCH_LPT_LP(dev_priv
) &&
8505 with_fdi
, "LP PCH doesn't have FDI\n"))
8508 mutex_lock(&dev_priv
->sb_lock
);
8510 tmp
= intel_sbi_read(dev_priv
, SBI_SSCCTL
, SBI_ICLK
);
8511 tmp
&= ~SBI_SSCCTL_DISABLE
;
8512 tmp
|= SBI_SSCCTL_PATHALT
;
8513 intel_sbi_write(dev_priv
, SBI_SSCCTL
, tmp
, SBI_ICLK
);
8518 tmp
= intel_sbi_read(dev_priv
, SBI_SSCCTL
, SBI_ICLK
);
8519 tmp
&= ~SBI_SSCCTL_PATHALT
;
8520 intel_sbi_write(dev_priv
, SBI_SSCCTL
, tmp
, SBI_ICLK
);
8523 lpt_reset_fdi_mphy(dev_priv
);
8524 lpt_program_fdi_mphy(dev_priv
);
8528 reg
= HAS_PCH_LPT_LP(dev_priv
) ? SBI_GEN0
: SBI_DBUFF0
;
8529 tmp
= intel_sbi_read(dev_priv
, reg
, SBI_ICLK
);
8530 tmp
|= SBI_GEN0_CFG_BUFFENABLE_DISABLE
;
8531 intel_sbi_write(dev_priv
, reg
, tmp
, SBI_ICLK
);
8533 mutex_unlock(&dev_priv
->sb_lock
);
8536 /* Sequence to disable CLKOUT_DP */
8537 static void lpt_disable_clkout_dp(struct drm_i915_private
*dev_priv
)
8541 mutex_lock(&dev_priv
->sb_lock
);
8543 reg
= HAS_PCH_LPT_LP(dev_priv
) ? SBI_GEN0
: SBI_DBUFF0
;
8544 tmp
= intel_sbi_read(dev_priv
, reg
, SBI_ICLK
);
8545 tmp
&= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE
;
8546 intel_sbi_write(dev_priv
, reg
, tmp
, SBI_ICLK
);
8548 tmp
= intel_sbi_read(dev_priv
, SBI_SSCCTL
, SBI_ICLK
);
8549 if (!(tmp
& SBI_SSCCTL_DISABLE
)) {
8550 if (!(tmp
& SBI_SSCCTL_PATHALT
)) {
8551 tmp
|= SBI_SSCCTL_PATHALT
;
8552 intel_sbi_write(dev_priv
, SBI_SSCCTL
, tmp
, SBI_ICLK
);
8555 tmp
|= SBI_SSCCTL_DISABLE
;
8556 intel_sbi_write(dev_priv
, SBI_SSCCTL
, tmp
, SBI_ICLK
);
8559 mutex_unlock(&dev_priv
->sb_lock
);
8562 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8564 static const u16 sscdivintphase
[] = {
8565 [BEND_IDX( 50)] = 0x3B23,
8566 [BEND_IDX( 45)] = 0x3B23,
8567 [BEND_IDX( 40)] = 0x3C23,
8568 [BEND_IDX( 35)] = 0x3C23,
8569 [BEND_IDX( 30)] = 0x3D23,
8570 [BEND_IDX( 25)] = 0x3D23,
8571 [BEND_IDX( 20)] = 0x3E23,
8572 [BEND_IDX( 15)] = 0x3E23,
8573 [BEND_IDX( 10)] = 0x3F23,
8574 [BEND_IDX( 5)] = 0x3F23,
8575 [BEND_IDX( 0)] = 0x0025,
8576 [BEND_IDX( -5)] = 0x0025,
8577 [BEND_IDX(-10)] = 0x0125,
8578 [BEND_IDX(-15)] = 0x0125,
8579 [BEND_IDX(-20)] = 0x0225,
8580 [BEND_IDX(-25)] = 0x0225,
8581 [BEND_IDX(-30)] = 0x0325,
8582 [BEND_IDX(-35)] = 0x0325,
8583 [BEND_IDX(-40)] = 0x0425,
8584 [BEND_IDX(-45)] = 0x0425,
8585 [BEND_IDX(-50)] = 0x0525,
8590 * steps -50 to 50 inclusive, in steps of 5
8591 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8592 * change in clock period = -(steps / 10) * 5.787 ps
8594 static void lpt_bend_clkout_dp(struct drm_i915_private
*dev_priv
, int steps
)
8597 int idx
= BEND_IDX(steps
);
8599 if (WARN_ON(steps
% 5 != 0))
8602 if (WARN_ON(idx
>= ARRAY_SIZE(sscdivintphase
)))
8605 mutex_lock(&dev_priv
->sb_lock
);
8607 if (steps
% 10 != 0)
8611 intel_sbi_write(dev_priv
, SBI_SSCDITHPHASE
, tmp
, SBI_ICLK
);
8613 tmp
= intel_sbi_read(dev_priv
, SBI_SSCDIVINTPHASE
, SBI_ICLK
);
8615 tmp
|= sscdivintphase
[idx
];
8616 intel_sbi_write(dev_priv
, SBI_SSCDIVINTPHASE
, tmp
, SBI_ICLK
);
8618 mutex_unlock(&dev_priv
->sb_lock
);
8623 static void lpt_init_pch_refclk(struct drm_i915_private
*dev_priv
)
8625 struct intel_encoder
*encoder
;
8626 bool has_vga
= false;
8628 for_each_intel_encoder(&dev_priv
->drm
, encoder
) {
8629 switch (encoder
->type
) {
8630 case INTEL_OUTPUT_ANALOG
:
8639 lpt_bend_clkout_dp(dev_priv
, 0);
8640 lpt_enable_clkout_dp(dev_priv
, true, true);
8642 lpt_disable_clkout_dp(dev_priv
);
/*
 * Initialize reference clocks when the driver loads, dispatching on the
 * PCH generation (IBX/CPT vs LPT).
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ironlake_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
8657 static void ironlake_set_pipeconf(const struct intel_crtc_state
*crtc_state
)
8659 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
8660 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
8661 enum pipe pipe
= crtc
->pipe
;
8666 switch (crtc_state
->pipe_bpp
) {
8668 val
|= PIPECONF_6BPC
;
8671 val
|= PIPECONF_8BPC
;
8674 val
|= PIPECONF_10BPC
;
8677 val
|= PIPECONF_12BPC
;
8680 /* Case prevented by intel_choose_pipe_bpp_dither. */
8684 if (crtc_state
->dither
)
8685 val
|= (PIPECONF_DITHER_EN
| PIPECONF_DITHER_TYPE_SP
);
8687 if (crtc_state
->base
.adjusted_mode
.flags
& DRM_MODE_FLAG_INTERLACE
)
8688 val
|= PIPECONF_INTERLACED_ILK
;
8690 val
|= PIPECONF_PROGRESSIVE
;
8692 if (crtc_state
->limited_color_range
)
8693 val
|= PIPECONF_COLOR_RANGE_SELECT
;
8695 I915_WRITE(PIPECONF(pipe
), val
);
8696 POSTING_READ(PIPECONF(pipe
));
8699 static void haswell_set_pipeconf(const struct intel_crtc_state
*crtc_state
)
8701 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
8702 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
8703 enum transcoder cpu_transcoder
= crtc_state
->cpu_transcoder
;
8706 if (IS_HASWELL(dev_priv
) && crtc_state
->dither
)
8707 val
|= (PIPECONF_DITHER_EN
| PIPECONF_DITHER_TYPE_SP
);
8709 if (crtc_state
->base
.adjusted_mode
.flags
& DRM_MODE_FLAG_INTERLACE
)
8710 val
|= PIPECONF_INTERLACED_ILK
;
8712 val
|= PIPECONF_PROGRESSIVE
;
8714 I915_WRITE(PIPECONF(cpu_transcoder
), val
);
8715 POSTING_READ(PIPECONF(cpu_transcoder
));
8718 static void haswell_set_pipemisc(const struct intel_crtc_state
*crtc_state
)
8720 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
8721 struct drm_i915_private
*dev_priv
= to_i915(intel_crtc
->base
.dev
);
8723 if (IS_BROADWELL(dev_priv
) || INTEL_GEN(dev_priv
) >= 9) {
8726 switch (crtc_state
->pipe_bpp
) {
8728 val
|= PIPEMISC_DITHER_6_BPC
;
8731 val
|= PIPEMISC_DITHER_8_BPC
;
8734 val
|= PIPEMISC_DITHER_10_BPC
;
8737 val
|= PIPEMISC_DITHER_12_BPC
;
8740 /* Case prevented by pipe_config_set_bpp. */
8744 if (crtc_state
->dither
)
8745 val
|= PIPEMISC_DITHER_ENABLE
| PIPEMISC_DITHER_TYPE_SP
;
8747 if (crtc_state
->output_format
== INTEL_OUTPUT_FORMAT_YCBCR420
||
8748 crtc_state
->output_format
== INTEL_OUTPUT_FORMAT_YCBCR444
)
8749 val
|= PIPEMISC_OUTPUT_COLORSPACE_YUV
;
8751 if (crtc_state
->output_format
== INTEL_OUTPUT_FORMAT_YCBCR420
)
8752 val
|= PIPEMISC_YUV420_ENABLE
|
8753 PIPEMISC_YUV420_MODE_FULL_BLEND
;
8755 I915_WRITE(PIPEMISC(intel_crtc
->pipe
), val
);
8759 int ironlake_get_lanes_required(int target_clock
, int link_bw
, int bpp
)
8762 * Account for spread spectrum to avoid
8763 * oversubscribing the link. Max center spread
8764 * is 2.5%; use 5% for safety's sake.
8766 u32 bps
= target_clock
* bpp
* 21 / 20;
8767 return DIV_ROUND_UP(bps
, link_bw
* 8);
8770 static bool ironlake_needs_fb_cb_tune(struct dpll
*dpll
, int factor
)
8772 return i9xx_dpll_compute_m(dpll
) < factor
* dpll
->n
;
8775 static void ironlake_compute_dpll(struct intel_crtc
*intel_crtc
,
8776 struct intel_crtc_state
*crtc_state
,
8777 struct dpll
*reduced_clock
)
8779 struct drm_crtc
*crtc
= &intel_crtc
->base
;
8780 struct drm_device
*dev
= crtc
->dev
;
8781 struct drm_i915_private
*dev_priv
= to_i915(dev
);
8785 /* Enable autotuning of the PLL clock (if permissible) */
8787 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
8788 if ((intel_panel_use_ssc(dev_priv
) &&
8789 dev_priv
->vbt
.lvds_ssc_freq
== 100000) ||
8790 (HAS_PCH_IBX(dev_priv
) && intel_is_dual_link_lvds(dev
)))
8792 } else if (crtc_state
->sdvo_tv_clock
)
8795 fp
= i9xx_dpll_compute_fp(&crtc_state
->dpll
);
8797 if (ironlake_needs_fb_cb_tune(&crtc_state
->dpll
, factor
))
8800 if (reduced_clock
) {
8801 fp2
= i9xx_dpll_compute_fp(reduced_clock
);
8803 if (reduced_clock
->m
< factor
* reduced_clock
->n
)
8811 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
))
8812 dpll
|= DPLLB_MODE_LVDS
;
8814 dpll
|= DPLLB_MODE_DAC_SERIAL
;
8816 dpll
|= (crtc_state
->pixel_multiplier
- 1)
8817 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT
;
8819 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_SDVO
) ||
8820 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_HDMI
))
8821 dpll
|= DPLL_SDVO_HIGH_SPEED
;
8823 if (intel_crtc_has_dp_encoder(crtc_state
))
8824 dpll
|= DPLL_SDVO_HIGH_SPEED
;
8827 * The high speed IO clock is only really required for
8828 * SDVO/HDMI/DP, but we also enable it for CRT to make it
8829 * possible to share the DPLL between CRT and HDMI. Enabling
8830 * the clock needlessly does no real harm, except use up a
8831 * bit of power potentially.
8833 * We'll limit this to IVB with 3 pipes, since it has only two
8834 * DPLLs and so DPLL sharing is the only way to get three pipes
8835 * driving PCH ports at the same time. On SNB we could do this,
8836 * and potentially avoid enabling the second DPLL, but it's not
8837 * clear if it''s a win or loss power wise. No point in doing
8838 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
8840 if (INTEL_INFO(dev_priv
)->num_pipes
== 3 &&
8841 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_ANALOG
))
8842 dpll
|= DPLL_SDVO_HIGH_SPEED
;
8844 /* compute bitmask from p1 value */
8845 dpll
|= (1 << (crtc_state
->dpll
.p1
- 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT
;
8847 dpll
|= (1 << (crtc_state
->dpll
.p1
- 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT
;
8849 switch (crtc_state
->dpll
.p2
) {
8851 dpll
|= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5
;
8854 dpll
|= DPLLB_LVDS_P2_CLOCK_DIV_7
;
8857 dpll
|= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10
;
8860 dpll
|= DPLLB_LVDS_P2_CLOCK_DIV_14
;
8864 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
) &&
8865 intel_panel_use_ssc(dev_priv
))
8866 dpll
|= PLLB_REF_INPUT_SPREADSPECTRUMIN
;
8868 dpll
|= PLL_REF_INPUT_DREFCLK
;
8870 dpll
|= DPLL_VCO_ENABLE
;
8872 crtc_state
->dpll_hw_state
.dpll
= dpll
;
8873 crtc_state
->dpll_hw_state
.fp0
= fp
;
8874 crtc_state
->dpll_hw_state
.fp1
= fp2
;
8877 static int ironlake_crtc_compute_clock(struct intel_crtc
*crtc
,
8878 struct intel_crtc_state
*crtc_state
)
8880 struct drm_device
*dev
= crtc
->base
.dev
;
8881 struct drm_i915_private
*dev_priv
= to_i915(dev
);
8882 const struct intel_limit
*limit
;
8883 int refclk
= 120000;
8885 memset(&crtc_state
->dpll_hw_state
, 0,
8886 sizeof(crtc_state
->dpll_hw_state
));
8888 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8889 if (!crtc_state
->has_pch_encoder
)
8892 if (intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_LVDS
)) {
8893 if (intel_panel_use_ssc(dev_priv
)) {
8894 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8895 dev_priv
->vbt
.lvds_ssc_freq
);
8896 refclk
= dev_priv
->vbt
.lvds_ssc_freq
;
8899 if (intel_is_dual_link_lvds(dev
)) {
8900 if (refclk
== 100000)
8901 limit
= &intel_limits_ironlake_dual_lvds_100m
;
8903 limit
= &intel_limits_ironlake_dual_lvds
;
8905 if (refclk
== 100000)
8906 limit
= &intel_limits_ironlake_single_lvds_100m
;
8908 limit
= &intel_limits_ironlake_single_lvds
;
8911 limit
= &intel_limits_ironlake_dac
;
8914 if (!crtc_state
->clock_set
&&
8915 !g4x_find_best_dpll(limit
, crtc_state
, crtc_state
->port_clock
,
8916 refclk
, NULL
, &crtc_state
->dpll
)) {
8917 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8921 ironlake_compute_dpll(crtc
, crtc_state
, NULL
);
8923 if (!intel_get_shared_dpll(crtc
, crtc_state
, NULL
)) {
8924 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8925 pipe_name(crtc
->pipe
));
8932 static void intel_pch_transcoder_get_m_n(struct intel_crtc
*crtc
,
8933 struct intel_link_m_n
*m_n
)
8935 struct drm_device
*dev
= crtc
->base
.dev
;
8936 struct drm_i915_private
*dev_priv
= to_i915(dev
);
8937 enum pipe pipe
= crtc
->pipe
;
8939 m_n
->link_m
= I915_READ(PCH_TRANS_LINK_M1(pipe
));
8940 m_n
->link_n
= I915_READ(PCH_TRANS_LINK_N1(pipe
));
8941 m_n
->gmch_m
= I915_READ(PCH_TRANS_DATA_M1(pipe
))
8943 m_n
->gmch_n
= I915_READ(PCH_TRANS_DATA_N1(pipe
));
8944 m_n
->tu
= ((I915_READ(PCH_TRANS_DATA_M1(pipe
))
8945 & TU_SIZE_MASK
) >> TU_SIZE_SHIFT
) + 1;
8948 static void intel_cpu_transcoder_get_m_n(struct intel_crtc
*crtc
,
8949 enum transcoder transcoder
,
8950 struct intel_link_m_n
*m_n
,
8951 struct intel_link_m_n
*m2_n2
)
8953 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
8954 enum pipe pipe
= crtc
->pipe
;
8956 if (INTEL_GEN(dev_priv
) >= 5) {
8957 m_n
->link_m
= I915_READ(PIPE_LINK_M1(transcoder
));
8958 m_n
->link_n
= I915_READ(PIPE_LINK_N1(transcoder
));
8959 m_n
->gmch_m
= I915_READ(PIPE_DATA_M1(transcoder
))
8961 m_n
->gmch_n
= I915_READ(PIPE_DATA_N1(transcoder
));
8962 m_n
->tu
= ((I915_READ(PIPE_DATA_M1(transcoder
))
8963 & TU_SIZE_MASK
) >> TU_SIZE_SHIFT
) + 1;
8965 if (m2_n2
&& transcoder_has_m2_n2(dev_priv
, transcoder
)) {
8966 m2_n2
->link_m
= I915_READ(PIPE_LINK_M2(transcoder
));
8967 m2_n2
->link_n
= I915_READ(PIPE_LINK_N2(transcoder
));
8968 m2_n2
->gmch_m
= I915_READ(PIPE_DATA_M2(transcoder
))
8970 m2_n2
->gmch_n
= I915_READ(PIPE_DATA_N2(transcoder
));
8971 m2_n2
->tu
= ((I915_READ(PIPE_DATA_M2(transcoder
))
8972 & TU_SIZE_MASK
) >> TU_SIZE_SHIFT
) + 1;
8975 m_n
->link_m
= I915_READ(PIPE_LINK_M_G4X(pipe
));
8976 m_n
->link_n
= I915_READ(PIPE_LINK_N_G4X(pipe
));
8977 m_n
->gmch_m
= I915_READ(PIPE_DATA_M_G4X(pipe
))
8979 m_n
->gmch_n
= I915_READ(PIPE_DATA_N_G4X(pipe
));
8980 m_n
->tu
= ((I915_READ(PIPE_DATA_M_G4X(pipe
))
8981 & TU_SIZE_MASK
) >> TU_SIZE_SHIFT
) + 1;
8985 void intel_dp_get_m_n(struct intel_crtc
*crtc
,
8986 struct intel_crtc_state
*pipe_config
)
8988 if (pipe_config
->has_pch_encoder
)
8989 intel_pch_transcoder_get_m_n(crtc
, &pipe_config
->dp_m_n
);
8991 intel_cpu_transcoder_get_m_n(crtc
, pipe_config
->cpu_transcoder
,
8992 &pipe_config
->dp_m_n
,
8993 &pipe_config
->dp_m2_n2
);
8996 static void ironlake_get_fdi_m_n_config(struct intel_crtc
*crtc
,
8997 struct intel_crtc_state
*pipe_config
)
8999 intel_cpu_transcoder_get_m_n(crtc
, pipe_config
->cpu_transcoder
,
9000 &pipe_config
->fdi_m_n
, NULL
);
9003 static void skylake_get_pfit_config(struct intel_crtc
*crtc
,
9004 struct intel_crtc_state
*pipe_config
)
9006 struct drm_device
*dev
= crtc
->base
.dev
;
9007 struct drm_i915_private
*dev_priv
= to_i915(dev
);
9008 struct intel_crtc_scaler_state
*scaler_state
= &pipe_config
->scaler_state
;
9013 /* find scaler attached to this pipe */
9014 for (i
= 0; i
< crtc
->num_scalers
; i
++) {
9015 ps_ctrl
= I915_READ(SKL_PS_CTRL(crtc
->pipe
, i
));
9016 if (ps_ctrl
& PS_SCALER_EN
&& !(ps_ctrl
& PS_PLANE_SEL_MASK
)) {
9018 pipe_config
->pch_pfit
.enabled
= true;
9019 pipe_config
->pch_pfit
.pos
= I915_READ(SKL_PS_WIN_POS(crtc
->pipe
, i
));
9020 pipe_config
->pch_pfit
.size
= I915_READ(SKL_PS_WIN_SZ(crtc
->pipe
, i
));
9021 scaler_state
->scalers
[i
].in_use
= true;
9026 scaler_state
->scaler_id
= id
;
9028 scaler_state
->scaler_users
|= (1 << SKL_CRTC_INDEX
);
9030 scaler_state
->scaler_users
&= ~(1 << SKL_CRTC_INDEX
);
9035 skylake_get_initial_plane_config(struct intel_crtc
*crtc
,
9036 struct intel_initial_plane_config
*plane_config
)
9038 struct drm_device
*dev
= crtc
->base
.dev
;
9039 struct drm_i915_private
*dev_priv
= to_i915(dev
);
9040 struct intel_plane
*plane
= to_intel_plane(crtc
->base
.primary
);
9041 enum plane_id plane_id
= plane
->id
;
9043 u32 val
, base
, offset
, stride_mult
, tiling
, alpha
;
9044 int fourcc
, pixel_format
;
9045 unsigned int aligned_height
;
9046 struct drm_framebuffer
*fb
;
9047 struct intel_framebuffer
*intel_fb
;
9049 if (!plane
->get_hw_state(plane
, &pipe
))
9052 WARN_ON(pipe
!= crtc
->pipe
);
9054 intel_fb
= kzalloc(sizeof(*intel_fb
), GFP_KERNEL
);
9056 DRM_DEBUG_KMS("failed to alloc fb\n");
9060 fb
= &intel_fb
->base
;
9064 val
= I915_READ(PLANE_CTL(pipe
, plane_id
));
9066 if (INTEL_GEN(dev_priv
) >= 11)
9067 pixel_format
= val
& ICL_PLANE_CTL_FORMAT_MASK
;
9069 pixel_format
= val
& PLANE_CTL_FORMAT_MASK
;
9071 if (INTEL_GEN(dev_priv
) >= 10 || IS_GEMINILAKE(dev_priv
)) {
9072 alpha
= I915_READ(PLANE_COLOR_CTL(pipe
, plane_id
));
9073 alpha
&= PLANE_COLOR_ALPHA_MASK
;
9075 alpha
= val
& PLANE_CTL_ALPHA_MASK
;
9078 fourcc
= skl_format_to_fourcc(pixel_format
,
9079 val
& PLANE_CTL_ORDER_RGBX
, alpha
);
9080 fb
->format
= drm_format_info(fourcc
);
9082 tiling
= val
& PLANE_CTL_TILED_MASK
;
9084 case PLANE_CTL_TILED_LINEAR
:
9085 fb
->modifier
= DRM_FORMAT_MOD_LINEAR
;
9087 case PLANE_CTL_TILED_X
:
9088 plane_config
->tiling
= I915_TILING_X
;
9089 fb
->modifier
= I915_FORMAT_MOD_X_TILED
;
9091 case PLANE_CTL_TILED_Y
:
9092 plane_config
->tiling
= I915_TILING_Y
;
9093 if (val
& PLANE_CTL_RENDER_DECOMPRESSION_ENABLE
)
9094 fb
->modifier
= I915_FORMAT_MOD_Y_TILED_CCS
;
9096 fb
->modifier
= I915_FORMAT_MOD_Y_TILED
;
9098 case PLANE_CTL_TILED_YF
:
9099 if (val
& PLANE_CTL_RENDER_DECOMPRESSION_ENABLE
)
9100 fb
->modifier
= I915_FORMAT_MOD_Yf_TILED_CCS
;
9102 fb
->modifier
= I915_FORMAT_MOD_Yf_TILED
;
9105 MISSING_CASE(tiling
);
9110 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
9111 * while i915 HW rotation is clockwise, thats why this swapping.
9113 switch (val
& PLANE_CTL_ROTATE_MASK
) {
9114 case PLANE_CTL_ROTATE_0
:
9115 plane_config
->rotation
= DRM_MODE_ROTATE_0
;
9117 case PLANE_CTL_ROTATE_90
:
9118 plane_config
->rotation
= DRM_MODE_ROTATE_270
;
9120 case PLANE_CTL_ROTATE_180
:
9121 plane_config
->rotation
= DRM_MODE_ROTATE_180
;
9123 case PLANE_CTL_ROTATE_270
:
9124 plane_config
->rotation
= DRM_MODE_ROTATE_90
;
9128 if (INTEL_GEN(dev_priv
) >= 10 &&
9129 val
& PLANE_CTL_FLIP_HORIZONTAL
)
9130 plane_config
->rotation
|= DRM_MODE_REFLECT_X
;
9132 base
= I915_READ(PLANE_SURF(pipe
, plane_id
)) & 0xfffff000;
9133 plane_config
->base
= base
;
9135 offset
= I915_READ(PLANE_OFFSET(pipe
, plane_id
));
9137 val
= I915_READ(PLANE_SIZE(pipe
, plane_id
));
9138 fb
->height
= ((val
>> 16) & 0xfff) + 1;
9139 fb
->width
= ((val
>> 0) & 0x1fff) + 1;
9141 val
= I915_READ(PLANE_STRIDE(pipe
, plane_id
));
9142 stride_mult
= skl_plane_stride_mult(fb
, 0, DRM_MODE_ROTATE_0
);
9143 fb
->pitches
[0] = (val
& 0x3ff) * stride_mult
;
9145 aligned_height
= intel_fb_align_height(fb
, 0, fb
->height
);
9147 plane_config
->size
= fb
->pitches
[0] * aligned_height
;
9149 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9150 crtc
->base
.name
, plane
->base
.name
, fb
->width
, fb
->height
,
9151 fb
->format
->cpp
[0] * 8, base
, fb
->pitches
[0],
9152 plane_config
->size
);
9154 plane_config
->fb
= intel_fb
;
9161 static void ironlake_get_pfit_config(struct intel_crtc
*crtc
,
9162 struct intel_crtc_state
*pipe_config
)
9164 struct drm_device
*dev
= crtc
->base
.dev
;
9165 struct drm_i915_private
*dev_priv
= to_i915(dev
);
9168 tmp
= I915_READ(PF_CTL(crtc
->pipe
));
9170 if (tmp
& PF_ENABLE
) {
9171 pipe_config
->pch_pfit
.enabled
= true;
9172 pipe_config
->pch_pfit
.pos
= I915_READ(PF_WIN_POS(crtc
->pipe
));
9173 pipe_config
->pch_pfit
.size
= I915_READ(PF_WIN_SZ(crtc
->pipe
));
9175 /* We currently do not free assignements of panel fitters on
9176 * ivb/hsw (since we don't use the higher upscaling modes which
9177 * differentiates them) so just WARN about this case for now. */
9178 if (IS_GEN(dev_priv
, 7)) {
9179 WARN_ON((tmp
& PF_PIPE_SEL_MASK_IVB
) !=
9180 PF_PIPE_SEL_IVB(crtc
->pipe
));
9185 static bool ironlake_get_pipe_config(struct intel_crtc
*crtc
,
9186 struct intel_crtc_state
*pipe_config
)
9188 struct drm_device
*dev
= crtc
->base
.dev
;
9189 struct drm_i915_private
*dev_priv
= to_i915(dev
);
9190 enum intel_display_power_domain power_domain
;
9191 intel_wakeref_t wakeref
;
9195 power_domain
= POWER_DOMAIN_PIPE(crtc
->pipe
);
9196 wakeref
= intel_display_power_get_if_enabled(dev_priv
, power_domain
);
9200 pipe_config
->output_format
= INTEL_OUTPUT_FORMAT_RGB
;
9201 pipe_config
->cpu_transcoder
= (enum transcoder
) crtc
->pipe
;
9202 pipe_config
->shared_dpll
= NULL
;
9205 tmp
= I915_READ(PIPECONF(crtc
->pipe
));
9206 if (!(tmp
& PIPECONF_ENABLE
))
9209 switch (tmp
& PIPECONF_BPC_MASK
) {
9211 pipe_config
->pipe_bpp
= 18;
9214 pipe_config
->pipe_bpp
= 24;
9216 case PIPECONF_10BPC
:
9217 pipe_config
->pipe_bpp
= 30;
9219 case PIPECONF_12BPC
:
9220 pipe_config
->pipe_bpp
= 36;
9226 if (tmp
& PIPECONF_COLOR_RANGE_SELECT
)
9227 pipe_config
->limited_color_range
= true;
9229 if (I915_READ(PCH_TRANSCONF(crtc
->pipe
)) & TRANS_ENABLE
) {
9230 struct intel_shared_dpll
*pll
;
9231 enum intel_dpll_id pll_id
;
9233 pipe_config
->has_pch_encoder
= true;
9235 tmp
= I915_READ(FDI_RX_CTL(crtc
->pipe
));
9236 pipe_config
->fdi_lanes
= ((FDI_DP_PORT_WIDTH_MASK
& tmp
) >>
9237 FDI_DP_PORT_WIDTH_SHIFT
) + 1;
9239 ironlake_get_fdi_m_n_config(crtc
, pipe_config
);
9241 if (HAS_PCH_IBX(dev_priv
)) {
9243 * The pipe->pch transcoder and pch transcoder->pll
9246 pll_id
= (enum intel_dpll_id
) crtc
->pipe
;
9248 tmp
= I915_READ(PCH_DPLL_SEL
);
9249 if (tmp
& TRANS_DPLLB_SEL(crtc
->pipe
))
9250 pll_id
= DPLL_ID_PCH_PLL_B
;
9252 pll_id
= DPLL_ID_PCH_PLL_A
;
9255 pipe_config
->shared_dpll
=
9256 intel_get_shared_dpll_by_id(dev_priv
, pll_id
);
9257 pll
= pipe_config
->shared_dpll
;
9259 WARN_ON(!pll
->info
->funcs
->get_hw_state(dev_priv
, pll
,
9260 &pipe_config
->dpll_hw_state
));
9262 tmp
= pipe_config
->dpll_hw_state
.dpll
;
9263 pipe_config
->pixel_multiplier
=
9264 ((tmp
& PLL_REF_SDVO_HDMI_MULTIPLIER_MASK
)
9265 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT
) + 1;
9267 ironlake_pch_clock_get(crtc
, pipe_config
);
9269 pipe_config
->pixel_multiplier
= 1;
9272 intel_get_pipe_timings(crtc
, pipe_config
);
9273 intel_get_pipe_src_size(crtc
, pipe_config
);
9275 ironlake_get_pfit_config(crtc
, pipe_config
);
9280 intel_display_power_put(dev_priv
, power_domain
, wakeref
);
9285 static void assert_can_disable_lcpll(struct drm_i915_private
*dev_priv
)
9287 struct drm_device
*dev
= &dev_priv
->drm
;
9288 struct intel_crtc
*crtc
;
9290 for_each_intel_crtc(dev
, crtc
)
9291 I915_STATE_WARN(crtc
->active
, "CRTC for pipe %c enabled\n",
9292 pipe_name(crtc
->pipe
));
9294 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2
),
9295 "Display power well on\n");
9296 I915_STATE_WARN(I915_READ(SPLL_CTL
) & SPLL_PLL_ENABLE
, "SPLL enabled\n");
9297 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE
, "WRPLL1 enabled\n");
9298 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE
, "WRPLL2 enabled\n");
9299 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON
, "Panel power on\n");
9300 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2
) & BLM_PWM_ENABLE
,
9301 "CPU PWM1 enabled\n");
9302 if (IS_HASWELL(dev_priv
))
9303 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL
) & BLM_PWM_ENABLE
,
9304 "CPU PWM2 enabled\n");
9305 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1
) & BLM_PCH_PWM_ENABLE
,
9306 "PCH PWM1 enabled\n");
9307 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL
) & UTIL_PIN_ENABLE
,
9308 "Utility pin enabled\n");
9309 I915_STATE_WARN(I915_READ(PCH_GTC_CTL
) & PCH_GTC_ENABLE
, "PCH GTC enabled\n");
9312 * In theory we can still leave IRQs enabled, as long as only the HPD
9313 * interrupts remain enabled. We used to check for that, but since it's
9314 * gen-specific and since we only disable LCPLL after we fully disable
9315 * the interrupts, the check below should be enough.
9317 I915_STATE_WARN(intel_irqs_enabled(dev_priv
), "IRQs enabled\n");
9320 static u32
hsw_read_dcomp(struct drm_i915_private
*dev_priv
)
9322 if (IS_HASWELL(dev_priv
))
9323 return I915_READ(D_COMP_HSW
);
9325 return I915_READ(D_COMP_BDW
);
9328 static void hsw_write_dcomp(struct drm_i915_private
*dev_priv
, u32 val
)
9330 if (IS_HASWELL(dev_priv
)) {
9331 mutex_lock(&dev_priv
->pcu_lock
);
9332 if (sandybridge_pcode_write(dev_priv
, GEN6_PCODE_WRITE_D_COMP
,
9334 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9335 mutex_unlock(&dev_priv
->pcu_lock
);
9337 I915_WRITE(D_COMP_BDW
, val
);
9338 POSTING_READ(D_COMP_BDW
);
9343 * This function implements pieces of two sequences from BSpec:
9344 * - Sequence for display software to disable LCPLL
9345 * - Sequence for display software to allow package C8+
9346 * The steps implemented here are just the steps that actually touch the LCPLL
9347 * register. Callers should take care of disabling all the display engine
9348 * functions, doing the mode unset, fixing interrupts, etc.
9350 static void hsw_disable_lcpll(struct drm_i915_private
*dev_priv
,
9351 bool switch_to_fclk
, bool allow_power_down
)
9355 assert_can_disable_lcpll(dev_priv
);
9357 val
= I915_READ(LCPLL_CTL
);
9359 if (switch_to_fclk
) {
9360 val
|= LCPLL_CD_SOURCE_FCLK
;
9361 I915_WRITE(LCPLL_CTL
, val
);
9363 if (wait_for_us(I915_READ(LCPLL_CTL
) &
9364 LCPLL_CD_SOURCE_FCLK_DONE
, 1))
9365 DRM_ERROR("Switching to FCLK failed\n");
9367 val
= I915_READ(LCPLL_CTL
);
9370 val
|= LCPLL_PLL_DISABLE
;
9371 I915_WRITE(LCPLL_CTL
, val
);
9372 POSTING_READ(LCPLL_CTL
);
9374 if (intel_wait_for_register(dev_priv
, LCPLL_CTL
, LCPLL_PLL_LOCK
, 0, 1))
9375 DRM_ERROR("LCPLL still locked\n");
9377 val
= hsw_read_dcomp(dev_priv
);
9378 val
|= D_COMP_COMP_DISABLE
;
9379 hsw_write_dcomp(dev_priv
, val
);
9382 if (wait_for((hsw_read_dcomp(dev_priv
) & D_COMP_RCOMP_IN_PROGRESS
) == 0,
9384 DRM_ERROR("D_COMP RCOMP still in progress\n");
9386 if (allow_power_down
) {
9387 val
= I915_READ(LCPLL_CTL
);
9388 val
|= LCPLL_POWER_DOWN_ALLOW
;
9389 I915_WRITE(LCPLL_CTL
, val
);
9390 POSTING_READ(LCPLL_CTL
);
9395 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9398 static void hsw_restore_lcpll(struct drm_i915_private
*dev_priv
)
9402 val
= I915_READ(LCPLL_CTL
);
9404 if ((val
& (LCPLL_PLL_LOCK
| LCPLL_PLL_DISABLE
| LCPLL_CD_SOURCE_FCLK
|
9405 LCPLL_POWER_DOWN_ALLOW
)) == LCPLL_PLL_LOCK
)
9409 * Make sure we're not on PC8 state before disabling PC8, otherwise
9410 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9412 intel_uncore_forcewake_get(dev_priv
, FORCEWAKE_ALL
);
9414 if (val
& LCPLL_POWER_DOWN_ALLOW
) {
9415 val
&= ~LCPLL_POWER_DOWN_ALLOW
;
9416 I915_WRITE(LCPLL_CTL
, val
);
9417 POSTING_READ(LCPLL_CTL
);
9420 val
= hsw_read_dcomp(dev_priv
);
9421 val
|= D_COMP_COMP_FORCE
;
9422 val
&= ~D_COMP_COMP_DISABLE
;
9423 hsw_write_dcomp(dev_priv
, val
);
9425 val
= I915_READ(LCPLL_CTL
);
9426 val
&= ~LCPLL_PLL_DISABLE
;
9427 I915_WRITE(LCPLL_CTL
, val
);
9429 if (intel_wait_for_register(dev_priv
,
9430 LCPLL_CTL
, LCPLL_PLL_LOCK
, LCPLL_PLL_LOCK
,
9432 DRM_ERROR("LCPLL not locked yet\n");
9434 if (val
& LCPLL_CD_SOURCE_FCLK
) {
9435 val
= I915_READ(LCPLL_CTL
);
9436 val
&= ~LCPLL_CD_SOURCE_FCLK
;
9437 I915_WRITE(LCPLL_CTL
, val
);
9439 if (wait_for_us((I915_READ(LCPLL_CTL
) &
9440 LCPLL_CD_SOURCE_FCLK_DONE
) == 0, 1))
9441 DRM_ERROR("Switching back to LCPLL failed\n");
9444 intel_uncore_forcewake_put(dev_priv
, FORCEWAKE_ALL
);
9446 intel_update_cdclk(dev_priv
);
9447 intel_dump_cdclk_state(&dev_priv
->cdclk
.hw
, "Current CDCLK");
9451 * Package states C8 and deeper are really deep PC states that can only be
9452 * reached when all the devices on the system allow it, so even if the graphics
9453 * device allows PC8+, it doesn't mean the system will actually get to these
9454 * states. Our driver only allows PC8+ when going into runtime PM.
9456 * The requirements for PC8+ are that all the outputs are disabled, the power
9457 * well is disabled and most interrupts are disabled, and these are also
9458 * requirements for runtime PM. When these conditions are met, we manually do
9459 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9460 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9463 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9464 * the state of some registers, so when we come back from PC8+ we need to
9465 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9466 * need to take care of the registers kept by RC6. Notice that this happens even
9467 * if we don't put the device in PCI D3 state (which is what currently happens
9468 * because of the runtime PM support).
9470 * For more, read "Display Sequences for Package C8" on the hardware
9473 void hsw_enable_pc8(struct drm_i915_private
*dev_priv
)
9477 DRM_DEBUG_KMS("Enabling package C8+\n");
9479 if (HAS_PCH_LPT_LP(dev_priv
)) {
9480 val
= I915_READ(SOUTH_DSPCLK_GATE_D
);
9481 val
&= ~PCH_LP_PARTITION_LEVEL_DISABLE
;
9482 I915_WRITE(SOUTH_DSPCLK_GATE_D
, val
);
9485 lpt_disable_clkout_dp(dev_priv
);
9486 hsw_disable_lcpll(dev_priv
, true, true);
9489 void hsw_disable_pc8(struct drm_i915_private
*dev_priv
)
9493 DRM_DEBUG_KMS("Disabling package C8+\n");
9495 hsw_restore_lcpll(dev_priv
);
9496 lpt_init_pch_refclk(dev_priv
);
9498 if (HAS_PCH_LPT_LP(dev_priv
)) {
9499 val
= I915_READ(SOUTH_DSPCLK_GATE_D
);
9500 val
|= PCH_LP_PARTITION_LEVEL_DISABLE
;
9501 I915_WRITE(SOUTH_DSPCLK_GATE_D
, val
);
9505 static int haswell_crtc_compute_clock(struct intel_crtc
*crtc
,
9506 struct intel_crtc_state
*crtc_state
)
9508 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
9509 struct intel_atomic_state
*state
=
9510 to_intel_atomic_state(crtc_state
->base
.state
);
9512 if (!intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_DSI
) ||
9513 IS_ICELAKE(dev_priv
)) {
9514 struct intel_encoder
*encoder
=
9515 intel_get_crtc_new_encoder(state
, crtc_state
);
9517 if (!intel_get_shared_dpll(crtc
, crtc_state
, encoder
)) {
9518 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9519 pipe_name(crtc
->pipe
));
9527 static void cannonlake_get_ddi_pll(struct drm_i915_private
*dev_priv
,
9529 struct intel_crtc_state
*pipe_config
)
9531 enum intel_dpll_id id
;
9534 temp
= I915_READ(DPCLKA_CFGCR0
) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port
);
9535 id
= temp
>> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port
);
9537 if (WARN_ON(id
< SKL_DPLL0
|| id
> SKL_DPLL2
))
9540 pipe_config
->shared_dpll
= intel_get_shared_dpll_by_id(dev_priv
, id
);
9543 static void icelake_get_ddi_pll(struct drm_i915_private
*dev_priv
,
9545 struct intel_crtc_state
*pipe_config
)
9547 enum intel_dpll_id id
;
9550 /* TODO: TBT pll not implemented. */
9551 if (intel_port_is_combophy(dev_priv
, port
)) {
9552 temp
= I915_READ(DPCLKA_CFGCR0_ICL
) &
9553 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port
);
9554 id
= temp
>> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port
);
9556 if (WARN_ON(!intel_dpll_is_combophy(id
)))
9558 } else if (intel_port_is_tc(dev_priv
, port
)) {
9559 id
= icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv
, port
));
9561 WARN(1, "Invalid port %x\n", port
);
9565 pipe_config
->shared_dpll
= intel_get_shared_dpll_by_id(dev_priv
, id
);
9568 static void bxt_get_ddi_pll(struct drm_i915_private
*dev_priv
,
9570 struct intel_crtc_state
*pipe_config
)
9572 enum intel_dpll_id id
;
9576 id
= DPLL_ID_SKL_DPLL0
;
9579 id
= DPLL_ID_SKL_DPLL1
;
9582 id
= DPLL_ID_SKL_DPLL2
;
9585 DRM_ERROR("Incorrect port type\n");
9589 pipe_config
->shared_dpll
= intel_get_shared_dpll_by_id(dev_priv
, id
);
9592 static void skylake_get_ddi_pll(struct drm_i915_private
*dev_priv
,
9594 struct intel_crtc_state
*pipe_config
)
9596 enum intel_dpll_id id
;
9599 temp
= I915_READ(DPLL_CTRL2
) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port
);
9600 id
= temp
>> (port
* 3 + 1);
9602 if (WARN_ON(id
< SKL_DPLL0
|| id
> SKL_DPLL3
))
9605 pipe_config
->shared_dpll
= intel_get_shared_dpll_by_id(dev_priv
, id
);
9608 static void haswell_get_ddi_pll(struct drm_i915_private
*dev_priv
,
9610 struct intel_crtc_state
*pipe_config
)
9612 enum intel_dpll_id id
;
9613 u32 ddi_pll_sel
= I915_READ(PORT_CLK_SEL(port
));
9615 switch (ddi_pll_sel
) {
9616 case PORT_CLK_SEL_WRPLL1
:
9617 id
= DPLL_ID_WRPLL1
;
9619 case PORT_CLK_SEL_WRPLL2
:
9620 id
= DPLL_ID_WRPLL2
;
9622 case PORT_CLK_SEL_SPLL
:
9625 case PORT_CLK_SEL_LCPLL_810
:
9626 id
= DPLL_ID_LCPLL_810
;
9628 case PORT_CLK_SEL_LCPLL_1350
:
9629 id
= DPLL_ID_LCPLL_1350
;
9631 case PORT_CLK_SEL_LCPLL_2700
:
9632 id
= DPLL_ID_LCPLL_2700
;
9635 MISSING_CASE(ddi_pll_sel
);
9637 case PORT_CLK_SEL_NONE
:
9641 pipe_config
->shared_dpll
= intel_get_shared_dpll_by_id(dev_priv
, id
);
9644 static bool hsw_get_transcoder_state(struct intel_crtc
*crtc
,
9645 struct intel_crtc_state
*pipe_config
,
9646 u64
*power_domain_mask
)
9648 struct drm_device
*dev
= crtc
->base
.dev
;
9649 struct drm_i915_private
*dev_priv
= to_i915(dev
);
9650 enum intel_display_power_domain power_domain
;
9651 unsigned long panel_transcoder_mask
= BIT(TRANSCODER_EDP
);
9652 unsigned long enabled_panel_transcoders
= 0;
9653 enum transcoder panel_transcoder
;
9656 if (IS_ICELAKE(dev_priv
))
9657 panel_transcoder_mask
|=
9658 BIT(TRANSCODER_DSI_0
) | BIT(TRANSCODER_DSI_1
);
9661 * The pipe->transcoder mapping is fixed with the exception of the eDP
9662 * and DSI transcoders handled below.
9664 pipe_config
->cpu_transcoder
= (enum transcoder
) crtc
->pipe
;
9667 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9668 * consistency and less surprising code; it's in always on power).
9670 for_each_set_bit(panel_transcoder
,
9671 &panel_transcoder_mask
,
9672 ARRAY_SIZE(INTEL_INFO(dev_priv
)->trans_offsets
)) {
9673 enum pipe trans_pipe
;
9675 tmp
= I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder
));
9676 if (!(tmp
& TRANS_DDI_FUNC_ENABLE
))
9680 * Log all enabled ones, only use the first one.
9682 * FIXME: This won't work for two separate DSI displays.
9684 enabled_panel_transcoders
|= BIT(panel_transcoder
);
9685 if (enabled_panel_transcoders
!= BIT(panel_transcoder
))
9688 switch (tmp
& TRANS_DDI_EDP_INPUT_MASK
) {
9690 WARN(1, "unknown pipe linked to transcoder %s\n",
9691 transcoder_name(panel_transcoder
));
9693 case TRANS_DDI_EDP_INPUT_A_ONOFF
:
9694 case TRANS_DDI_EDP_INPUT_A_ON
:
9695 trans_pipe
= PIPE_A
;
9697 case TRANS_DDI_EDP_INPUT_B_ONOFF
:
9698 trans_pipe
= PIPE_B
;
9700 case TRANS_DDI_EDP_INPUT_C_ONOFF
:
9701 trans_pipe
= PIPE_C
;
9705 if (trans_pipe
== crtc
->pipe
)
9706 pipe_config
->cpu_transcoder
= panel_transcoder
;
9710 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
9712 WARN_ON((enabled_panel_transcoders
& BIT(TRANSCODER_EDP
)) &&
9713 enabled_panel_transcoders
!= BIT(TRANSCODER_EDP
));
9715 power_domain
= POWER_DOMAIN_TRANSCODER(pipe_config
->cpu_transcoder
);
9716 if (!intel_display_power_get_if_enabled(dev_priv
, power_domain
))
9719 WARN_ON(*power_domain_mask
& BIT_ULL(power_domain
));
9720 *power_domain_mask
|= BIT_ULL(power_domain
);
9722 tmp
= I915_READ(PIPECONF(pipe_config
->cpu_transcoder
));
9724 return tmp
& PIPECONF_ENABLE
;
9727 static bool bxt_get_dsi_transcoder_state(struct intel_crtc
*crtc
,
9728 struct intel_crtc_state
*pipe_config
,
9729 u64
*power_domain_mask
)
9731 struct drm_device
*dev
= crtc
->base
.dev
;
9732 struct drm_i915_private
*dev_priv
= to_i915(dev
);
9733 enum intel_display_power_domain power_domain
;
9735 enum transcoder cpu_transcoder
;
9738 for_each_port_masked(port
, BIT(PORT_A
) | BIT(PORT_C
)) {
9740 cpu_transcoder
= TRANSCODER_DSI_A
;
9742 cpu_transcoder
= TRANSCODER_DSI_C
;
9744 power_domain
= POWER_DOMAIN_TRANSCODER(cpu_transcoder
);
9745 if (!intel_display_power_get_if_enabled(dev_priv
, power_domain
))
9748 WARN_ON(*power_domain_mask
& BIT_ULL(power_domain
));
9749 *power_domain_mask
|= BIT_ULL(power_domain
);
9752 * The PLL needs to be enabled with a valid divider
9753 * configuration, otherwise accessing DSI registers will hang
9754 * the machine. See BSpec North Display Engine
9755 * registers/MIPI[BXT]. We can break out here early, since we
9756 * need the same DSI PLL to be enabled for both DSI ports.
9758 if (!bxt_dsi_pll_is_enabled(dev_priv
))
9761 /* XXX: this works for video mode only */
9762 tmp
= I915_READ(BXT_MIPI_PORT_CTRL(port
));
9763 if (!(tmp
& DPI_ENABLE
))
9766 tmp
= I915_READ(MIPI_CTRL(port
));
9767 if ((tmp
& BXT_PIPE_SELECT_MASK
) != BXT_PIPE_SELECT(crtc
->pipe
))
9770 pipe_config
->cpu_transcoder
= cpu_transcoder
;
9774 return transcoder_is_dsi(pipe_config
->cpu_transcoder
);
9777 static void haswell_get_ddi_port_state(struct intel_crtc
*crtc
,
9778 struct intel_crtc_state
*pipe_config
)
9780 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
9781 struct intel_shared_dpll
*pll
;
9785 tmp
= I915_READ(TRANS_DDI_FUNC_CTL(pipe_config
->cpu_transcoder
));
9787 port
= (tmp
& TRANS_DDI_PORT_MASK
) >> TRANS_DDI_PORT_SHIFT
;
9789 if (IS_ICELAKE(dev_priv
))
9790 icelake_get_ddi_pll(dev_priv
, port
, pipe_config
);
9791 else if (IS_CANNONLAKE(dev_priv
))
9792 cannonlake_get_ddi_pll(dev_priv
, port
, pipe_config
);
9793 else if (IS_GEN9_BC(dev_priv
))
9794 skylake_get_ddi_pll(dev_priv
, port
, pipe_config
);
9795 else if (IS_GEN9_LP(dev_priv
))
9796 bxt_get_ddi_pll(dev_priv
, port
, pipe_config
);
9798 haswell_get_ddi_pll(dev_priv
, port
, pipe_config
);
9800 pll
= pipe_config
->shared_dpll
;
9802 WARN_ON(!pll
->info
->funcs
->get_hw_state(dev_priv
, pll
,
9803 &pipe_config
->dpll_hw_state
));
9807 * Haswell has only FDI/PCH transcoder A. It is which is connected to
9808 * DDI E. So just check whether this pipe is wired to DDI E and whether
9809 * the PCH transcoder is on.
9811 if (INTEL_GEN(dev_priv
) < 9 &&
9812 (port
== PORT_E
) && I915_READ(LPT_TRANSCONF
) & TRANS_ENABLE
) {
9813 pipe_config
->has_pch_encoder
= true;
9815 tmp
= I915_READ(FDI_RX_CTL(PIPE_A
));
9816 pipe_config
->fdi_lanes
= ((FDI_DP_PORT_WIDTH_MASK
& tmp
) >>
9817 FDI_DP_PORT_WIDTH_SHIFT
) + 1;
9819 ironlake_get_fdi_m_n_config(crtc
, pipe_config
);
9823 static bool haswell_get_pipe_config(struct intel_crtc
*crtc
,
9824 struct intel_crtc_state
*pipe_config
)
9826 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
9827 enum intel_display_power_domain power_domain
;
9828 u64 power_domain_mask
;
9831 intel_crtc_init_scalers(crtc
, pipe_config
);
9833 power_domain
= POWER_DOMAIN_PIPE(crtc
->pipe
);
9834 if (!intel_display_power_get_if_enabled(dev_priv
, power_domain
))
9836 power_domain_mask
= BIT_ULL(power_domain
);
9838 pipe_config
->shared_dpll
= NULL
;
9840 active
= hsw_get_transcoder_state(crtc
, pipe_config
, &power_domain_mask
);
9842 if (IS_GEN9_LP(dev_priv
) &&
9843 bxt_get_dsi_transcoder_state(crtc
, pipe_config
, &power_domain_mask
)) {
9851 if (!transcoder_is_dsi(pipe_config
->cpu_transcoder
) ||
9852 IS_ICELAKE(dev_priv
)) {
9853 haswell_get_ddi_port_state(crtc
, pipe_config
);
9854 intel_get_pipe_timings(crtc
, pipe_config
);
9857 intel_get_pipe_src_size(crtc
, pipe_config
);
9858 intel_get_crtc_ycbcr_config(crtc
, pipe_config
);
9860 pipe_config
->gamma_mode
=
9861 I915_READ(GAMMA_MODE(crtc
->pipe
)) & GAMMA_MODE_MODE_MASK
;
9863 power_domain
= POWER_DOMAIN_PIPE_PANEL_FITTER(crtc
->pipe
);
9864 if (intel_display_power_get_if_enabled(dev_priv
, power_domain
)) {
9865 WARN_ON(power_domain_mask
& BIT_ULL(power_domain
));
9866 power_domain_mask
|= BIT_ULL(power_domain
);
9868 if (INTEL_GEN(dev_priv
) >= 9)
9869 skylake_get_pfit_config(crtc
, pipe_config
);
9871 ironlake_get_pfit_config(crtc
, pipe_config
);
9874 if (hsw_crtc_supports_ips(crtc
)) {
9875 if (IS_HASWELL(dev_priv
))
9876 pipe_config
->ips_enabled
= I915_READ(IPS_CTL
) & IPS_ENABLE
;
9879 * We cannot readout IPS state on broadwell, set to
9880 * true so we can set it to a defined state on first
9883 pipe_config
->ips_enabled
= true;
9887 if (pipe_config
->cpu_transcoder
!= TRANSCODER_EDP
&&
9888 !transcoder_is_dsi(pipe_config
->cpu_transcoder
)) {
9889 pipe_config
->pixel_multiplier
=
9890 I915_READ(PIPE_MULT(pipe_config
->cpu_transcoder
)) + 1;
9892 pipe_config
->pixel_multiplier
= 1;
9896 for_each_power_domain(power_domain
, power_domain_mask
)
9897 intel_display_power_put_unchecked(dev_priv
, power_domain
);
9902 static u32
intel_cursor_base(const struct intel_plane_state
*plane_state
)
9904 struct drm_i915_private
*dev_priv
=
9905 to_i915(plane_state
->base
.plane
->dev
);
9906 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
9907 const struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
9910 if (INTEL_INFO(dev_priv
)->display
.cursor_needs_physical
)
9911 base
= obj
->phys_handle
->busaddr
;
9913 base
= intel_plane_ggtt_offset(plane_state
);
9915 base
+= plane_state
->color_plane
[0].offset
;
9917 /* ILK+ do this automagically */
9918 if (HAS_GMCH(dev_priv
) &&
9919 plane_state
->base
.rotation
& DRM_MODE_ROTATE_180
)
9920 base
+= (plane_state
->base
.crtc_h
*
9921 plane_state
->base
.crtc_w
- 1) * fb
->format
->cpp
[0];
9926 static u32
intel_cursor_position(const struct intel_plane_state
*plane_state
)
9928 int x
= plane_state
->base
.crtc_x
;
9929 int y
= plane_state
->base
.crtc_y
;
9933 pos
|= CURSOR_POS_SIGN
<< CURSOR_X_SHIFT
;
9936 pos
|= x
<< CURSOR_X_SHIFT
;
9939 pos
|= CURSOR_POS_SIGN
<< CURSOR_Y_SHIFT
;
9942 pos
|= y
<< CURSOR_Y_SHIFT
;
9947 static bool intel_cursor_size_ok(const struct intel_plane_state
*plane_state
)
9949 const struct drm_mode_config
*config
=
9950 &plane_state
->base
.plane
->dev
->mode_config
;
9951 int width
= plane_state
->base
.crtc_w
;
9952 int height
= plane_state
->base
.crtc_h
;
9954 return width
> 0 && width
<= config
->cursor_width
&&
9955 height
> 0 && height
<= config
->cursor_height
;
9958 static int intel_cursor_check_surface(struct intel_plane_state
*plane_state
)
9960 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
9961 unsigned int rotation
= plane_state
->base
.rotation
;
9966 intel_fill_fb_ggtt_view(&plane_state
->view
, fb
, rotation
);
9967 plane_state
->color_plane
[0].stride
= intel_fb_pitch(fb
, 0, rotation
);
9969 ret
= intel_plane_check_stride(plane_state
);
9973 src_x
= plane_state
->base
.src_x
>> 16;
9974 src_y
= plane_state
->base
.src_y
>> 16;
9976 intel_add_fb_offsets(&src_x
, &src_y
, plane_state
, 0);
9977 offset
= intel_plane_compute_aligned_offset(&src_x
, &src_y
,
9980 if (src_x
!= 0 || src_y
!= 0) {
9981 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
9985 plane_state
->color_plane
[0].offset
= offset
;
9990 static int intel_check_cursor(struct intel_crtc_state
*crtc_state
,
9991 struct intel_plane_state
*plane_state
)
9993 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
9996 if (fb
&& fb
->modifier
!= DRM_FORMAT_MOD_LINEAR
) {
9997 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10001 ret
= drm_atomic_helper_check_plane_state(&plane_state
->base
,
10003 DRM_PLANE_HELPER_NO_SCALING
,
10004 DRM_PLANE_HELPER_NO_SCALING
,
10009 if (!plane_state
->base
.visible
)
10012 ret
= intel_plane_check_src_coordinates(plane_state
);
10016 ret
= intel_cursor_check_surface(plane_state
);
10023 static unsigned int
10024 i845_cursor_max_stride(struct intel_plane
*plane
,
10025 u32 pixel_format
, u64 modifier
,
10026 unsigned int rotation
)
10031 static u32
i845_cursor_ctl_crtc(const struct intel_crtc_state
*crtc_state
)
10033 return CURSOR_GAMMA_ENABLE
;
10036 static u32
i845_cursor_ctl(const struct intel_crtc_state
*crtc_state
,
10037 const struct intel_plane_state
*plane_state
)
10039 return CURSOR_ENABLE
|
10040 CURSOR_FORMAT_ARGB
|
10041 CURSOR_STRIDE(plane_state
->color_plane
[0].stride
);
10044 static bool i845_cursor_size_ok(const struct intel_plane_state
*plane_state
)
10046 int width
= plane_state
->base
.crtc_w
;
10049 * 845g/865g are only limited by the width of their cursors,
10050 * the height is arbitrary up to the precision of the register.
10052 return intel_cursor_size_ok(plane_state
) && IS_ALIGNED(width
, 64);
10055 static int i845_check_cursor(struct intel_crtc_state
*crtc_state
,
10056 struct intel_plane_state
*plane_state
)
10058 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
10061 ret
= intel_check_cursor(crtc_state
, plane_state
);
10065 /* if we want to turn off the cursor ignore width and height */
10069 /* Check for which cursor types we support */
10070 if (!i845_cursor_size_ok(plane_state
)) {
10071 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10072 plane_state
->base
.crtc_w
,
10073 plane_state
->base
.crtc_h
);
10077 WARN_ON(plane_state
->base
.visible
&&
10078 plane_state
->color_plane
[0].stride
!= fb
->pitches
[0]);
10080 switch (fb
->pitches
[0]) {
10087 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10092 plane_state
->ctl
= i845_cursor_ctl(crtc_state
, plane_state
);
10097 static void i845_update_cursor(struct intel_plane
*plane
,
10098 const struct intel_crtc_state
*crtc_state
,
10099 const struct intel_plane_state
*plane_state
)
10101 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
10102 u32 cntl
= 0, base
= 0, pos
= 0, size
= 0;
10103 unsigned long irqflags
;
10105 if (plane_state
&& plane_state
->base
.visible
) {
10106 unsigned int width
= plane_state
->base
.crtc_w
;
10107 unsigned int height
= plane_state
->base
.crtc_h
;
10109 cntl
= plane_state
->ctl
|
10110 i845_cursor_ctl_crtc(crtc_state
);
10112 size
= (height
<< 12) | width
;
10114 base
= intel_cursor_base(plane_state
);
10115 pos
= intel_cursor_position(plane_state
);
10118 spin_lock_irqsave(&dev_priv
->uncore
.lock
, irqflags
);
10120 /* On these chipsets we can only modify the base/size/stride
10121 * whilst the cursor is disabled.
10123 if (plane
->cursor
.base
!= base
||
10124 plane
->cursor
.size
!= size
||
10125 plane
->cursor
.cntl
!= cntl
) {
10126 I915_WRITE_FW(CURCNTR(PIPE_A
), 0);
10127 I915_WRITE_FW(CURBASE(PIPE_A
), base
);
10128 I915_WRITE_FW(CURSIZE
, size
);
10129 I915_WRITE_FW(CURPOS(PIPE_A
), pos
);
10130 I915_WRITE_FW(CURCNTR(PIPE_A
), cntl
);
10132 plane
->cursor
.base
= base
;
10133 plane
->cursor
.size
= size
;
10134 plane
->cursor
.cntl
= cntl
;
10136 I915_WRITE_FW(CURPOS(PIPE_A
), pos
);
10139 spin_unlock_irqrestore(&dev_priv
->uncore
.lock
, irqflags
);
10142 static void i845_disable_cursor(struct intel_plane
*plane
,
10143 const struct intel_crtc_state
*crtc_state
)
10145 i845_update_cursor(plane
, crtc_state
, NULL
);
10148 static bool i845_cursor_get_hw_state(struct intel_plane
*plane
,
10151 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
10152 enum intel_display_power_domain power_domain
;
10153 intel_wakeref_t wakeref
;
10156 power_domain
= POWER_DOMAIN_PIPE(PIPE_A
);
10157 wakeref
= intel_display_power_get_if_enabled(dev_priv
, power_domain
);
10161 ret
= I915_READ(CURCNTR(PIPE_A
)) & CURSOR_ENABLE
;
10165 intel_display_power_put(dev_priv
, power_domain
, wakeref
);
10170 static unsigned int
10171 i9xx_cursor_max_stride(struct intel_plane
*plane
,
10172 u32 pixel_format
, u64 modifier
,
10173 unsigned int rotation
)
10175 return plane
->base
.dev
->mode_config
.cursor_width
* 4;
10178 static u32
i9xx_cursor_ctl_crtc(const struct intel_crtc_state
*crtc_state
)
10180 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
10181 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
10184 if (INTEL_GEN(dev_priv
) >= 11)
10187 cntl
|= MCURSOR_GAMMA_ENABLE
;
10189 if (HAS_DDI(dev_priv
))
10190 cntl
|= MCURSOR_PIPE_CSC_ENABLE
;
10192 if (INTEL_GEN(dev_priv
) < 5 && !IS_G4X(dev_priv
))
10193 cntl
|= MCURSOR_PIPE_SELECT(crtc
->pipe
);
10198 static u32
i9xx_cursor_ctl(const struct intel_crtc_state
*crtc_state
,
10199 const struct intel_plane_state
*plane_state
)
10201 struct drm_i915_private
*dev_priv
=
10202 to_i915(plane_state
->base
.plane
->dev
);
10205 if (IS_GEN(dev_priv
, 6) || IS_IVYBRIDGE(dev_priv
))
10206 cntl
|= MCURSOR_TRICKLE_FEED_DISABLE
;
10208 switch (plane_state
->base
.crtc_w
) {
10210 cntl
|= MCURSOR_MODE_64_ARGB_AX
;
10213 cntl
|= MCURSOR_MODE_128_ARGB_AX
;
10216 cntl
|= MCURSOR_MODE_256_ARGB_AX
;
10219 MISSING_CASE(plane_state
->base
.crtc_w
);
10223 if (plane_state
->base
.rotation
& DRM_MODE_ROTATE_180
)
10224 cntl
|= MCURSOR_ROTATE_180
;
10229 static bool i9xx_cursor_size_ok(const struct intel_plane_state
*plane_state
)
10231 struct drm_i915_private
*dev_priv
=
10232 to_i915(plane_state
->base
.plane
->dev
);
10233 int width
= plane_state
->base
.crtc_w
;
10234 int height
= plane_state
->base
.crtc_h
;
10236 if (!intel_cursor_size_ok(plane_state
))
10239 /* Cursor width is limited to a few power-of-two sizes */
10250 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10251 * height from 8 lines up to the cursor width, when the
10252 * cursor is not rotated. Everything else requires square
10255 if (HAS_CUR_FBC(dev_priv
) &&
10256 plane_state
->base
.rotation
& DRM_MODE_ROTATE_0
) {
10257 if (height
< 8 || height
> width
)
10260 if (height
!= width
)
10267 static int i9xx_check_cursor(struct intel_crtc_state
*crtc_state
,
10268 struct intel_plane_state
*plane_state
)
10270 struct intel_plane
*plane
= to_intel_plane(plane_state
->base
.plane
);
10271 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
10272 const struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
10273 enum pipe pipe
= plane
->pipe
;
10276 ret
= intel_check_cursor(crtc_state
, plane_state
);
10280 /* if we want to turn off the cursor ignore width and height */
10284 /* Check for which cursor types we support */
10285 if (!i9xx_cursor_size_ok(plane_state
)) {
10286 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10287 plane_state
->base
.crtc_w
,
10288 plane_state
->base
.crtc_h
);
10292 WARN_ON(plane_state
->base
.visible
&&
10293 plane_state
->color_plane
[0].stride
!= fb
->pitches
[0]);
10295 if (fb
->pitches
[0] != plane_state
->base
.crtc_w
* fb
->format
->cpp
[0]) {
10296 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10297 fb
->pitches
[0], plane_state
->base
.crtc_w
);
10302 * There's something wrong with the cursor on CHV pipe C.
10303 * If it straddles the left edge of the screen then
10304 * moving it away from the edge or disabling it often
10305 * results in a pipe underrun, and often that can lead to
10306 * dead pipe (constant underrun reported, and it scans
10307 * out just a solid color). To recover from that, the
10308 * display power well must be turned off and on again.
10309 * Refuse the put the cursor into that compromised position.
10311 if (IS_CHERRYVIEW(dev_priv
) && pipe
== PIPE_C
&&
10312 plane_state
->base
.visible
&& plane_state
->base
.crtc_x
< 0) {
10313 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10317 plane_state
->ctl
= i9xx_cursor_ctl(crtc_state
, plane_state
);
10322 static void i9xx_update_cursor(struct intel_plane
*plane
,
10323 const struct intel_crtc_state
*crtc_state
,
10324 const struct intel_plane_state
*plane_state
)
10326 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
10327 enum pipe pipe
= plane
->pipe
;
10328 u32 cntl
= 0, base
= 0, pos
= 0, fbc_ctl
= 0;
10329 unsigned long irqflags
;
10331 if (plane_state
&& plane_state
->base
.visible
) {
10332 cntl
= plane_state
->ctl
|
10333 i9xx_cursor_ctl_crtc(crtc_state
);
10335 if (plane_state
->base
.crtc_h
!= plane_state
->base
.crtc_w
)
10336 fbc_ctl
= CUR_FBC_CTL_EN
| (plane_state
->base
.crtc_h
- 1);
10338 base
= intel_cursor_base(plane_state
);
10339 pos
= intel_cursor_position(plane_state
);
10342 spin_lock_irqsave(&dev_priv
->uncore
.lock
, irqflags
);
10345 * On some platforms writing CURCNTR first will also
10346 * cause CURPOS to be armed by the CURBASE write.
10347 * Without the CURCNTR write the CURPOS write would
10348 * arm itself. Thus we always update CURCNTR before
10351 * On other platforms CURPOS always requires the
10352 * CURBASE write to arm the update. Additonally
10353 * a write to any of the cursor register will cancel
10354 * an already armed cursor update. Thus leaving out
10355 * the CURBASE write after CURPOS could lead to a
10356 * cursor that doesn't appear to move, or even change
10357 * shape. Thus we always write CURBASE.
10359 * The other registers are armed by by the CURBASE write
10360 * except when the plane is getting enabled at which time
10361 * the CURCNTR write arms the update.
10364 if (INTEL_GEN(dev_priv
) >= 9)
10365 skl_write_cursor_wm(plane
, crtc_state
);
10367 if (plane
->cursor
.base
!= base
||
10368 plane
->cursor
.size
!= fbc_ctl
||
10369 plane
->cursor
.cntl
!= cntl
) {
10370 if (HAS_CUR_FBC(dev_priv
))
10371 I915_WRITE_FW(CUR_FBC_CTL(pipe
), fbc_ctl
);
10372 I915_WRITE_FW(CURCNTR(pipe
), cntl
);
10373 I915_WRITE_FW(CURPOS(pipe
), pos
);
10374 I915_WRITE_FW(CURBASE(pipe
), base
);
10376 plane
->cursor
.base
= base
;
10377 plane
->cursor
.size
= fbc_ctl
;
10378 plane
->cursor
.cntl
= cntl
;
10380 I915_WRITE_FW(CURPOS(pipe
), pos
);
10381 I915_WRITE_FW(CURBASE(pipe
), base
);
10384 spin_unlock_irqrestore(&dev_priv
->uncore
.lock
, irqflags
);
10387 static void i9xx_disable_cursor(struct intel_plane
*plane
,
10388 const struct intel_crtc_state
*crtc_state
)
10390 i9xx_update_cursor(plane
, crtc_state
, NULL
);
10393 static bool i9xx_cursor_get_hw_state(struct intel_plane
*plane
,
10396 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
10397 enum intel_display_power_domain power_domain
;
10398 intel_wakeref_t wakeref
;
10403 * Not 100% correct for planes that can move between pipes,
10404 * but that's only the case for gen2-3 which don't have any
10405 * display power wells.
10407 power_domain
= POWER_DOMAIN_PIPE(plane
->pipe
);
10408 wakeref
= intel_display_power_get_if_enabled(dev_priv
, power_domain
);
10412 val
= I915_READ(CURCNTR(plane
->pipe
));
10414 ret
= val
& MCURSOR_MODE
;
10416 if (INTEL_GEN(dev_priv
) >= 5 || IS_G4X(dev_priv
))
10417 *pipe
= plane
->pipe
;
10419 *pipe
= (val
& MCURSOR_PIPE_SELECT_MASK
) >>
10420 MCURSOR_PIPE_SELECT_SHIFT
;
10422 intel_display_power_put(dev_priv
, power_domain
, wakeref
);
10427 /* VESA 640x480x72Hz mode to set on the pipe */
10428 static const struct drm_display_mode load_detect_mode
= {
10429 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT
, 31500, 640, 664,
10430 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC
| DRM_MODE_FLAG_NVSYNC
),
10433 struct drm_framebuffer
*
10434 intel_framebuffer_create(struct drm_i915_gem_object
*obj
,
10435 struct drm_mode_fb_cmd2
*mode_cmd
)
10437 struct intel_framebuffer
*intel_fb
;
10440 intel_fb
= kzalloc(sizeof(*intel_fb
), GFP_KERNEL
);
10442 return ERR_PTR(-ENOMEM
);
10444 ret
= intel_framebuffer_init(intel_fb
, obj
, mode_cmd
);
10448 return &intel_fb
->base
;
10452 return ERR_PTR(ret
);
10455 static int intel_modeset_disable_planes(struct drm_atomic_state
*state
,
10456 struct drm_crtc
*crtc
)
10458 struct drm_plane
*plane
;
10459 struct drm_plane_state
*plane_state
;
10462 ret
= drm_atomic_add_affected_planes(state
, crtc
);
10466 for_each_new_plane_in_state(state
, plane
, plane_state
, i
) {
10467 if (plane_state
->crtc
!= crtc
)
10470 ret
= drm_atomic_set_crtc_for_plane(plane_state
, NULL
);
10474 drm_atomic_set_fb_for_plane(plane_state
, NULL
);
10480 int intel_get_load_detect_pipe(struct drm_connector
*connector
,
10481 const struct drm_display_mode
*mode
,
10482 struct intel_load_detect_pipe
*old
,
10483 struct drm_modeset_acquire_ctx
*ctx
)
10485 struct intel_crtc
*intel_crtc
;
10486 struct intel_encoder
*intel_encoder
=
10487 intel_attached_encoder(connector
);
10488 struct drm_crtc
*possible_crtc
;
10489 struct drm_encoder
*encoder
= &intel_encoder
->base
;
10490 struct drm_crtc
*crtc
= NULL
;
10491 struct drm_device
*dev
= encoder
->dev
;
10492 struct drm_i915_private
*dev_priv
= to_i915(dev
);
10493 struct drm_mode_config
*config
= &dev
->mode_config
;
10494 struct drm_atomic_state
*state
= NULL
, *restore_state
= NULL
;
10495 struct drm_connector_state
*connector_state
;
10496 struct intel_crtc_state
*crtc_state
;
10499 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10500 connector
->base
.id
, connector
->name
,
10501 encoder
->base
.id
, encoder
->name
);
10503 old
->restore_state
= NULL
;
10505 WARN_ON(!drm_modeset_is_locked(&config
->connection_mutex
));
10508 * Algorithm gets a little messy:
10510 * - if the connector already has an assigned crtc, use it (but make
10511 * sure it's on first)
10513 * - try to find the first unused crtc that can drive this connector,
10514 * and use that if we find one
10517 /* See if we already have a CRTC for this connector */
10518 if (connector
->state
->crtc
) {
10519 crtc
= connector
->state
->crtc
;
10521 ret
= drm_modeset_lock(&crtc
->mutex
, ctx
);
10525 /* Make sure the crtc and connector are running */
10529 /* Find an unused one (if possible) */
10530 for_each_crtc(dev
, possible_crtc
) {
10532 if (!(encoder
->possible_crtcs
& (1 << i
)))
10535 ret
= drm_modeset_lock(&possible_crtc
->mutex
, ctx
);
10539 if (possible_crtc
->state
->enable
) {
10540 drm_modeset_unlock(&possible_crtc
->mutex
);
10544 crtc
= possible_crtc
;
10549 * If we didn't find an unused CRTC, don't use any.
10552 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10558 intel_crtc
= to_intel_crtc(crtc
);
10560 state
= drm_atomic_state_alloc(dev
);
10561 restore_state
= drm_atomic_state_alloc(dev
);
10562 if (!state
|| !restore_state
) {
10567 state
->acquire_ctx
= ctx
;
10568 restore_state
->acquire_ctx
= ctx
;
10570 connector_state
= drm_atomic_get_connector_state(state
, connector
);
10571 if (IS_ERR(connector_state
)) {
10572 ret
= PTR_ERR(connector_state
);
10576 ret
= drm_atomic_set_crtc_for_connector(connector_state
, crtc
);
10580 crtc_state
= intel_atomic_get_crtc_state(state
, intel_crtc
);
10581 if (IS_ERR(crtc_state
)) {
10582 ret
= PTR_ERR(crtc_state
);
10586 crtc_state
->base
.active
= crtc_state
->base
.enable
= true;
10589 mode
= &load_detect_mode
;
10591 ret
= drm_atomic_set_mode_for_crtc(&crtc_state
->base
, mode
);
10595 ret
= intel_modeset_disable_planes(state
, crtc
);
10599 ret
= PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state
, connector
));
10601 ret
= PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state
, crtc
));
10603 ret
= drm_atomic_add_affected_planes(restore_state
, crtc
);
10605 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret
);
10609 ret
= drm_atomic_commit(state
);
10611 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10615 old
->restore_state
= restore_state
;
10616 drm_atomic_state_put(state
);
10618 /* let the connector get through one full cycle before testing */
10619 intel_wait_for_vblank(dev_priv
, intel_crtc
->pipe
);
10624 drm_atomic_state_put(state
);
10627 if (restore_state
) {
10628 drm_atomic_state_put(restore_state
);
10629 restore_state
= NULL
;
10632 if (ret
== -EDEADLK
)
10638 void intel_release_load_detect_pipe(struct drm_connector
*connector
,
10639 struct intel_load_detect_pipe
*old
,
10640 struct drm_modeset_acquire_ctx
*ctx
)
10642 struct intel_encoder
*intel_encoder
=
10643 intel_attached_encoder(connector
);
10644 struct drm_encoder
*encoder
= &intel_encoder
->base
;
10645 struct drm_atomic_state
*state
= old
->restore_state
;
10648 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10649 connector
->base
.id
, connector
->name
,
10650 encoder
->base
.id
, encoder
->name
);
10655 ret
= drm_atomic_helper_commit_duplicated_state(state
, ctx
);
10657 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret
);
10658 drm_atomic_state_put(state
);
10661 static int i9xx_pll_refclk(struct drm_device
*dev
,
10662 const struct intel_crtc_state
*pipe_config
)
10664 struct drm_i915_private
*dev_priv
= to_i915(dev
);
10665 u32 dpll
= pipe_config
->dpll_hw_state
.dpll
;
10667 if ((dpll
& PLL_REF_INPUT_MASK
) == PLLB_REF_INPUT_SPREADSPECTRUMIN
)
10668 return dev_priv
->vbt
.lvds_ssc_freq
;
10669 else if (HAS_PCH_SPLIT(dev_priv
))
10671 else if (!IS_GEN(dev_priv
, 2))
10677 /* Returns the clock of the currently programmed mode of the given pipe. */
10678 static void i9xx_crtc_clock_get(struct intel_crtc
*crtc
,
10679 struct intel_crtc_state
*pipe_config
)
10681 struct drm_device
*dev
= crtc
->base
.dev
;
10682 struct drm_i915_private
*dev_priv
= to_i915(dev
);
10683 int pipe
= pipe_config
->cpu_transcoder
;
10684 u32 dpll
= pipe_config
->dpll_hw_state
.dpll
;
10688 int refclk
= i9xx_pll_refclk(dev
, pipe_config
);
10690 if ((dpll
& DISPLAY_RATE_SELECT_FPA1
) == 0)
10691 fp
= pipe_config
->dpll_hw_state
.fp0
;
10693 fp
= pipe_config
->dpll_hw_state
.fp1
;
10695 clock
.m1
= (fp
& FP_M1_DIV_MASK
) >> FP_M1_DIV_SHIFT
;
10696 if (IS_PINEVIEW(dev_priv
)) {
10697 clock
.n
= ffs((fp
& FP_N_PINEVIEW_DIV_MASK
) >> FP_N_DIV_SHIFT
) - 1;
10698 clock
.m2
= (fp
& FP_M2_PINEVIEW_DIV_MASK
) >> FP_M2_DIV_SHIFT
;
10700 clock
.n
= (fp
& FP_N_DIV_MASK
) >> FP_N_DIV_SHIFT
;
10701 clock
.m2
= (fp
& FP_M2_DIV_MASK
) >> FP_M2_DIV_SHIFT
;
10704 if (!IS_GEN(dev_priv
, 2)) {
10705 if (IS_PINEVIEW(dev_priv
))
10706 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW
) >>
10707 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW
);
10709 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK
) >>
10710 DPLL_FPA01_P1_POST_DIV_SHIFT
);
10712 switch (dpll
& DPLL_MODE_MASK
) {
10713 case DPLLB_MODE_DAC_SERIAL
:
10714 clock
.p2
= dpll
& DPLL_DAC_SERIAL_P2_CLOCK_DIV_5
?
10717 case DPLLB_MODE_LVDS
:
10718 clock
.p2
= dpll
& DPLLB_LVDS_P2_CLOCK_DIV_7
?
10722 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10723 "mode\n", (int)(dpll
& DPLL_MODE_MASK
));
10727 if (IS_PINEVIEW(dev_priv
))
10728 port_clock
= pnv_calc_dpll_params(refclk
, &clock
);
10730 port_clock
= i9xx_calc_dpll_params(refclk
, &clock
);
10732 u32 lvds
= IS_I830(dev_priv
) ? 0 : I915_READ(LVDS
);
10733 bool is_lvds
= (pipe
== 1) && (lvds
& LVDS_PORT_EN
);
10736 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS
) >>
10737 DPLL_FPA01_P1_POST_DIV_SHIFT
);
10739 if (lvds
& LVDS_CLKB_POWER_UP
)
10744 if (dpll
& PLL_P1_DIVIDE_BY_TWO
)
10747 clock
.p1
= ((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_I830
) >>
10748 DPLL_FPA01_P1_POST_DIV_SHIFT
) + 2;
10750 if (dpll
& PLL_P2_DIVIDE_BY_4
)
10756 port_clock
= i9xx_calc_dpll_params(refclk
, &clock
);
10760 * This value includes pixel_multiplier. We will use
10761 * port_clock to compute adjusted_mode.crtc_clock in the
10762 * encoder's get_config() function.
10764 pipe_config
->port_clock
= port_clock
;
10767 int intel_dotclock_calculate(int link_freq
,
10768 const struct intel_link_m_n
*m_n
)
10771 * The calculation for the data clock is:
10772 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10773 * But we want to avoid losing precison if possible, so:
10774 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10776 * and the link clock is simpler:
10777 * link_clock = (m * link_clock) / n
10783 return div_u64(mul_u32_u32(m_n
->link_m
, link_freq
), m_n
->link_n
);
10786 static void ironlake_pch_clock_get(struct intel_crtc
*crtc
,
10787 struct intel_crtc_state
*pipe_config
)
10789 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
10791 /* read out port_clock from the DPLL */
10792 i9xx_crtc_clock_get(crtc
, pipe_config
);
10795 * In case there is an active pipe without active ports,
10796 * we may need some idea for the dotclock anyway.
10797 * Calculate one based on the FDI configuration.
10799 pipe_config
->base
.adjusted_mode
.crtc_clock
=
10800 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv
, pipe_config
),
10801 &pipe_config
->fdi_m_n
);
10804 /* Returns the currently programmed mode of the given encoder. */
10805 struct drm_display_mode
*
10806 intel_encoder_current_mode(struct intel_encoder
*encoder
)
10808 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
10809 struct intel_crtc_state
*crtc_state
;
10810 struct drm_display_mode
*mode
;
10811 struct intel_crtc
*crtc
;
10814 if (!encoder
->get_hw_state(encoder
, &pipe
))
10817 crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
10819 mode
= kzalloc(sizeof(*mode
), GFP_KERNEL
);
10823 crtc_state
= kzalloc(sizeof(*crtc_state
), GFP_KERNEL
);
10829 crtc_state
->base
.crtc
= &crtc
->base
;
10831 if (!dev_priv
->display
.get_pipe_config(crtc
, crtc_state
)) {
10837 encoder
->get_config(encoder
, crtc_state
);
10839 intel_mode_from_pipe_config(mode
, crtc_state
);
/* Tear down a CRTC and free its intel_crtc container. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	/* NOTE(review): kfree restored during reformat — verify */
	kfree(intel_crtc);
}
10855 * intel_wm_need_update - Check whether watermarks need updating
10856 * @cur: current plane state
10857 * @new: new plane state
10859 * Check current plane state versus the new one to determine whether
10860 * watermarks need to be recalculated.
10862 * Returns true or false.
10864 static bool intel_wm_need_update(struct intel_plane_state
*cur
,
10865 struct intel_plane_state
*new)
10867 /* Update watermarks on tiling or size changes. */
10868 if (new->base
.visible
!= cur
->base
.visible
)
10871 if (!cur
->base
.fb
|| !new->base
.fb
)
10874 if (cur
->base
.fb
->modifier
!= new->base
.fb
->modifier
||
10875 cur
->base
.rotation
!= new->base
.rotation
||
10876 drm_rect_width(&new->base
.src
) != drm_rect_width(&cur
->base
.src
) ||
10877 drm_rect_height(&new->base
.src
) != drm_rect_height(&cur
->base
.src
) ||
10878 drm_rect_width(&new->base
.dst
) != drm_rect_width(&cur
->base
.dst
) ||
10879 drm_rect_height(&new->base
.dst
) != drm_rect_height(&cur
->base
.dst
))
10885 static bool needs_scaling(const struct intel_plane_state
*state
)
10887 int src_w
= drm_rect_width(&state
->base
.src
) >> 16;
10888 int src_h
= drm_rect_height(&state
->base
.src
) >> 16;
10889 int dst_w
= drm_rect_width(&state
->base
.dst
);
10890 int dst_h
= drm_rect_height(&state
->base
.dst
);
10892 return (src_w
!= dst_w
|| src_h
!= dst_h
);
10895 int intel_plane_atomic_calc_changes(const struct intel_crtc_state
*old_crtc_state
,
10896 struct drm_crtc_state
*crtc_state
,
10897 const struct intel_plane_state
*old_plane_state
,
10898 struct drm_plane_state
*plane_state
)
10900 struct intel_crtc_state
*pipe_config
= to_intel_crtc_state(crtc_state
);
10901 struct drm_crtc
*crtc
= crtc_state
->crtc
;
10902 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
10903 struct intel_plane
*plane
= to_intel_plane(plane_state
->plane
);
10904 struct drm_device
*dev
= crtc
->dev
;
10905 struct drm_i915_private
*dev_priv
= to_i915(dev
);
10906 bool mode_changed
= needs_modeset(crtc_state
);
10907 bool was_crtc_enabled
= old_crtc_state
->base
.active
;
10908 bool is_crtc_enabled
= crtc_state
->active
;
10909 bool turn_off
, turn_on
, visible
, was_visible
;
10910 struct drm_framebuffer
*fb
= plane_state
->fb
;
10913 if (INTEL_GEN(dev_priv
) >= 9 && plane
->id
!= PLANE_CURSOR
) {
10914 ret
= skl_update_scaler_plane(
10915 to_intel_crtc_state(crtc_state
),
10916 to_intel_plane_state(plane_state
));
10921 was_visible
= old_plane_state
->base
.visible
;
10922 visible
= plane_state
->visible
;
10924 if (!was_crtc_enabled
&& WARN_ON(was_visible
))
10925 was_visible
= false;
10928 * Visibility is calculated as if the crtc was on, but
10929 * after scaler setup everything depends on it being off
10930 * when the crtc isn't active.
10932 * FIXME this is wrong for watermarks. Watermarks should also
10933 * be computed as if the pipe would be active. Perhaps move
10934 * per-plane wm computation to the .check_plane() hook, and
10935 * only combine the results from all planes in the current place?
10937 if (!is_crtc_enabled
) {
10938 plane_state
->visible
= visible
= false;
10939 to_intel_crtc_state(crtc_state
)->active_planes
&= ~BIT(plane
->id
);
10942 if (!was_visible
&& !visible
)
10945 if (fb
!= old_plane_state
->base
.fb
)
10946 pipe_config
->fb_changed
= true;
10948 turn_off
= was_visible
&& (!visible
|| mode_changed
);
10949 turn_on
= visible
&& (!was_visible
|| mode_changed
);
10951 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
10952 intel_crtc
->base
.base
.id
, intel_crtc
->base
.name
,
10953 plane
->base
.base
.id
, plane
->base
.name
,
10954 fb
? fb
->base
.id
: -1);
10956 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
10957 plane
->base
.base
.id
, plane
->base
.name
,
10958 was_visible
, visible
,
10959 turn_off
, turn_on
, mode_changed
);
10962 if (INTEL_GEN(dev_priv
) < 5 && !IS_G4X(dev_priv
))
10963 pipe_config
->update_wm_pre
= true;
10965 /* must disable cxsr around plane enable/disable */
10966 if (plane
->id
!= PLANE_CURSOR
)
10967 pipe_config
->disable_cxsr
= true;
10968 } else if (turn_off
) {
10969 if (INTEL_GEN(dev_priv
) < 5 && !IS_G4X(dev_priv
))
10970 pipe_config
->update_wm_post
= true;
10972 /* must disable cxsr around plane enable/disable */
10973 if (plane
->id
!= PLANE_CURSOR
)
10974 pipe_config
->disable_cxsr
= true;
10975 } else if (intel_wm_need_update(to_intel_plane_state(plane
->base
.state
),
10976 to_intel_plane_state(plane_state
))) {
10977 if (INTEL_GEN(dev_priv
) < 5 && !IS_G4X(dev_priv
)) {
10978 /* FIXME bollocks */
10979 pipe_config
->update_wm_pre
= true;
10980 pipe_config
->update_wm_post
= true;
10984 if (visible
|| was_visible
)
10985 pipe_config
->fb_bits
|= plane
->frontbuffer_bit
;
10988 * ILK/SNB DVSACNTR/Sprite Enable
10989 * IVB SPR_CTL/Sprite Enable
10990 * "When in Self Refresh Big FIFO mode, a write to enable the
10991 * plane will be internally buffered and delayed while Big FIFO
10992 * mode is exiting."
10994 * Which means that enabling the sprite can take an extra frame
10995 * when we start in big FIFO mode (LP1+). Thus we need to drop
10996 * down to LP0 and wait for vblank in order to make sure the
10997 * sprite gets enabled on the next vblank after the register write.
10998 * Doing otherwise would risk enabling the sprite one frame after
10999 * we've already signalled flip completion. We can resume LP1+
11000 * once the sprite has been enabled.
11003 * WaCxSRDisabledForSpriteScaling:ivb
11004 * IVB SPR_SCALE/Scaling Enable
11005 * "Low Power watermarks must be disabled for at least one
11006 * frame before enabling sprite scaling, and kept disabled
11007 * until sprite scaling is disabled."
11009 * ILK/SNB DVSASCALE/Scaling Enable
11010 * "When in Self Refresh Big FIFO mode, scaling enable will be
11011 * masked off while Big FIFO mode is exiting."
11013 * Despite the w/a only being listed for IVB we assume that
11014 * the ILK/SNB note has similar ramifications, hence we apply
11015 * the w/a on all three platforms.
11017 * With experimental results seems this is needed also for primary
11018 * plane, not only sprite plane.
11020 if (plane
->id
!= PLANE_CURSOR
&&
11021 (IS_GEN_RANGE(dev_priv
, 5, 6) ||
11022 IS_IVYBRIDGE(dev_priv
)) &&
11023 (turn_on
|| (!needs_scaling(old_plane_state
) &&
11024 needs_scaling(to_intel_plane_state(plane_state
)))))
11025 pipe_config
->disable_lp_wm
= true;
11030 static bool encoders_cloneable(const struct intel_encoder
*a
,
11031 const struct intel_encoder
*b
)
11033 /* masks could be asymmetric, so check both ways */
11034 return a
== b
|| (a
->cloneable
& (1 << b
->type
) &&
11035 b
->cloneable
& (1 << a
->type
));
11038 static bool check_single_encoder_cloning(struct drm_atomic_state
*state
,
11039 struct intel_crtc
*crtc
,
11040 struct intel_encoder
*encoder
)
11042 struct intel_encoder
*source_encoder
;
11043 struct drm_connector
*connector
;
11044 struct drm_connector_state
*connector_state
;
11047 for_each_new_connector_in_state(state
, connector
, connector_state
, i
) {
11048 if (connector_state
->crtc
!= &crtc
->base
)
11052 to_intel_encoder(connector_state
->best_encoder
);
11053 if (!encoders_cloneable(encoder
, source_encoder
))
11060 static int icl_add_linked_planes(struct intel_atomic_state
*state
)
11062 struct intel_plane
*plane
, *linked
;
11063 struct intel_plane_state
*plane_state
, *linked_plane_state
;
11066 for_each_new_intel_plane_in_state(state
, plane
, plane_state
, i
) {
11067 linked
= plane_state
->linked_plane
;
11072 linked_plane_state
= intel_atomic_get_plane_state(state
, linked
);
11073 if (IS_ERR(linked_plane_state
))
11074 return PTR_ERR(linked_plane_state
);
11076 WARN_ON(linked_plane_state
->linked_plane
!= plane
);
11077 WARN_ON(linked_plane_state
->slave
== plane_state
->slave
);
11083 static int icl_check_nv12_planes(struct intel_crtc_state
*crtc_state
)
11085 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
11086 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
11087 struct intel_atomic_state
*state
= to_intel_atomic_state(crtc_state
->base
.state
);
11088 struct intel_plane
*plane
, *linked
;
11089 struct intel_plane_state
*plane_state
;
11092 if (INTEL_GEN(dev_priv
) < 11)
11096 * Destroy all old plane links and make the slave plane invisible
11097 * in the crtc_state->active_planes mask.
11099 for_each_new_intel_plane_in_state(state
, plane
, plane_state
, i
) {
11100 if (plane
->pipe
!= crtc
->pipe
|| !plane_state
->linked_plane
)
11103 plane_state
->linked_plane
= NULL
;
11104 if (plane_state
->slave
&& !plane_state
->base
.visible
) {
11105 crtc_state
->active_planes
&= ~BIT(plane
->id
);
11106 crtc_state
->update_planes
|= BIT(plane
->id
);
11109 plane_state
->slave
= false;
11112 if (!crtc_state
->nv12_planes
)
11115 for_each_new_intel_plane_in_state(state
, plane
, plane_state
, i
) {
11116 struct intel_plane_state
*linked_state
= NULL
;
11118 if (plane
->pipe
!= crtc
->pipe
||
11119 !(crtc_state
->nv12_planes
& BIT(plane
->id
)))
11122 for_each_intel_plane_on_crtc(&dev_priv
->drm
, crtc
, linked
) {
11123 if (!icl_is_nv12_y_plane(linked
->id
))
11126 if (crtc_state
->active_planes
& BIT(linked
->id
))
11129 linked_state
= intel_atomic_get_plane_state(state
, linked
);
11130 if (IS_ERR(linked_state
))
11131 return PTR_ERR(linked_state
);
11136 if (!linked_state
) {
11137 DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
11138 hweight8(crtc_state
->nv12_planes
));
11143 plane_state
->linked_plane
= linked
;
11145 linked_state
->slave
= true;
11146 linked_state
->linked_plane
= plane
;
11147 crtc_state
->active_planes
|= BIT(linked
->id
);
11148 crtc_state
->update_planes
|= BIT(linked
->id
);
11149 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked
->base
.name
, plane
->base
.name
);
11155 static int intel_crtc_atomic_check(struct drm_crtc
*crtc
,
11156 struct drm_crtc_state
*crtc_state
)
11158 struct drm_i915_private
*dev_priv
= to_i915(crtc
->dev
);
11159 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
11160 struct intel_crtc_state
*pipe_config
=
11161 to_intel_crtc_state(crtc_state
);
11163 bool mode_changed
= needs_modeset(crtc_state
);
11165 if (INTEL_GEN(dev_priv
) < 5 && !IS_G4X(dev_priv
) &&
11166 mode_changed
&& !crtc_state
->active
)
11167 pipe_config
->update_wm_post
= true;
11169 if (mode_changed
&& crtc_state
->enable
&&
11170 dev_priv
->display
.crtc_compute_clock
&&
11171 !WARN_ON(pipe_config
->shared_dpll
)) {
11172 ret
= dev_priv
->display
.crtc_compute_clock(intel_crtc
,
11178 if (mode_changed
|| crtc_state
->color_mgmt_changed
) {
11179 ret
= intel_color_check(pipe_config
);
11184 * Changing color management on Intel hardware is
11185 * handled as part of planes update.
11187 crtc_state
->planes_changed
= true;
11191 if (dev_priv
->display
.compute_pipe_wm
) {
11192 ret
= dev_priv
->display
.compute_pipe_wm(pipe_config
);
11194 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11199 if (dev_priv
->display
.compute_intermediate_wm
) {
11200 if (WARN_ON(!dev_priv
->display
.compute_pipe_wm
))
11204 * Calculate 'intermediate' watermarks that satisfy both the
11205 * old state and the new state. We can program these
11208 ret
= dev_priv
->display
.compute_intermediate_wm(pipe_config
);
11210 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11215 if (INTEL_GEN(dev_priv
) >= 9) {
11216 if (mode_changed
|| pipe_config
->update_pipe
)
11217 ret
= skl_update_scaler_crtc(pipe_config
);
11220 ret
= icl_check_nv12_planes(pipe_config
);
11222 ret
= skl_check_pipe_max_pixel_rate(intel_crtc
,
11225 ret
= intel_atomic_setup_scalers(dev_priv
, intel_crtc
,
11229 if (HAS_IPS(dev_priv
))
11230 pipe_config
->ips_enabled
= hsw_compute_ips_config(pipe_config
);
11235 static const struct drm_crtc_helper_funcs intel_helper_funcs
= {
11236 .atomic_check
= intel_crtc_atomic_check
,
11239 static void intel_modeset_update_connector_atomic_state(struct drm_device
*dev
)
11241 struct intel_connector
*connector
;
11242 struct drm_connector_list_iter conn_iter
;
11244 drm_connector_list_iter_begin(dev
, &conn_iter
);
11245 for_each_intel_connector_iter(connector
, &conn_iter
) {
11246 if (connector
->base
.state
->crtc
)
11247 drm_connector_put(&connector
->base
);
11249 if (connector
->base
.encoder
) {
11250 connector
->base
.state
->best_encoder
=
11251 connector
->base
.encoder
;
11252 connector
->base
.state
->crtc
=
11253 connector
->base
.encoder
->crtc
;
11255 drm_connector_get(&connector
->base
);
11257 connector
->base
.state
->best_encoder
= NULL
;
11258 connector
->base
.state
->crtc
= NULL
;
11261 drm_connector_list_iter_end(&conn_iter
);
11265 compute_sink_pipe_bpp(const struct drm_connector_state
*conn_state
,
11266 struct intel_crtc_state
*pipe_config
)
11268 struct drm_connector
*connector
= conn_state
->connector
;
11269 const struct drm_display_info
*info
= &connector
->display_info
;
11272 switch (conn_state
->max_bpc
) {
11289 if (bpp
< pipe_config
->pipe_bpp
) {
11290 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11291 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11292 connector
->base
.id
, connector
->name
,
11293 bpp
, 3 * info
->bpc
, 3 * conn_state
->max_requested_bpc
,
11294 pipe_config
->pipe_bpp
);
11296 pipe_config
->pipe_bpp
= bpp
;
11303 compute_baseline_pipe_bpp(struct intel_crtc
*crtc
,
11304 struct intel_crtc_state
*pipe_config
)
11306 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
11307 struct drm_atomic_state
*state
= pipe_config
->base
.state
;
11308 struct drm_connector
*connector
;
11309 struct drm_connector_state
*connector_state
;
11312 if ((IS_G4X(dev_priv
) || IS_VALLEYVIEW(dev_priv
) ||
11313 IS_CHERRYVIEW(dev_priv
)))
11315 else if (INTEL_GEN(dev_priv
) >= 5)
11320 pipe_config
->pipe_bpp
= bpp
;
11322 /* Clamp display bpp to connector max bpp */
11323 for_each_new_connector_in_state(state
, connector
, connector_state
, i
) {
11326 if (connector_state
->crtc
!= &crtc
->base
)
11329 ret
= compute_sink_pipe_bpp(connector_state
, pipe_config
);
11337 static void intel_dump_crtc_timings(const struct drm_display_mode
*mode
)
11339 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11340 "type: 0x%x flags: 0x%x\n",
11342 mode
->crtc_hdisplay
, mode
->crtc_hsync_start
,
11343 mode
->crtc_hsync_end
, mode
->crtc_htotal
,
11344 mode
->crtc_vdisplay
, mode
->crtc_vsync_start
,
11345 mode
->crtc_vsync_end
, mode
->crtc_vtotal
, mode
->type
, mode
->flags
);
11349 intel_dump_m_n_config(struct intel_crtc_state
*pipe_config
, char *id
,
11350 unsigned int lane_count
, struct intel_link_m_n
*m_n
)
11352 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11354 m_n
->gmch_m
, m_n
->gmch_n
,
11355 m_n
->link_m
, m_n
->link_n
, m_n
->tu
);
11358 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11360 static const char * const output_type_str
[] = {
11361 OUTPUT_TYPE(UNUSED
),
11362 OUTPUT_TYPE(ANALOG
),
11366 OUTPUT_TYPE(TVOUT
),
11372 OUTPUT_TYPE(DP_MST
),
11377 static void snprintf_output_types(char *buf
, size_t len
,
11378 unsigned int output_types
)
11385 for (i
= 0; i
< ARRAY_SIZE(output_type_str
); i
++) {
11388 if ((output_types
& BIT(i
)) == 0)
11391 r
= snprintf(str
, len
, "%s%s",
11392 str
!= buf
? "," : "", output_type_str
[i
]);
11398 output_types
&= ~BIT(i
);
11401 WARN_ON_ONCE(output_types
!= 0);
11404 static const char * const output_format_str
[] = {
11405 [INTEL_OUTPUT_FORMAT_INVALID
] = "Invalid",
11406 [INTEL_OUTPUT_FORMAT_RGB
] = "RGB",
11407 [INTEL_OUTPUT_FORMAT_YCBCR420
] = "YCBCR4:2:0",
11408 [INTEL_OUTPUT_FORMAT_YCBCR444
] = "YCBCR4:4:4",
11411 static const char *output_formats(enum intel_output_format format
)
11413 if (format
>= ARRAY_SIZE(output_format_str
))
11414 format
= INTEL_OUTPUT_FORMAT_INVALID
;
11415 return output_format_str
[format
];
11418 static void intel_dump_pipe_config(struct intel_crtc
*crtc
,
11419 struct intel_crtc_state
*pipe_config
,
11420 const char *context
)
11422 struct drm_device
*dev
= crtc
->base
.dev
;
11423 struct drm_i915_private
*dev_priv
= to_i915(dev
);
11424 struct drm_plane
*plane
;
11425 struct intel_plane
*intel_plane
;
11426 struct intel_plane_state
*state
;
11427 struct drm_framebuffer
*fb
;
11430 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
11431 crtc
->base
.base
.id
, crtc
->base
.name
, context
);
11433 snprintf_output_types(buf
, sizeof(buf
), pipe_config
->output_types
);
11434 DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
11435 buf
, pipe_config
->output_types
);
11437 DRM_DEBUG_KMS("output format: %s\n",
11438 output_formats(pipe_config
->output_format
));
11440 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11441 transcoder_name(pipe_config
->cpu_transcoder
),
11442 pipe_config
->pipe_bpp
, pipe_config
->dither
);
11444 if (pipe_config
->has_pch_encoder
)
11445 intel_dump_m_n_config(pipe_config
, "fdi",
11446 pipe_config
->fdi_lanes
,
11447 &pipe_config
->fdi_m_n
);
11449 if (intel_crtc_has_dp_encoder(pipe_config
)) {
11450 intel_dump_m_n_config(pipe_config
, "dp m_n",
11451 pipe_config
->lane_count
, &pipe_config
->dp_m_n
);
11452 if (pipe_config
->has_drrs
)
11453 intel_dump_m_n_config(pipe_config
, "dp m2_n2",
11454 pipe_config
->lane_count
,
11455 &pipe_config
->dp_m2_n2
);
11458 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
11459 pipe_config
->has_audio
, pipe_config
->has_infoframe
);
11461 DRM_DEBUG_KMS("requested mode:\n");
11462 drm_mode_debug_printmodeline(&pipe_config
->base
.mode
);
11463 DRM_DEBUG_KMS("adjusted mode:\n");
11464 drm_mode_debug_printmodeline(&pipe_config
->base
.adjusted_mode
);
11465 intel_dump_crtc_timings(&pipe_config
->base
.adjusted_mode
);
11466 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11467 pipe_config
->port_clock
,
11468 pipe_config
->pipe_src_w
, pipe_config
->pipe_src_h
,
11469 pipe_config
->pixel_rate
);
11471 if (INTEL_GEN(dev_priv
) >= 9)
11472 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11474 pipe_config
->scaler_state
.scaler_users
,
11475 pipe_config
->scaler_state
.scaler_id
);
11477 if (HAS_GMCH(dev_priv
))
11478 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11479 pipe_config
->gmch_pfit
.control
,
11480 pipe_config
->gmch_pfit
.pgm_ratios
,
11481 pipe_config
->gmch_pfit
.lvds_border_bits
);
11483 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
11484 pipe_config
->pch_pfit
.pos
,
11485 pipe_config
->pch_pfit
.size
,
11486 enableddisabled(pipe_config
->pch_pfit
.enabled
));
11488 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11489 pipe_config
->ips_enabled
, pipe_config
->double_wide
);
11491 intel_dpll_dump_hw_state(dev_priv
, &pipe_config
->dpll_hw_state
);
11493 DRM_DEBUG_KMS("planes on this crtc\n");
11494 list_for_each_entry(plane
, &dev
->mode_config
.plane_list
, head
) {
11495 struct drm_format_name_buf format_name
;
11496 intel_plane
= to_intel_plane(plane
);
11497 if (intel_plane
->pipe
!= crtc
->pipe
)
11500 state
= to_intel_plane_state(plane
->state
);
11501 fb
= state
->base
.fb
;
11503 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
11504 plane
->base
.id
, plane
->name
, state
->scaler_id
);
11508 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
11509 plane
->base
.id
, plane
->name
,
11510 fb
->base
.id
, fb
->width
, fb
->height
,
11511 drm_get_format_name(fb
->format
->format
, &format_name
));
11512 if (INTEL_GEN(dev_priv
) >= 9)
11513 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
11515 state
->base
.src
.x1
>> 16,
11516 state
->base
.src
.y1
>> 16,
11517 drm_rect_width(&state
->base
.src
) >> 16,
11518 drm_rect_height(&state
->base
.src
) >> 16,
11519 state
->base
.dst
.x1
, state
->base
.dst
.y1
,
11520 drm_rect_width(&state
->base
.dst
),
11521 drm_rect_height(&state
->base
.dst
));
11525 static bool check_digital_port_conflicts(struct drm_atomic_state
*state
)
11527 struct drm_device
*dev
= state
->dev
;
11528 struct drm_connector
*connector
;
11529 struct drm_connector_list_iter conn_iter
;
11530 unsigned int used_ports
= 0;
11531 unsigned int used_mst_ports
= 0;
11535 * Walk the connector list instead of the encoder
11536 * list to detect the problem on ddi platforms
11537 * where there's just one encoder per digital port.
11539 drm_connector_list_iter_begin(dev
, &conn_iter
);
11540 drm_for_each_connector_iter(connector
, &conn_iter
) {
11541 struct drm_connector_state
*connector_state
;
11542 struct intel_encoder
*encoder
;
11544 connector_state
= drm_atomic_get_new_connector_state(state
, connector
);
11545 if (!connector_state
)
11546 connector_state
= connector
->state
;
11548 if (!connector_state
->best_encoder
)
11551 encoder
= to_intel_encoder(connector_state
->best_encoder
);
11553 WARN_ON(!connector_state
->crtc
);
11555 switch (encoder
->type
) {
11556 unsigned int port_mask
;
11557 case INTEL_OUTPUT_DDI
:
11558 if (WARN_ON(!HAS_DDI(to_i915(dev
))))
11560 /* else: fall through */
11561 case INTEL_OUTPUT_DP
:
11562 case INTEL_OUTPUT_HDMI
:
11563 case INTEL_OUTPUT_EDP
:
11564 port_mask
= 1 << encoder
->port
;
11566 /* the same port mustn't appear more than once */
11567 if (used_ports
& port_mask
)
11570 used_ports
|= port_mask
;
11572 case INTEL_OUTPUT_DP_MST
:
11574 1 << encoder
->port
;
11580 drm_connector_list_iter_end(&conn_iter
);
11582 /* can't mix MST and SST/HDMI on the same port */
11583 if (used_ports
& used_mst_ports
)
11590 clear_intel_crtc_state(struct intel_crtc_state
*crtc_state
)
11592 struct drm_i915_private
*dev_priv
=
11593 to_i915(crtc_state
->base
.crtc
->dev
);
11594 struct intel_crtc_state
*saved_state
;
11596 saved_state
= kzalloc(sizeof(*saved_state
), GFP_KERNEL
);
11600 /* FIXME: before the switch to atomic started, a new pipe_config was
11601 * kzalloc'd. Code that depends on any field being zero should be
11602 * fixed, so that the crtc_state can be safely duplicated. For now,
11603 * only fields that are know to not cause problems are preserved. */
11605 saved_state
->scaler_state
= crtc_state
->scaler_state
;
11606 saved_state
->shared_dpll
= crtc_state
->shared_dpll
;
11607 saved_state
->dpll_hw_state
= crtc_state
->dpll_hw_state
;
11608 saved_state
->pch_pfit
.force_thru
= crtc_state
->pch_pfit
.force_thru
;
11609 saved_state
->ips_force_disable
= crtc_state
->ips_force_disable
;
11610 if (IS_G4X(dev_priv
) ||
11611 IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
))
11612 saved_state
->wm
= crtc_state
->wm
;
11614 /* Keep base drm_crtc_state intact, only clear our extended struct */
11615 BUILD_BUG_ON(offsetof(struct intel_crtc_state
, base
));
11616 memcpy(&crtc_state
->base
+ 1, &saved_state
->base
+ 1,
11617 sizeof(*crtc_state
) - sizeof(crtc_state
->base
));
11619 kfree(saved_state
);
11624 intel_modeset_pipe_config(struct drm_crtc
*crtc
,
11625 struct intel_crtc_state
*pipe_config
)
11627 struct drm_atomic_state
*state
= pipe_config
->base
.state
;
11628 struct intel_encoder
*encoder
;
11629 struct drm_connector
*connector
;
11630 struct drm_connector_state
*connector_state
;
11635 ret
= clear_intel_crtc_state(pipe_config
);
11639 pipe_config
->cpu_transcoder
=
11640 (enum transcoder
) to_intel_crtc(crtc
)->pipe
;
11643 * Sanitize sync polarity flags based on requested ones. If neither
11644 * positive or negative polarity is requested, treat this as meaning
11645 * negative polarity.
11647 if (!(pipe_config
->base
.adjusted_mode
.flags
&
11648 (DRM_MODE_FLAG_PHSYNC
| DRM_MODE_FLAG_NHSYNC
)))
11649 pipe_config
->base
.adjusted_mode
.flags
|= DRM_MODE_FLAG_NHSYNC
;
11651 if (!(pipe_config
->base
.adjusted_mode
.flags
&
11652 (DRM_MODE_FLAG_PVSYNC
| DRM_MODE_FLAG_NVSYNC
)))
11653 pipe_config
->base
.adjusted_mode
.flags
|= DRM_MODE_FLAG_NVSYNC
;
11655 ret
= compute_baseline_pipe_bpp(to_intel_crtc(crtc
),
11660 base_bpp
= pipe_config
->pipe_bpp
;
11663 * Determine the real pipe dimensions. Note that stereo modes can
11664 * increase the actual pipe size due to the frame doubling and
11665 * insertion of additional space for blanks between the frame. This
11666 * is stored in the crtc timings. We use the requested mode to do this
11667 * computation to clearly distinguish it from the adjusted mode, which
11668 * can be changed by the connectors in the below retry loop.
11670 drm_mode_get_hv_timing(&pipe_config
->base
.mode
,
11671 &pipe_config
->pipe_src_w
,
11672 &pipe_config
->pipe_src_h
);
11674 for_each_new_connector_in_state(state
, connector
, connector_state
, i
) {
11675 if (connector_state
->crtc
!= crtc
)
11678 encoder
= to_intel_encoder(connector_state
->best_encoder
);
11680 if (!check_single_encoder_cloning(state
, to_intel_crtc(crtc
), encoder
)) {
11681 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11686 * Determine output_types before calling the .compute_config()
11687 * hooks so that the hooks can use this information safely.
11689 if (encoder
->compute_output_type
)
11690 pipe_config
->output_types
|=
11691 BIT(encoder
->compute_output_type(encoder
, pipe_config
,
11694 pipe_config
->output_types
|= BIT(encoder
->type
);
11698 /* Ensure the port clock defaults are reset when retrying. */
11699 pipe_config
->port_clock
= 0;
11700 pipe_config
->pixel_multiplier
= 1;
11702 /* Fill in default crtc timings, allow encoders to overwrite them. */
11703 drm_mode_set_crtcinfo(&pipe_config
->base
.adjusted_mode
,
11704 CRTC_STEREO_DOUBLE
);
11706 /* Pass our mode to the connectors and the CRTC to give them a chance to
11707 * adjust it according to limitations or connector properties, and also
11708 * a chance to reject the mode entirely.
11710 for_each_new_connector_in_state(state
, connector
, connector_state
, i
) {
11711 if (connector_state
->crtc
!= crtc
)
11714 encoder
= to_intel_encoder(connector_state
->best_encoder
);
11715 ret
= encoder
->compute_config(encoder
, pipe_config
,
11718 if (ret
!= -EDEADLK
)
11719 DRM_DEBUG_KMS("Encoder config failure: %d\n",
11725 /* Set default port clock if not overwritten by the encoder. Needs to be
11726 * done afterwards in case the encoder adjusts the mode. */
11727 if (!pipe_config
->port_clock
)
11728 pipe_config
->port_clock
= pipe_config
->base
.adjusted_mode
.crtc_clock
11729 * pipe_config
->pixel_multiplier
;
11731 ret
= intel_crtc_compute_config(to_intel_crtc(crtc
), pipe_config
);
11732 if (ret
== -EDEADLK
)
11735 DRM_DEBUG_KMS("CRTC fixup failed\n");
11739 if (ret
== RETRY
) {
11740 if (WARN(!retry
, "loop in pipe configuration computation\n"))
11743 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11745 goto encoder_retry
;
11748 /* Dithering seems to not pass-through bits correctly when it should, so
11749 * only enable it on 6bpc panels and when its not a compliance
11750 * test requesting 6bpc video pattern.
11752 pipe_config
->dither
= (pipe_config
->pipe_bpp
== 6*3) &&
11753 !pipe_config
->dither_force_disable
;
11754 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
11755 base_bpp
, pipe_config
->pipe_bpp
, pipe_config
->dither
);
11760 static bool intel_fuzzy_clock_check(int clock1
, int clock2
)
11764 if (clock1
== clock2
)
11767 if (!clock1
|| !clock2
)
11770 diff
= abs(clock1
- clock2
);
11772 if (((((diff
+ clock1
+ clock2
) * 100)) / (clock1
+ clock2
)) < 105)
11779 intel_compare_m_n(unsigned int m
, unsigned int n
,
11780 unsigned int m2
, unsigned int n2
,
11783 if (m
== m2
&& n
== n2
)
11786 if (exact
|| !m
|| !n
|| !m2
|| !n2
)
11789 BUILD_BUG_ON(DATA_LINK_M_N_MASK
> INT_MAX
);
11796 } else if (n
< n2
) {
11806 return intel_fuzzy_clock_check(m
, m2
);
11810 intel_compare_link_m_n(const struct intel_link_m_n
*m_n
,
11811 struct intel_link_m_n
*m2_n2
,
11814 if (m_n
->tu
== m2_n2
->tu
&&
11815 intel_compare_m_n(m_n
->gmch_m
, m_n
->gmch_n
,
11816 m2_n2
->gmch_m
, m2_n2
->gmch_n
, !adjust
) &&
11817 intel_compare_m_n(m_n
->link_m
, m_n
->link_n
,
11818 m2_n2
->link_m
, m2_n2
->link_n
, !adjust
)) {
11828 static void __printf(3, 4)
11829 pipe_config_err(bool adjust
, const char *name
, const char *format
, ...)
11831 struct va_format vaf
;
11834 va_start(args
, format
);
11839 drm_dbg(DRM_UT_KMS
, "mismatch in %s %pV", name
, &vaf
);
11841 drm_err("mismatch in %s %pV", name
, &vaf
);
11846 static bool fastboot_enabled(struct drm_i915_private
*dev_priv
)
11848 if (i915_modparams
.fastboot
!= -1)
11849 return i915_modparams
.fastboot
;
11851 /* Enable fastboot by default on Skylake and newer */
11852 if (INTEL_GEN(dev_priv
) >= 9)
11855 /* Enable fastboot by default on VLV and CHV */
11856 if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
))
11859 /* Disabled by default on all others */
11864 intel_pipe_config_compare(struct drm_i915_private
*dev_priv
,
11865 struct intel_crtc_state
*current_config
,
11866 struct intel_crtc_state
*pipe_config
,
11870 bool fixup_inherited
= adjust
&&
11871 (current_config
->base
.mode
.private_flags
& I915_MODE_FLAG_INHERITED
) &&
11872 !(pipe_config
->base
.mode
.private_flags
& I915_MODE_FLAG_INHERITED
);
11874 if (fixup_inherited
&& !fastboot_enabled(dev_priv
)) {
11875 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
11879 #define PIPE_CONF_CHECK_X(name) do { \
11880 if (current_config->name != pipe_config->name) { \
11881 pipe_config_err(adjust, __stringify(name), \
11882 "(expected 0x%08x, found 0x%08x)\n", \
11883 current_config->name, \
11884 pipe_config->name); \
11889 #define PIPE_CONF_CHECK_I(name) do { \
11890 if (current_config->name != pipe_config->name) { \
11891 pipe_config_err(adjust, __stringify(name), \
11892 "(expected %i, found %i)\n", \
11893 current_config->name, \
11894 pipe_config->name); \
11899 #define PIPE_CONF_CHECK_BOOL(name) do { \
11900 if (current_config->name != pipe_config->name) { \
11901 pipe_config_err(adjust, __stringify(name), \
11902 "(expected %s, found %s)\n", \
11903 yesno(current_config->name), \
11904 yesno(pipe_config->name)); \
11910 * Checks state where we only read out the enabling, but not the entire
11911 * state itself (like full infoframes or ELD for audio). These states
11912 * require a full modeset on bootup to fix up.
11914 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11915 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11916 PIPE_CONF_CHECK_BOOL(name); \
11918 pipe_config_err(adjust, __stringify(name), \
11919 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11920 yesno(current_config->name), \
11921 yesno(pipe_config->name)); \
11926 #define PIPE_CONF_CHECK_P(name) do { \
11927 if (current_config->name != pipe_config->name) { \
11928 pipe_config_err(adjust, __stringify(name), \
11929 "(expected %p, found %p)\n", \
11930 current_config->name, \
11931 pipe_config->name); \
11936 #define PIPE_CONF_CHECK_M_N(name) do { \
11937 if (!intel_compare_link_m_n(¤t_config->name, \
11938 &pipe_config->name,\
11940 pipe_config_err(adjust, __stringify(name), \
11941 "(expected tu %i gmch %i/%i link %i/%i, " \
11942 "found tu %i, gmch %i/%i link %i/%i)\n", \
11943 current_config->name.tu, \
11944 current_config->name.gmch_m, \
11945 current_config->name.gmch_n, \
11946 current_config->name.link_m, \
11947 current_config->name.link_n, \
11948 pipe_config->name.tu, \
11949 pipe_config->name.gmch_m, \
11950 pipe_config->name.gmch_n, \
11951 pipe_config->name.link_m, \
11952 pipe_config->name.link_n); \
11957 /* This is required for BDW+ where there is only one set of registers for
11958 * switching between high and low RR.
11959 * This macro can be used whenever a comparison has to be made between one
11960 * hw state and multiple sw state variables.
11962 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
11963 if (!intel_compare_link_m_n(¤t_config->name, \
11964 &pipe_config->name, adjust) && \
11965 !intel_compare_link_m_n(¤t_config->alt_name, \
11966 &pipe_config->name, adjust)) { \
11967 pipe_config_err(adjust, __stringify(name), \
11968 "(expected tu %i gmch %i/%i link %i/%i, " \
11969 "or tu %i gmch %i/%i link %i/%i, " \
11970 "found tu %i, gmch %i/%i link %i/%i)\n", \
11971 current_config->name.tu, \
11972 current_config->name.gmch_m, \
11973 current_config->name.gmch_n, \
11974 current_config->name.link_m, \
11975 current_config->name.link_n, \
11976 current_config->alt_name.tu, \
11977 current_config->alt_name.gmch_m, \
11978 current_config->alt_name.gmch_n, \
11979 current_config->alt_name.link_m, \
11980 current_config->alt_name.link_n, \
11981 pipe_config->name.tu, \
11982 pipe_config->name.gmch_m, \
11983 pipe_config->name.gmch_n, \
11984 pipe_config->name.link_m, \
11985 pipe_config->name.link_n); \
11990 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
11991 if ((current_config->name ^ pipe_config->name) & (mask)) { \
11992 pipe_config_err(adjust, __stringify(name), \
11993 "(%x) (expected %i, found %i)\n", \
11995 current_config->name & (mask), \
11996 pipe_config->name & (mask)); \
12001 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12002 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12003 pipe_config_err(adjust, __stringify(name), \
12004 "(expected %i, found %i)\n", \
12005 current_config->name, \
12006 pipe_config->name); \
12011 #define PIPE_CONF_QUIRK(quirk) \
12012 ((current_config->quirks | pipe_config->quirks) & (quirk))
12014 PIPE_CONF_CHECK_I(cpu_transcoder
);
12016 PIPE_CONF_CHECK_BOOL(has_pch_encoder
);
12017 PIPE_CONF_CHECK_I(fdi_lanes
);
12018 PIPE_CONF_CHECK_M_N(fdi_m_n
);
12020 PIPE_CONF_CHECK_I(lane_count
);
12021 PIPE_CONF_CHECK_X(lane_lat_optim_mask
);
12023 if (INTEL_GEN(dev_priv
) < 8) {
12024 PIPE_CONF_CHECK_M_N(dp_m_n
);
12026 if (current_config
->has_drrs
)
12027 PIPE_CONF_CHECK_M_N(dp_m2_n2
);
12029 PIPE_CONF_CHECK_M_N_ALT(dp_m_n
, dp_m2_n2
);
12031 PIPE_CONF_CHECK_X(output_types
);
12033 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_hdisplay
);
12034 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_htotal
);
12035 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_hblank_start
);
12036 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_hblank_end
);
12037 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_hsync_start
);
12038 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_hsync_end
);
12040 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_vdisplay
);
12041 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_vtotal
);
12042 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_vblank_start
);
12043 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_vblank_end
);
12044 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_vsync_start
);
12045 PIPE_CONF_CHECK_I(base
.adjusted_mode
.crtc_vsync_end
);
12047 PIPE_CONF_CHECK_I(pixel_multiplier
);
12048 PIPE_CONF_CHECK_I(output_format
);
12049 PIPE_CONF_CHECK_BOOL(has_hdmi_sink
);
12050 if ((INTEL_GEN(dev_priv
) < 8 && !IS_HASWELL(dev_priv
)) ||
12051 IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
))
12052 PIPE_CONF_CHECK_BOOL(limited_color_range
);
12054 PIPE_CONF_CHECK_BOOL(hdmi_scrambling
);
12055 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio
);
12056 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe
);
12058 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio
);
12060 PIPE_CONF_CHECK_FLAGS(base
.adjusted_mode
.flags
,
12061 DRM_MODE_FLAG_INTERLACE
);
12063 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS
)) {
12064 PIPE_CONF_CHECK_FLAGS(base
.adjusted_mode
.flags
,
12065 DRM_MODE_FLAG_PHSYNC
);
12066 PIPE_CONF_CHECK_FLAGS(base
.adjusted_mode
.flags
,
12067 DRM_MODE_FLAG_NHSYNC
);
12068 PIPE_CONF_CHECK_FLAGS(base
.adjusted_mode
.flags
,
12069 DRM_MODE_FLAG_PVSYNC
);
12070 PIPE_CONF_CHECK_FLAGS(base
.adjusted_mode
.flags
,
12071 DRM_MODE_FLAG_NVSYNC
);
12074 PIPE_CONF_CHECK_X(gmch_pfit
.control
);
12075 /* pfit ratios are autocomputed by the hw on gen4+ */
12076 if (INTEL_GEN(dev_priv
) < 4)
12077 PIPE_CONF_CHECK_X(gmch_pfit
.pgm_ratios
);
12078 PIPE_CONF_CHECK_X(gmch_pfit
.lvds_border_bits
);
12081 PIPE_CONF_CHECK_I(pipe_src_w
);
12082 PIPE_CONF_CHECK_I(pipe_src_h
);
12084 PIPE_CONF_CHECK_BOOL(pch_pfit
.enabled
);
12085 if (current_config
->pch_pfit
.enabled
) {
12086 PIPE_CONF_CHECK_X(pch_pfit
.pos
);
12087 PIPE_CONF_CHECK_X(pch_pfit
.size
);
12090 PIPE_CONF_CHECK_I(scaler_state
.scaler_id
);
12091 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate
);
12094 PIPE_CONF_CHECK_BOOL(double_wide
);
12096 PIPE_CONF_CHECK_P(shared_dpll
);
12097 PIPE_CONF_CHECK_X(dpll_hw_state
.dpll
);
12098 PIPE_CONF_CHECK_X(dpll_hw_state
.dpll_md
);
12099 PIPE_CONF_CHECK_X(dpll_hw_state
.fp0
);
12100 PIPE_CONF_CHECK_X(dpll_hw_state
.fp1
);
12101 PIPE_CONF_CHECK_X(dpll_hw_state
.wrpll
);
12102 PIPE_CONF_CHECK_X(dpll_hw_state
.spll
);
12103 PIPE_CONF_CHECK_X(dpll_hw_state
.ctrl1
);
12104 PIPE_CONF_CHECK_X(dpll_hw_state
.cfgcr1
);
12105 PIPE_CONF_CHECK_X(dpll_hw_state
.cfgcr2
);
12106 PIPE_CONF_CHECK_X(dpll_hw_state
.cfgcr0
);
12107 PIPE_CONF_CHECK_X(dpll_hw_state
.ebb0
);
12108 PIPE_CONF_CHECK_X(dpll_hw_state
.ebb4
);
12109 PIPE_CONF_CHECK_X(dpll_hw_state
.pll0
);
12110 PIPE_CONF_CHECK_X(dpll_hw_state
.pll1
);
12111 PIPE_CONF_CHECK_X(dpll_hw_state
.pll2
);
12112 PIPE_CONF_CHECK_X(dpll_hw_state
.pll3
);
12113 PIPE_CONF_CHECK_X(dpll_hw_state
.pll6
);
12114 PIPE_CONF_CHECK_X(dpll_hw_state
.pll8
);
12115 PIPE_CONF_CHECK_X(dpll_hw_state
.pll9
);
12116 PIPE_CONF_CHECK_X(dpll_hw_state
.pll10
);
12117 PIPE_CONF_CHECK_X(dpll_hw_state
.pcsdw12
);
12118 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_refclkin_ctl
);
12119 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_clktop2_coreclkctl1
);
12120 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_clktop2_hsclkctl
);
12121 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_pll_div0
);
12122 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_pll_div1
);
12123 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_pll_lf
);
12124 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_pll_frac_lock
);
12125 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_pll_ssc
);
12126 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_pll_bias
);
12127 PIPE_CONF_CHECK_X(dpll_hw_state
.mg_pll_tdc_coldst_bias
);
12129 PIPE_CONF_CHECK_X(dsi_pll
.ctrl
);
12130 PIPE_CONF_CHECK_X(dsi_pll
.div
);
12132 if (IS_G4X(dev_priv
) || INTEL_GEN(dev_priv
) >= 5)
12133 PIPE_CONF_CHECK_I(pipe_bpp
);
12135 PIPE_CONF_CHECK_CLOCK_FUZZY(base
.adjusted_mode
.crtc_clock
);
12136 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock
);
12138 PIPE_CONF_CHECK_I(min_voltage_level
);
12140 #undef PIPE_CONF_CHECK_X
12141 #undef PIPE_CONF_CHECK_I
12142 #undef PIPE_CONF_CHECK_BOOL
12143 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12144 #undef PIPE_CONF_CHECK_P
12145 #undef PIPE_CONF_CHECK_FLAGS
12146 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12147 #undef PIPE_CONF_QUIRK
12152 static void intel_pipe_config_sanity_check(struct drm_i915_private
*dev_priv
,
12153 const struct intel_crtc_state
*pipe_config
)
12155 if (pipe_config
->has_pch_encoder
) {
12156 int fdi_dotclock
= intel_dotclock_calculate(intel_fdi_link_freq(dev_priv
, pipe_config
),
12157 &pipe_config
->fdi_m_n
);
12158 int dotclock
= pipe_config
->base
.adjusted_mode
.crtc_clock
;
12161 * FDI already provided one idea for the dotclock.
12162 * Yell if the encoder disagrees.
12164 WARN(!intel_fuzzy_clock_check(fdi_dotclock
, dotclock
),
12165 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12166 fdi_dotclock
, dotclock
);
12170 static void verify_wm_state(struct drm_crtc
*crtc
,
12171 struct drm_crtc_state
*new_state
)
12173 struct drm_i915_private
*dev_priv
= to_i915(crtc
->dev
);
12174 struct skl_ddb_allocation hw_ddb
, *sw_ddb
;
12175 struct skl_pipe_wm hw_wm
, *sw_wm
;
12176 struct skl_plane_wm
*hw_plane_wm
, *sw_plane_wm
;
12177 struct skl_ddb_entry
*hw_ddb_entry
, *sw_ddb_entry
;
12178 struct skl_ddb_entry hw_ddb_y
[I915_MAX_PLANES
];
12179 struct skl_ddb_entry hw_ddb_uv
[I915_MAX_PLANES
];
12180 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
12181 const enum pipe pipe
= intel_crtc
->pipe
;
12182 int plane
, level
, max_level
= ilk_wm_max_level(dev_priv
);
12184 if (INTEL_GEN(dev_priv
) < 9 || !new_state
->active
)
12187 skl_pipe_wm_get_hw_state(intel_crtc
, &hw_wm
);
12188 sw_wm
= &to_intel_crtc_state(new_state
)->wm
.skl
.optimal
;
12190 skl_pipe_ddb_get_hw_state(intel_crtc
, hw_ddb_y
, hw_ddb_uv
);
12192 skl_ddb_get_hw_state(dev_priv
, &hw_ddb
);
12193 sw_ddb
= &dev_priv
->wm
.skl_hw
.ddb
;
12195 if (INTEL_GEN(dev_priv
) >= 11)
12196 if (hw_ddb
.enabled_slices
!= sw_ddb
->enabled_slices
)
12197 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12198 sw_ddb
->enabled_slices
,
12199 hw_ddb
.enabled_slices
);
12201 for_each_universal_plane(dev_priv
, pipe
, plane
) {
12202 hw_plane_wm
= &hw_wm
.planes
[plane
];
12203 sw_plane_wm
= &sw_wm
->planes
[plane
];
12206 for (level
= 0; level
<= max_level
; level
++) {
12207 if (skl_wm_level_equals(&hw_plane_wm
->wm
[level
],
12208 &sw_plane_wm
->wm
[level
]))
12211 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12212 pipe_name(pipe
), plane
+ 1, level
,
12213 sw_plane_wm
->wm
[level
].plane_en
,
12214 sw_plane_wm
->wm
[level
].plane_res_b
,
12215 sw_plane_wm
->wm
[level
].plane_res_l
,
12216 hw_plane_wm
->wm
[level
].plane_en
,
12217 hw_plane_wm
->wm
[level
].plane_res_b
,
12218 hw_plane_wm
->wm
[level
].plane_res_l
);
12221 if (!skl_wm_level_equals(&hw_plane_wm
->trans_wm
,
12222 &sw_plane_wm
->trans_wm
)) {
12223 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12224 pipe_name(pipe
), plane
+ 1,
12225 sw_plane_wm
->trans_wm
.plane_en
,
12226 sw_plane_wm
->trans_wm
.plane_res_b
,
12227 sw_plane_wm
->trans_wm
.plane_res_l
,
12228 hw_plane_wm
->trans_wm
.plane_en
,
12229 hw_plane_wm
->trans_wm
.plane_res_b
,
12230 hw_plane_wm
->trans_wm
.plane_res_l
);
12234 hw_ddb_entry
= &hw_ddb_y
[plane
];
12235 sw_ddb_entry
= &to_intel_crtc_state(new_state
)->wm
.skl
.plane_ddb_y
[plane
];
12237 if (!skl_ddb_entry_equal(hw_ddb_entry
, sw_ddb_entry
)) {
12238 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12239 pipe_name(pipe
), plane
+ 1,
12240 sw_ddb_entry
->start
, sw_ddb_entry
->end
,
12241 hw_ddb_entry
->start
, hw_ddb_entry
->end
);
12247 * If the cursor plane isn't active, we may not have updated it's ddb
12248 * allocation. In that case since the ddb allocation will be updated
12249 * once the plane becomes visible, we can skip this check
12252 hw_plane_wm
= &hw_wm
.planes
[PLANE_CURSOR
];
12253 sw_plane_wm
= &sw_wm
->planes
[PLANE_CURSOR
];
12256 for (level
= 0; level
<= max_level
; level
++) {
12257 if (skl_wm_level_equals(&hw_plane_wm
->wm
[level
],
12258 &sw_plane_wm
->wm
[level
]))
12261 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12262 pipe_name(pipe
), level
,
12263 sw_plane_wm
->wm
[level
].plane_en
,
12264 sw_plane_wm
->wm
[level
].plane_res_b
,
12265 sw_plane_wm
->wm
[level
].plane_res_l
,
12266 hw_plane_wm
->wm
[level
].plane_en
,
12267 hw_plane_wm
->wm
[level
].plane_res_b
,
12268 hw_plane_wm
->wm
[level
].plane_res_l
);
12271 if (!skl_wm_level_equals(&hw_plane_wm
->trans_wm
,
12272 &sw_plane_wm
->trans_wm
)) {
12273 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12275 sw_plane_wm
->trans_wm
.plane_en
,
12276 sw_plane_wm
->trans_wm
.plane_res_b
,
12277 sw_plane_wm
->trans_wm
.plane_res_l
,
12278 hw_plane_wm
->trans_wm
.plane_en
,
12279 hw_plane_wm
->trans_wm
.plane_res_b
,
12280 hw_plane_wm
->trans_wm
.plane_res_l
);
12284 hw_ddb_entry
= &hw_ddb_y
[PLANE_CURSOR
];
12285 sw_ddb_entry
= &to_intel_crtc_state(new_state
)->wm
.skl
.plane_ddb_y
[PLANE_CURSOR
];
12287 if (!skl_ddb_entry_equal(hw_ddb_entry
, sw_ddb_entry
)) {
12288 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12290 sw_ddb_entry
->start
, sw_ddb_entry
->end
,
12291 hw_ddb_entry
->start
, hw_ddb_entry
->end
);
12297 verify_connector_state(struct drm_device
*dev
,
12298 struct drm_atomic_state
*state
,
12299 struct drm_crtc
*crtc
)
12301 struct drm_connector
*connector
;
12302 struct drm_connector_state
*new_conn_state
;
12305 for_each_new_connector_in_state(state
, connector
, new_conn_state
, i
) {
12306 struct drm_encoder
*encoder
= connector
->encoder
;
12307 struct drm_crtc_state
*crtc_state
= NULL
;
12309 if (new_conn_state
->crtc
!= crtc
)
12313 crtc_state
= drm_atomic_get_new_crtc_state(state
, new_conn_state
->crtc
);
12315 intel_connector_verify_state(crtc_state
, new_conn_state
);
12317 I915_STATE_WARN(new_conn_state
->best_encoder
!= encoder
,
12318 "connector's atomic encoder doesn't match legacy encoder\n");
12323 verify_encoder_state(struct drm_device
*dev
, struct drm_atomic_state
*state
)
12325 struct intel_encoder
*encoder
;
12326 struct drm_connector
*connector
;
12327 struct drm_connector_state
*old_conn_state
, *new_conn_state
;
12330 for_each_intel_encoder(dev
, encoder
) {
12331 bool enabled
= false, found
= false;
12334 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12335 encoder
->base
.base
.id
,
12336 encoder
->base
.name
);
12338 for_each_oldnew_connector_in_state(state
, connector
, old_conn_state
,
12339 new_conn_state
, i
) {
12340 if (old_conn_state
->best_encoder
== &encoder
->base
)
12343 if (new_conn_state
->best_encoder
!= &encoder
->base
)
12345 found
= enabled
= true;
12347 I915_STATE_WARN(new_conn_state
->crtc
!=
12348 encoder
->base
.crtc
,
12349 "connector's crtc doesn't match encoder crtc\n");
12355 I915_STATE_WARN(!!encoder
->base
.crtc
!= enabled
,
12356 "encoder's enabled state mismatch "
12357 "(expected %i, found %i)\n",
12358 !!encoder
->base
.crtc
, enabled
);
12360 if (!encoder
->base
.crtc
) {
12363 active
= encoder
->get_hw_state(encoder
, &pipe
);
12364 I915_STATE_WARN(active
,
12365 "encoder detached but still enabled on pipe %c.\n",
12372 verify_crtc_state(struct drm_crtc
*crtc
,
12373 struct drm_crtc_state
*old_crtc_state
,
12374 struct drm_crtc_state
*new_crtc_state
)
12376 struct drm_device
*dev
= crtc
->dev
;
12377 struct drm_i915_private
*dev_priv
= to_i915(dev
);
12378 struct intel_encoder
*encoder
;
12379 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
12380 struct intel_crtc_state
*pipe_config
, *sw_config
;
12381 struct drm_atomic_state
*old_state
;
12384 old_state
= old_crtc_state
->state
;
12385 __drm_atomic_helper_crtc_destroy_state(old_crtc_state
);
12386 pipe_config
= to_intel_crtc_state(old_crtc_state
);
12387 memset(pipe_config
, 0, sizeof(*pipe_config
));
12388 pipe_config
->base
.crtc
= crtc
;
12389 pipe_config
->base
.state
= old_state
;
12391 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc
->base
.id
, crtc
->name
);
12393 active
= dev_priv
->display
.get_pipe_config(intel_crtc
, pipe_config
);
12395 /* we keep both pipes enabled on 830 */
12396 if (IS_I830(dev_priv
))
12397 active
= new_crtc_state
->active
;
12399 I915_STATE_WARN(new_crtc_state
->active
!= active
,
12400 "crtc active state doesn't match with hw state "
12401 "(expected %i, found %i)\n", new_crtc_state
->active
, active
);
12403 I915_STATE_WARN(intel_crtc
->active
!= new_crtc_state
->active
,
12404 "transitional active state does not match atomic hw state "
12405 "(expected %i, found %i)\n", new_crtc_state
->active
, intel_crtc
->active
);
12407 for_each_encoder_on_crtc(dev
, crtc
, encoder
) {
12410 active
= encoder
->get_hw_state(encoder
, &pipe
);
12411 I915_STATE_WARN(active
!= new_crtc_state
->active
,
12412 "[ENCODER:%i] active %i with crtc active %i\n",
12413 encoder
->base
.base
.id
, active
, new_crtc_state
->active
);
12415 I915_STATE_WARN(active
&& intel_crtc
->pipe
!= pipe
,
12416 "Encoder connected to wrong pipe %c\n",
12420 encoder
->get_config(encoder
, pipe_config
);
12423 intel_crtc_compute_pixel_rate(pipe_config
);
12425 if (!new_crtc_state
->active
)
12428 intel_pipe_config_sanity_check(dev_priv
, pipe_config
);
12430 sw_config
= to_intel_crtc_state(new_crtc_state
);
12431 if (!intel_pipe_config_compare(dev_priv
, sw_config
,
12432 pipe_config
, false)) {
12433 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12434 intel_dump_pipe_config(intel_crtc
, pipe_config
,
12436 intel_dump_pipe_config(intel_crtc
, sw_config
,
12442 intel_verify_planes(struct intel_atomic_state
*state
)
12444 struct intel_plane
*plane
;
12445 const struct intel_plane_state
*plane_state
;
12448 for_each_new_intel_plane_in_state(state
, plane
,
12450 assert_plane(plane
, plane_state
->base
.visible
);
12454 verify_single_dpll_state(struct drm_i915_private
*dev_priv
,
12455 struct intel_shared_dpll
*pll
,
12456 struct drm_crtc
*crtc
,
12457 struct drm_crtc_state
*new_state
)
12459 struct intel_dpll_hw_state dpll_hw_state
;
12460 unsigned int crtc_mask
;
12463 memset(&dpll_hw_state
, 0, sizeof(dpll_hw_state
));
12465 DRM_DEBUG_KMS("%s\n", pll
->info
->name
);
12467 active
= pll
->info
->funcs
->get_hw_state(dev_priv
, pll
, &dpll_hw_state
);
12469 if (!(pll
->info
->flags
& INTEL_DPLL_ALWAYS_ON
)) {
12470 I915_STATE_WARN(!pll
->on
&& pll
->active_mask
,
12471 "pll in active use but not on in sw tracking\n");
12472 I915_STATE_WARN(pll
->on
&& !pll
->active_mask
,
12473 "pll is on but not used by any active crtc\n");
12474 I915_STATE_WARN(pll
->on
!= active
,
12475 "pll on state mismatch (expected %i, found %i)\n",
12480 I915_STATE_WARN(pll
->active_mask
& ~pll
->state
.crtc_mask
,
12481 "more active pll users than references: %x vs %x\n",
12482 pll
->active_mask
, pll
->state
.crtc_mask
);
12487 crtc_mask
= drm_crtc_mask(crtc
);
12489 if (new_state
->active
)
12490 I915_STATE_WARN(!(pll
->active_mask
& crtc_mask
),
12491 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12492 pipe_name(drm_crtc_index(crtc
)), pll
->active_mask
);
12494 I915_STATE_WARN(pll
->active_mask
& crtc_mask
,
12495 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12496 pipe_name(drm_crtc_index(crtc
)), pll
->active_mask
);
12498 I915_STATE_WARN(!(pll
->state
.crtc_mask
& crtc_mask
),
12499 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12500 crtc_mask
, pll
->state
.crtc_mask
);
12502 I915_STATE_WARN(pll
->on
&& memcmp(&pll
->state
.hw_state
,
12504 sizeof(dpll_hw_state
)),
12505 "pll hw state mismatch\n");
12509 verify_shared_dpll_state(struct drm_device
*dev
, struct drm_crtc
*crtc
,
12510 struct drm_crtc_state
*old_crtc_state
,
12511 struct drm_crtc_state
*new_crtc_state
)
12513 struct drm_i915_private
*dev_priv
= to_i915(dev
);
12514 struct intel_crtc_state
*old_state
= to_intel_crtc_state(old_crtc_state
);
12515 struct intel_crtc_state
*new_state
= to_intel_crtc_state(new_crtc_state
);
12517 if (new_state
->shared_dpll
)
12518 verify_single_dpll_state(dev_priv
, new_state
->shared_dpll
, crtc
, new_crtc_state
);
12520 if (old_state
->shared_dpll
&&
12521 old_state
->shared_dpll
!= new_state
->shared_dpll
) {
12522 unsigned int crtc_mask
= drm_crtc_mask(crtc
);
12523 struct intel_shared_dpll
*pll
= old_state
->shared_dpll
;
12525 I915_STATE_WARN(pll
->active_mask
& crtc_mask
,
12526 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12527 pipe_name(drm_crtc_index(crtc
)));
12528 I915_STATE_WARN(pll
->state
.crtc_mask
& crtc_mask
,
12529 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12530 pipe_name(drm_crtc_index(crtc
)));
12535 intel_modeset_verify_crtc(struct drm_crtc
*crtc
,
12536 struct drm_atomic_state
*state
,
12537 struct drm_crtc_state
*old_state
,
12538 struct drm_crtc_state
*new_state
)
12540 if (!needs_modeset(new_state
) &&
12541 !to_intel_crtc_state(new_state
)->update_pipe
)
12544 verify_wm_state(crtc
, new_state
);
12545 verify_connector_state(crtc
->dev
, state
, crtc
);
12546 verify_crtc_state(crtc
, old_state
, new_state
);
12547 verify_shared_dpll_state(crtc
->dev
, crtc
, old_state
, new_state
);
12551 verify_disabled_dpll_state(struct drm_device
*dev
)
12553 struct drm_i915_private
*dev_priv
= to_i915(dev
);
12556 for (i
= 0; i
< dev_priv
->num_shared_dpll
; i
++)
12557 verify_single_dpll_state(dev_priv
, &dev_priv
->shared_dplls
[i
], NULL
, NULL
);
12561 intel_modeset_verify_disabled(struct drm_device
*dev
,
12562 struct drm_atomic_state
*state
)
12564 verify_encoder_state(dev
, state
);
12565 verify_connector_state(dev
, state
, NULL
);
12566 verify_disabled_dpll_state(dev
);
12569 static void update_scanline_offset(const struct intel_crtc_state
*crtc_state
)
12571 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
12572 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
12575 * The scanline counter increments at the leading edge of hsync.
12577 * On most platforms it starts counting from vtotal-1 on the
12578 * first active line. That means the scanline counter value is
12579 * always one less than what we would expect. Ie. just after
12580 * start of vblank, which also occurs at start of hsync (on the
12581 * last active line), the scanline counter will read vblank_start-1.
12583 * On gen2 the scanline counter starts counting from 1 instead
12584 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12585 * to keep the value positive), instead of adding one.
12587 * On HSW+ the behaviour of the scanline counter depends on the output
12588 * type. For DP ports it behaves like most other platforms, but on HDMI
12589 * there's an extra 1 line difference. So we need to add two instead of
12590 * one to the value.
12592 * On VLV/CHV DSI the scanline counter would appear to increment
12593 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12594 * that means we can't tell whether we're in vblank or not while
12595 * we're on that particular line. We must still set scanline_offset
12596 * to 1 so that the vblank timestamps come out correct when we query
12597 * the scanline counter from within the vblank interrupt handler.
12598 * However if queried just before the start of vblank we'll get an
12599 * answer that's slightly in the future.
12601 if (IS_GEN(dev_priv
, 2)) {
12602 const struct drm_display_mode
*adjusted_mode
= &crtc_state
->base
.adjusted_mode
;
12605 vtotal
= adjusted_mode
->crtc_vtotal
;
12606 if (adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
)
12609 crtc
->scanline_offset
= vtotal
- 1;
12610 } else if (HAS_DDI(dev_priv
) &&
12611 intel_crtc_has_type(crtc_state
, INTEL_OUTPUT_HDMI
)) {
12612 crtc
->scanline_offset
= 2;
12614 crtc
->scanline_offset
= 1;
12617 static void intel_modeset_clear_plls(struct drm_atomic_state
*state
)
12619 struct drm_device
*dev
= state
->dev
;
12620 struct drm_i915_private
*dev_priv
= to_i915(dev
);
12621 struct drm_crtc
*crtc
;
12622 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
12625 if (!dev_priv
->display
.crtc_compute_clock
)
12628 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
12629 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
12630 struct intel_shared_dpll
*old_dpll
=
12631 to_intel_crtc_state(old_crtc_state
)->shared_dpll
;
12633 if (!needs_modeset(new_crtc_state
))
12636 to_intel_crtc_state(new_crtc_state
)->shared_dpll
= NULL
;
12641 intel_release_shared_dpll(old_dpll
, intel_crtc
, state
);
12646 * This implements the workaround described in the "notes" section of the mode
12647 * set sequence documentation. When going from no pipes or single pipe to
12648 * multiple pipes, and planes are enabled after the pipe, we need to wait at
12649 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12651 static int haswell_mode_set_planes_workaround(struct drm_atomic_state
*state
)
12653 struct drm_crtc_state
*crtc_state
;
12654 struct intel_crtc
*intel_crtc
;
12655 struct drm_crtc
*crtc
;
12656 struct intel_crtc_state
*first_crtc_state
= NULL
;
12657 struct intel_crtc_state
*other_crtc_state
= NULL
;
12658 enum pipe first_pipe
= INVALID_PIPE
, enabled_pipe
= INVALID_PIPE
;
12661 /* look at all crtc's that are going to be enabled in during modeset */
12662 for_each_new_crtc_in_state(state
, crtc
, crtc_state
, i
) {
12663 intel_crtc
= to_intel_crtc(crtc
);
12665 if (!crtc_state
->active
|| !needs_modeset(crtc_state
))
12668 if (first_crtc_state
) {
12669 other_crtc_state
= to_intel_crtc_state(crtc_state
);
12672 first_crtc_state
= to_intel_crtc_state(crtc_state
);
12673 first_pipe
= intel_crtc
->pipe
;
12677 /* No workaround needed? */
12678 if (!first_crtc_state
)
12681 /* w/a possibly needed, check how many crtc's are already enabled. */
12682 for_each_intel_crtc(state
->dev
, intel_crtc
) {
12683 struct intel_crtc_state
*pipe_config
;
12685 pipe_config
= intel_atomic_get_crtc_state(state
, intel_crtc
);
12686 if (IS_ERR(pipe_config
))
12687 return PTR_ERR(pipe_config
);
12689 pipe_config
->hsw_workaround_pipe
= INVALID_PIPE
;
12691 if (!pipe_config
->base
.active
||
12692 needs_modeset(&pipe_config
->base
))
12695 /* 2 or more enabled crtcs means no need for w/a */
12696 if (enabled_pipe
!= INVALID_PIPE
)
12699 enabled_pipe
= intel_crtc
->pipe
;
12702 if (enabled_pipe
!= INVALID_PIPE
)
12703 first_crtc_state
->hsw_workaround_pipe
= enabled_pipe
;
12704 else if (other_crtc_state
)
12705 other_crtc_state
->hsw_workaround_pipe
= first_pipe
;
12710 static int intel_lock_all_pipes(struct drm_atomic_state
*state
)
12712 struct drm_crtc
*crtc
;
12714 /* Add all pipes to the state */
12715 for_each_crtc(state
->dev
, crtc
) {
12716 struct drm_crtc_state
*crtc_state
;
12718 crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
12719 if (IS_ERR(crtc_state
))
12720 return PTR_ERR(crtc_state
);
12726 static int intel_modeset_all_pipes(struct drm_atomic_state
*state
)
12728 struct drm_crtc
*crtc
;
12731 * Add all pipes to the state, and force
12732 * a modeset on all the active ones.
12734 for_each_crtc(state
->dev
, crtc
) {
12735 struct drm_crtc_state
*crtc_state
;
12738 crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
12739 if (IS_ERR(crtc_state
))
12740 return PTR_ERR(crtc_state
);
12742 if (!crtc_state
->active
|| needs_modeset(crtc_state
))
12745 crtc_state
->mode_changed
= true;
12747 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
12751 ret
= drm_atomic_add_affected_planes(state
, crtc
);
12759 static int intel_modeset_checks(struct drm_atomic_state
*state
)
12761 struct intel_atomic_state
*intel_state
= to_intel_atomic_state(state
);
12762 struct drm_i915_private
*dev_priv
= to_i915(state
->dev
);
12763 struct drm_crtc
*crtc
;
12764 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
12767 if (!check_digital_port_conflicts(state
)) {
12768 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
12772 intel_state
->modeset
= true;
12773 intel_state
->active_crtcs
= dev_priv
->active_crtcs
;
12774 intel_state
->cdclk
.logical
= dev_priv
->cdclk
.logical
;
12775 intel_state
->cdclk
.actual
= dev_priv
->cdclk
.actual
;
12777 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
12778 if (new_crtc_state
->active
)
12779 intel_state
->active_crtcs
|= 1 << i
;
12781 intel_state
->active_crtcs
&= ~(1 << i
);
12783 if (old_crtc_state
->active
!= new_crtc_state
->active
)
12784 intel_state
->active_pipe_changes
|= drm_crtc_mask(crtc
);
12788 * See if the config requires any additional preparation, e.g.
12789 * to adjust global state with pipes off. We need to do this
12790 * here so we can get the modeset_pipe updated config for the new
12791 * mode set on this crtc. For other crtcs we need to use the
12792 * adjusted_mode bits in the crtc directly.
12794 if (dev_priv
->display
.modeset_calc_cdclk
) {
12795 ret
= dev_priv
->display
.modeset_calc_cdclk(state
);
12800 * Writes to dev_priv->cdclk.logical must protected by
12801 * holding all the crtc locks, even if we don't end up
12802 * touching the hardware
12804 if (intel_cdclk_changed(&dev_priv
->cdclk
.logical
,
12805 &intel_state
->cdclk
.logical
)) {
12806 ret
= intel_lock_all_pipes(state
);
12811 /* All pipes must be switched off while we change the cdclk. */
12812 if (intel_cdclk_needs_modeset(&dev_priv
->cdclk
.actual
,
12813 &intel_state
->cdclk
.actual
)) {
12814 ret
= intel_modeset_all_pipes(state
);
12819 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
12820 intel_state
->cdclk
.logical
.cdclk
,
12821 intel_state
->cdclk
.actual
.cdclk
);
12822 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
12823 intel_state
->cdclk
.logical
.voltage_level
,
12824 intel_state
->cdclk
.actual
.voltage_level
);
12826 to_intel_atomic_state(state
)->cdclk
.logical
= dev_priv
->cdclk
.logical
;
12829 intel_modeset_clear_plls(state
);
12831 if (IS_HASWELL(dev_priv
))
12832 return haswell_mode_set_planes_workaround(state
);
12838 * Handle calculation of various watermark data at the end of the atomic check
12839 * phase. The code here should be run after the per-crtc and per-plane 'check'
12840 * handlers to ensure that all derived state has been updated.
12842 static int calc_watermark_data(struct intel_atomic_state
*state
)
12844 struct drm_device
*dev
= state
->base
.dev
;
12845 struct drm_i915_private
*dev_priv
= to_i915(dev
);
12847 /* Is there platform-specific watermark information to calculate? */
12848 if (dev_priv
->display
.compute_global_watermarks
)
12849 return dev_priv
->display
.compute_global_watermarks(state
);
12855 * intel_atomic_check - validate state object
12857 * @state: state to validate
12859 static int intel_atomic_check(struct drm_device
*dev
,
12860 struct drm_atomic_state
*state
)
12862 struct drm_i915_private
*dev_priv
= to_i915(dev
);
12863 struct intel_atomic_state
*intel_state
= to_intel_atomic_state(state
);
12864 struct drm_crtc
*crtc
;
12865 struct drm_crtc_state
*old_crtc_state
, *crtc_state
;
12867 bool any_ms
= false;
12869 /* Catch I915_MODE_FLAG_INHERITED */
12870 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
12872 if (crtc_state
->mode
.private_flags
!=
12873 old_crtc_state
->mode
.private_flags
)
12874 crtc_state
->mode_changed
= true;
12877 ret
= drm_atomic_helper_check_modeset(dev
, state
);
12881 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, crtc_state
, i
) {
12882 struct intel_crtc_state
*pipe_config
=
12883 to_intel_crtc_state(crtc_state
);
12885 if (!needs_modeset(crtc_state
))
12888 if (!crtc_state
->enable
) {
12893 ret
= intel_modeset_pipe_config(crtc
, pipe_config
);
12894 if (ret
== -EDEADLK
)
12897 intel_dump_pipe_config(to_intel_crtc(crtc
),
12898 pipe_config
, "[failed]");
12902 if (intel_pipe_config_compare(dev_priv
,
12903 to_intel_crtc_state(old_crtc_state
),
12904 pipe_config
, true)) {
12905 crtc_state
->mode_changed
= false;
12906 pipe_config
->update_pipe
= true;
12909 if (needs_modeset(crtc_state
))
12912 intel_dump_pipe_config(to_intel_crtc(crtc
), pipe_config
,
12913 needs_modeset(crtc_state
) ?
12914 "[modeset]" : "[fastset]");
12917 ret
= drm_dp_mst_atomic_check(state
);
12922 ret
= intel_modeset_checks(state
);
12927 intel_state
->cdclk
.logical
= dev_priv
->cdclk
.logical
;
12930 ret
= icl_add_linked_planes(intel_state
);
12934 ret
= drm_atomic_helper_check_planes(dev
, state
);
12938 intel_fbc_choose_crtc(dev_priv
, intel_state
);
12939 return calc_watermark_data(intel_state
);
/* Prepare planes (pin framebuffers etc.) ahead of the atomic commit. */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12948 u32
intel_crtc_get_vblank_counter(struct intel_crtc
*crtc
)
12950 struct drm_device
*dev
= crtc
->base
.dev
;
12951 struct drm_vblank_crtc
*vblank
= &dev
->vblank
[drm_crtc_index(&crtc
->base
)];
12953 if (!vblank
->max_vblank_count
)
12954 return (u32
)drm_crtc_accurate_vblank_count(&crtc
->base
);
12956 return dev
->driver
->get_vblank_counter(dev
, crtc
->pipe
);
12959 static void intel_update_crtc(struct drm_crtc
*crtc
,
12960 struct drm_atomic_state
*state
,
12961 struct drm_crtc_state
*old_crtc_state
,
12962 struct drm_crtc_state
*new_crtc_state
)
12964 struct drm_device
*dev
= crtc
->dev
;
12965 struct drm_i915_private
*dev_priv
= to_i915(dev
);
12966 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
12967 struct intel_crtc_state
*pipe_config
= to_intel_crtc_state(new_crtc_state
);
12968 bool modeset
= needs_modeset(new_crtc_state
);
12969 struct intel_plane_state
*new_plane_state
=
12970 intel_atomic_get_new_plane_state(to_intel_atomic_state(state
),
12971 to_intel_plane(crtc
->primary
));
12974 update_scanline_offset(pipe_config
);
12975 dev_priv
->display
.crtc_enable(pipe_config
, state
);
12977 /* vblanks work again, re-enable pipe CRC. */
12978 intel_crtc_enable_pipe_crc(intel_crtc
);
12980 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state
),
12983 if (pipe_config
->update_pipe
)
12984 intel_encoders_update_pipe(crtc
, pipe_config
, state
);
12987 if (pipe_config
->update_pipe
&& !pipe_config
->enable_fbc
)
12988 intel_fbc_disable(intel_crtc
);
12989 else if (new_plane_state
)
12990 intel_fbc_enable(intel_crtc
, pipe_config
, new_plane_state
);
12992 intel_begin_crtc_commit(crtc
, old_crtc_state
);
12994 if (INTEL_GEN(dev_priv
) >= 9)
12995 skl_update_planes_on_crtc(to_intel_atomic_state(state
), intel_crtc
);
12997 i9xx_update_planes_on_crtc(to_intel_atomic_state(state
), intel_crtc
);
12999 intel_finish_crtc_commit(crtc
, old_crtc_state
);
13002 static void intel_update_crtcs(struct drm_atomic_state
*state
)
13004 struct drm_crtc
*crtc
;
13005 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
13008 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
13009 if (!new_crtc_state
->active
)
13012 intel_update_crtc(crtc
, state
, old_crtc_state
,
13017 static void skl_update_crtcs(struct drm_atomic_state
*state
)
13019 struct drm_i915_private
*dev_priv
= to_i915(state
->dev
);
13020 struct intel_atomic_state
*intel_state
= to_intel_atomic_state(state
);
13021 struct drm_crtc
*crtc
;
13022 struct intel_crtc
*intel_crtc
;
13023 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
13024 struct intel_crtc_state
*cstate
;
13025 unsigned int updated
= 0;
13029 u8 hw_enabled_slices
= dev_priv
->wm
.skl_hw
.ddb
.enabled_slices
;
13030 u8 required_slices
= intel_state
->wm_results
.ddb
.enabled_slices
;
13031 struct skl_ddb_entry entries
[I915_MAX_PIPES
] = {};
13033 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
)
13034 /* ignore allocations for crtc's that have been turned off. */
13035 if (new_crtc_state
->active
)
13036 entries
[i
] = to_intel_crtc_state(old_crtc_state
)->wm
.skl
.ddb
;
13038 /* If 2nd DBuf slice required, enable it here */
13039 if (INTEL_GEN(dev_priv
) >= 11 && required_slices
> hw_enabled_slices
)
13040 icl_dbuf_slices_update(dev_priv
, required_slices
);
13043 * Whenever the number of active pipes changes, we need to make sure we
13044 * update the pipes in the right order so that their ddb allocations
13045 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
13046 * cause pipe underruns and other bad stuff.
13051 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
13052 bool vbl_wait
= false;
13053 unsigned int cmask
= drm_crtc_mask(crtc
);
13055 intel_crtc
= to_intel_crtc(crtc
);
13056 cstate
= to_intel_crtc_state(new_crtc_state
);
13057 pipe
= intel_crtc
->pipe
;
13059 if (updated
& cmask
|| !cstate
->base
.active
)
13062 if (skl_ddb_allocation_overlaps(&cstate
->wm
.skl
.ddb
,
13064 INTEL_INFO(dev_priv
)->num_pipes
, i
))
13068 entries
[i
] = cstate
->wm
.skl
.ddb
;
13071 * If this is an already active pipe, it's DDB changed,
13072 * and this isn't the last pipe that needs updating
13073 * then we need to wait for a vblank to pass for the
13074 * new ddb allocation to take effect.
13076 if (!skl_ddb_entry_equal(&cstate
->wm
.skl
.ddb
,
13077 &to_intel_crtc_state(old_crtc_state
)->wm
.skl
.ddb
) &&
13078 !new_crtc_state
->active_changed
&&
13079 intel_state
->wm_results
.dirty_pipes
!= updated
)
13082 intel_update_crtc(crtc
, state
, old_crtc_state
,
13086 intel_wait_for_vblank(dev_priv
, pipe
);
13090 } while (progress
);
13092 /* If 2nd DBuf slice is no more required disable it */
13093 if (INTEL_GEN(dev_priv
) >= 11 && required_slices
< hw_enabled_slices
)
13094 icl_dbuf_slices_update(dev_priv
, required_slices
);
13097 static void intel_atomic_helper_free_state(struct drm_i915_private
*dev_priv
)
13099 struct intel_atomic_state
*state
, *next
;
13100 struct llist_node
*freed
;
13102 freed
= llist_del_all(&dev_priv
->atomic_helper
.free_list
);
13103 llist_for_each_entry_safe(state
, next
, freed
, freed
)
13104 drm_atomic_state_put(&state
->base
);
13107 static void intel_atomic_helper_free_state_worker(struct work_struct
*work
)
13109 struct drm_i915_private
*dev_priv
=
13110 container_of(work
, typeof(*dev_priv
), atomic_helper
.free_work
);
13112 intel_atomic_helper_free_state(dev_priv
);
13115 static void intel_atomic_commit_fence_wait(struct intel_atomic_state
*intel_state
)
13117 struct wait_queue_entry wait_fence
, wait_reset
;
13118 struct drm_i915_private
*dev_priv
= to_i915(intel_state
->base
.dev
);
13120 init_wait_entry(&wait_fence
, 0);
13121 init_wait_entry(&wait_reset
, 0);
13123 prepare_to_wait(&intel_state
->commit_ready
.wait
,
13124 &wait_fence
, TASK_UNINTERRUPTIBLE
);
13125 prepare_to_wait(&dev_priv
->gpu_error
.wait_queue
,
13126 &wait_reset
, TASK_UNINTERRUPTIBLE
);
13129 if (i915_sw_fence_done(&intel_state
->commit_ready
)
13130 || test_bit(I915_RESET_MODESET
, &dev_priv
->gpu_error
.flags
))
13135 finish_wait(&intel_state
->commit_ready
.wait
, &wait_fence
);
13136 finish_wait(&dev_priv
->gpu_error
.wait_queue
, &wait_reset
);
13139 static void intel_atomic_cleanup_work(struct work_struct
*work
)
13141 struct drm_atomic_state
*state
=
13142 container_of(work
, struct drm_atomic_state
, commit_work
);
13143 struct drm_i915_private
*i915
= to_i915(state
->dev
);
13145 drm_atomic_helper_cleanup_planes(&i915
->drm
, state
);
13146 drm_atomic_helper_commit_cleanup_done(state
);
13147 drm_atomic_state_put(state
);
13149 intel_atomic_helper_free_state(i915
);
13152 static void intel_atomic_commit_tail(struct drm_atomic_state
*state
)
13154 struct drm_device
*dev
= state
->dev
;
13155 struct intel_atomic_state
*intel_state
= to_intel_atomic_state(state
);
13156 struct drm_i915_private
*dev_priv
= to_i915(dev
);
13157 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
13158 struct intel_crtc_state
*new_intel_crtc_state
, *old_intel_crtc_state
;
13159 struct drm_crtc
*crtc
;
13160 struct intel_crtc
*intel_crtc
;
13161 u64 put_domains
[I915_MAX_PIPES
] = {};
13162 intel_wakeref_t wakeref
= 0;
13165 intel_atomic_commit_fence_wait(intel_state
);
13167 drm_atomic_helper_wait_for_dependencies(state
);
13169 if (intel_state
->modeset
)
13170 wakeref
= intel_display_power_get(dev_priv
, POWER_DOMAIN_MODESET
);
13172 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
13173 old_intel_crtc_state
= to_intel_crtc_state(old_crtc_state
);
13174 new_intel_crtc_state
= to_intel_crtc_state(new_crtc_state
);
13175 intel_crtc
= to_intel_crtc(crtc
);
13177 if (needs_modeset(new_crtc_state
) ||
13178 to_intel_crtc_state(new_crtc_state
)->update_pipe
) {
13180 put_domains
[intel_crtc
->pipe
] =
13181 modeset_get_crtc_power_domains(crtc
,
13182 new_intel_crtc_state
);
13185 if (!needs_modeset(new_crtc_state
))
13188 intel_pre_plane_update(old_intel_crtc_state
, new_intel_crtc_state
);
13190 if (old_crtc_state
->active
) {
13191 intel_crtc_disable_planes(intel_state
, intel_crtc
);
13194 * We need to disable pipe CRC before disabling the pipe,
13195 * or we race against vblank off.
13197 intel_crtc_disable_pipe_crc(intel_crtc
);
13199 dev_priv
->display
.crtc_disable(old_intel_crtc_state
, state
);
13200 intel_crtc
->active
= false;
13201 intel_fbc_disable(intel_crtc
);
13202 intel_disable_shared_dpll(old_intel_crtc_state
);
13205 * Underruns don't always raise
13206 * interrupts, so check manually.
13208 intel_check_cpu_fifo_underruns(dev_priv
);
13209 intel_check_pch_fifo_underruns(dev_priv
);
13211 /* FIXME unify this for all platforms */
13212 if (!new_crtc_state
->active
&&
13213 !HAS_GMCH(dev_priv
) &&
13214 dev_priv
->display
.initial_watermarks
)
13215 dev_priv
->display
.initial_watermarks(intel_state
,
13216 new_intel_crtc_state
);
13220 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13221 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
)
13222 to_intel_crtc(crtc
)->config
= to_intel_crtc_state(new_crtc_state
);
13224 if (intel_state
->modeset
) {
13225 drm_atomic_helper_update_legacy_modeset_state(state
->dev
, state
);
13227 intel_set_cdclk(dev_priv
, &dev_priv
->cdclk
.actual
);
13230 * SKL workaround: bspec recommends we disable the SAGV when we
13231 * have more then one pipe enabled
13233 if (!intel_can_enable_sagv(state
))
13234 intel_disable_sagv(dev_priv
);
13236 intel_modeset_verify_disabled(dev
, state
);
13239 /* Complete the events for pipes that have now been disabled */
13240 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
13241 bool modeset
= needs_modeset(new_crtc_state
);
13243 /* Complete events for now disable pipes here. */
13244 if (modeset
&& !new_crtc_state
->active
&& new_crtc_state
->event
) {
13245 spin_lock_irq(&dev
->event_lock
);
13246 drm_crtc_send_vblank_event(crtc
, new_crtc_state
->event
);
13247 spin_unlock_irq(&dev
->event_lock
);
13249 new_crtc_state
->event
= NULL
;
13253 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13254 dev_priv
->display
.update_crtcs(state
);
13256 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13257 * already, but still need the state for the delayed optimization. To
13259 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13260 * - schedule that vblank worker _before_ calling hw_done
13261 * - at the start of commit_tail, cancel it _synchrously
13262 * - switch over to the vblank wait helper in the core after that since
13263 * we don't need out special handling any more.
13265 drm_atomic_helper_wait_for_flip_done(dev
, state
);
13267 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
13268 new_intel_crtc_state
= to_intel_crtc_state(new_crtc_state
);
13270 if (new_crtc_state
->active
&&
13271 !needs_modeset(new_crtc_state
) &&
13272 (new_intel_crtc_state
->base
.color_mgmt_changed
||
13273 new_intel_crtc_state
->update_pipe
))
13274 intel_color_load_luts(new_intel_crtc_state
);
13278 * Now that the vblank has passed, we can go ahead and program the
13279 * optimal watermarks on platforms that need two-step watermark
13282 * TODO: Move this (and other cleanup) to an async worker eventually.
13284 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
13285 new_intel_crtc_state
= to_intel_crtc_state(new_crtc_state
);
13287 if (dev_priv
->display
.optimize_watermarks
)
13288 dev_priv
->display
.optimize_watermarks(intel_state
,
13289 new_intel_crtc_state
);
13292 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
13293 intel_post_plane_update(to_intel_crtc_state(old_crtc_state
));
13295 if (put_domains
[i
])
13296 modeset_put_power_domains(dev_priv
, put_domains
[i
]);
13298 intel_modeset_verify_crtc(crtc
, state
, old_crtc_state
, new_crtc_state
);
13301 if (intel_state
->modeset
)
13302 intel_verify_planes(intel_state
);
13304 if (intel_state
->modeset
&& intel_can_enable_sagv(state
))
13305 intel_enable_sagv(dev_priv
);
13307 drm_atomic_helper_commit_hw_done(state
);
13309 if (intel_state
->modeset
) {
13310 /* As one of the primary mmio accessors, KMS has a high
13311 * likelihood of triggering bugs in unclaimed access. After we
13312 * finish modesetting, see if an error has been flagged, and if
13313 * so enable debugging for the next modeset - and hope we catch
13316 intel_uncore_arm_unclaimed_mmio_detection(dev_priv
);
13317 intel_display_power_put(dev_priv
, POWER_DOMAIN_MODESET
, wakeref
);
13321 * Defer the cleanup of the old state to a separate worker to not
13322 * impede the current task (userspace for blocking modesets) that
13323 * are executed inline. For out-of-line asynchronous modesets/flips,
13324 * deferring to a new worker seems overkill, but we would place a
13325 * schedule point (cond_resched()) here anyway to keep latencies
13328 INIT_WORK(&state
->commit_work
, intel_atomic_cleanup_work
);
13329 queue_work(system_highpri_wq
, &state
->commit_work
);
13332 static void intel_atomic_commit_work(struct work_struct
*work
)
13334 struct drm_atomic_state
*state
=
13335 container_of(work
, struct drm_atomic_state
, commit_work
);
13337 intel_atomic_commit_tail(state
);
13340 static int __i915_sw_fence_call
13341 intel_atomic_commit_ready(struct i915_sw_fence
*fence
,
13342 enum i915_sw_fence_notify notify
)
13344 struct intel_atomic_state
*state
=
13345 container_of(fence
, struct intel_atomic_state
, commit_ready
);
13348 case FENCE_COMPLETE
:
13349 /* we do blocking waits in the worker, nothing to do here */
13353 struct intel_atomic_helper
*helper
=
13354 &to_i915(state
->base
.dev
)->atomic_helper
;
13356 if (llist_add(&state
->freed
, &helper
->free_list
))
13357 schedule_work(&helper
->free_work
);
13362 return NOTIFY_DONE
;
13365 static void intel_atomic_track_fbs(struct drm_atomic_state
*state
)
13367 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
13368 struct drm_plane
*plane
;
13371 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
)
13372 i915_gem_track_fb(intel_fb_obj(old_plane_state
->fb
),
13373 intel_fb_obj(new_plane_state
->fb
),
13374 to_intel_plane(plane
)->frontbuffer_bit
);
13378 * intel_atomic_commit - commit validated state object
13380 * @state: the top-level driver state object
13381 * @nonblock: nonblocking commit
13383 * This function commits a top-level state object that has been validated
13384 * with drm_atomic_helper_check().
13387 * Zero for success or -errno.
13389 static int intel_atomic_commit(struct drm_device
*dev
,
13390 struct drm_atomic_state
*state
,
13393 struct intel_atomic_state
*intel_state
= to_intel_atomic_state(state
);
13394 struct drm_i915_private
*dev_priv
= to_i915(dev
);
13397 drm_atomic_state_get(state
);
13398 i915_sw_fence_init(&intel_state
->commit_ready
,
13399 intel_atomic_commit_ready
);
13402 * The intel_legacy_cursor_update() fast path takes care
13403 * of avoiding the vblank waits for simple cursor
13404 * movement and flips. For cursor on/off and size changes,
13405 * we want to perform the vblank waits so that watermark
13406 * updates happen during the correct frames. Gen9+ have
13407 * double buffered watermarks and so shouldn't need this.
13409 * Unset state->legacy_cursor_update before the call to
13410 * drm_atomic_helper_setup_commit() because otherwise
13411 * drm_atomic_helper_wait_for_flip_done() is a noop and
13412 * we get FIFO underruns because we didn't wait
13415 * FIXME doing watermarks and fb cleanup from a vblank worker
13416 * (assuming we had any) would solve these problems.
13418 if (INTEL_GEN(dev_priv
) < 9 && state
->legacy_cursor_update
) {
13419 struct intel_crtc_state
*new_crtc_state
;
13420 struct intel_crtc
*crtc
;
13423 for_each_new_intel_crtc_in_state(intel_state
, crtc
, new_crtc_state
, i
)
13424 if (new_crtc_state
->wm
.need_postvbl_update
||
13425 new_crtc_state
->update_wm_post
)
13426 state
->legacy_cursor_update
= false;
13429 ret
= intel_atomic_prepare_commit(dev
, state
);
13431 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret
);
13432 i915_sw_fence_commit(&intel_state
->commit_ready
);
13436 ret
= drm_atomic_helper_setup_commit(state
, nonblock
);
13438 ret
= drm_atomic_helper_swap_state(state
, true);
13441 i915_sw_fence_commit(&intel_state
->commit_ready
);
13443 drm_atomic_helper_cleanup_planes(dev
, state
);
13446 dev_priv
->wm
.distrust_bios_wm
= false;
13447 intel_shared_dpll_swap_state(state
);
13448 intel_atomic_track_fbs(state
);
13450 if (intel_state
->modeset
) {
13451 memcpy(dev_priv
->min_cdclk
, intel_state
->min_cdclk
,
13452 sizeof(intel_state
->min_cdclk
));
13453 memcpy(dev_priv
->min_voltage_level
,
13454 intel_state
->min_voltage_level
,
13455 sizeof(intel_state
->min_voltage_level
));
13456 dev_priv
->active_crtcs
= intel_state
->active_crtcs
;
13457 dev_priv
->cdclk
.logical
= intel_state
->cdclk
.logical
;
13458 dev_priv
->cdclk
.actual
= intel_state
->cdclk
.actual
;
13461 drm_atomic_state_get(state
);
13462 INIT_WORK(&state
->commit_work
, intel_atomic_commit_work
);
13464 i915_sw_fence_commit(&intel_state
->commit_ready
);
13465 if (nonblock
&& intel_state
->modeset
) {
13466 queue_work(dev_priv
->modeset_wq
, &state
->commit_work
);
13467 } else if (nonblock
) {
13468 queue_work(system_unbound_wq
, &state
->commit_work
);
13470 if (intel_state
->modeset
)
13471 flush_workqueue(dev_priv
->modeset_wq
);
13472 intel_atomic_commit_tail(state
);
13478 static const struct drm_crtc_funcs intel_crtc_funcs
= {
13479 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
13480 .set_config
= drm_atomic_helper_set_config
,
13481 .destroy
= intel_crtc_destroy
,
13482 .page_flip
= drm_atomic_helper_page_flip
,
13483 .atomic_duplicate_state
= intel_crtc_duplicate_state
,
13484 .atomic_destroy_state
= intel_crtc_destroy_state
,
13485 .set_crc_source
= intel_crtc_set_crc_source
,
13486 .verify_crc_source
= intel_crtc_verify_crc_source
,
13487 .get_crc_sources
= intel_crtc_get_crc_sources
,
13490 struct wait_rps_boost
{
13491 struct wait_queue_entry wait
;
13493 struct drm_crtc
*crtc
;
13494 struct i915_request
*request
;
13497 static int do_rps_boost(struct wait_queue_entry
*_wait
,
13498 unsigned mode
, int sync
, void *key
)
13500 struct wait_rps_boost
*wait
= container_of(_wait
, typeof(*wait
), wait
);
13501 struct i915_request
*rq
= wait
->request
;
13504 * If we missed the vblank, but the request is already running it
13505 * is reasonable to assume that it will complete before the next
13506 * vblank without our intervention, so leave RPS alone.
13508 if (!i915_request_started(rq
))
13509 gen6_rps_boost(rq
, NULL
);
13510 i915_request_put(rq
);
13512 drm_crtc_vblank_put(wait
->crtc
);
13514 list_del(&wait
->wait
.entry
);
13519 static void add_rps_boost_after_vblank(struct drm_crtc
*crtc
,
13520 struct dma_fence
*fence
)
13522 struct wait_rps_boost
*wait
;
13524 if (!dma_fence_is_i915(fence
))
13527 if (INTEL_GEN(to_i915(crtc
->dev
)) < 6)
13530 if (drm_crtc_vblank_get(crtc
))
13533 wait
= kmalloc(sizeof(*wait
), GFP_KERNEL
);
13535 drm_crtc_vblank_put(crtc
);
13539 wait
->request
= to_request(dma_fence_get(fence
));
13542 wait
->wait
.func
= do_rps_boost
;
13543 wait
->wait
.flags
= 0;
13545 add_wait_queue(drm_crtc_vblank_waitqueue(crtc
), &wait
->wait
);
13548 static int intel_plane_pin_fb(struct intel_plane_state
*plane_state
)
13550 struct intel_plane
*plane
= to_intel_plane(plane_state
->base
.plane
);
13551 struct drm_i915_private
*dev_priv
= to_i915(plane
->base
.dev
);
13552 struct drm_framebuffer
*fb
= plane_state
->base
.fb
;
13553 struct i915_vma
*vma
;
13555 if (plane
->id
== PLANE_CURSOR
&&
13556 INTEL_INFO(dev_priv
)->display
.cursor_needs_physical
) {
13557 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
13558 const int align
= intel_cursor_alignment(dev_priv
);
13561 err
= i915_gem_object_attach_phys(obj
, align
);
13566 vma
= intel_pin_and_fence_fb_obj(fb
,
13567 &plane_state
->view
,
13568 intel_plane_uses_fence(plane_state
),
13569 &plane_state
->flags
);
13571 return PTR_ERR(vma
);
13573 plane_state
->vma
= vma
;
13578 static void intel_plane_unpin_fb(struct intel_plane_state
*old_plane_state
)
13580 struct i915_vma
*vma
;
13582 vma
= fetch_and_zero(&old_plane_state
->vma
);
13584 intel_unpin_fb_vma(vma
, old_plane_state
->flags
);
13587 static void fb_obj_bump_render_priority(struct drm_i915_gem_object
*obj
)
13589 struct i915_sched_attr attr
= {
13590 .priority
= I915_PRIORITY_DISPLAY
,
13593 i915_gem_object_wait_priority(obj
, 0, &attr
);
13597 * intel_prepare_plane_fb - Prepare fb for usage on plane
13598 * @plane: drm plane to prepare for
13599 * @new_state: the plane state being prepared
13601 * Prepares a framebuffer for usage on a display plane. Generally this
13602 * involves pinning the underlying object and updating the frontbuffer tracking
13603 * bits. Some older platforms need special physical address handling for
13606 * Must be called with struct_mutex held.
13608 * Returns 0 on success, negative error code on failure.
13611 intel_prepare_plane_fb(struct drm_plane
*plane
,
13612 struct drm_plane_state
*new_state
)
13614 struct intel_atomic_state
*intel_state
=
13615 to_intel_atomic_state(new_state
->state
);
13616 struct drm_i915_private
*dev_priv
= to_i915(plane
->dev
);
13617 struct drm_framebuffer
*fb
= new_state
->fb
;
13618 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
13619 struct drm_i915_gem_object
*old_obj
= intel_fb_obj(plane
->state
->fb
);
13623 struct drm_crtc_state
*crtc_state
=
13624 drm_atomic_get_new_crtc_state(new_state
->state
,
13625 plane
->state
->crtc
);
13627 /* Big Hammer, we also need to ensure that any pending
13628 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13629 * current scanout is retired before unpinning the old
13630 * framebuffer. Note that we rely on userspace rendering
13631 * into the buffer attached to the pipe they are waiting
13632 * on. If not, userspace generates a GPU hang with IPEHR
13633 * point to the MI_WAIT_FOR_EVENT.
13635 * This should only fail upon a hung GPU, in which case we
13636 * can safely continue.
13638 if (needs_modeset(crtc_state
)) {
13639 ret
= i915_sw_fence_await_reservation(&intel_state
->commit_ready
,
13640 old_obj
->resv
, NULL
,
13648 if (new_state
->fence
) { /* explicit fencing */
13649 ret
= i915_sw_fence_await_dma_fence(&intel_state
->commit_ready
,
13651 I915_FENCE_TIMEOUT
,
13660 ret
= i915_gem_object_pin_pages(obj
);
13664 ret
= mutex_lock_interruptible(&dev_priv
->drm
.struct_mutex
);
13666 i915_gem_object_unpin_pages(obj
);
13670 ret
= intel_plane_pin_fb(to_intel_plane_state(new_state
));
13672 mutex_unlock(&dev_priv
->drm
.struct_mutex
);
13673 i915_gem_object_unpin_pages(obj
);
13677 fb_obj_bump_render_priority(obj
);
13678 intel_fb_obj_flush(obj
, ORIGIN_DIRTYFB
);
13680 if (!new_state
->fence
) { /* implicit fencing */
13681 struct dma_fence
*fence
;
13683 ret
= i915_sw_fence_await_reservation(&intel_state
->commit_ready
,
13685 false, I915_FENCE_TIMEOUT
,
13690 fence
= reservation_object_get_excl_rcu(obj
->resv
);
13692 add_rps_boost_after_vblank(new_state
->crtc
, fence
);
13693 dma_fence_put(fence
);
13696 add_rps_boost_after_vblank(new_state
->crtc
, new_state
->fence
);
13700 * We declare pageflips to be interactive and so merit a small bias
13701 * towards upclocking to deliver the frame on time. By only changing
13702 * the RPS thresholds to sample more regularly and aim for higher
13703 * clocks we can hopefully deliver low power workloads (like kodi)
13704 * that are not quite steady state without resorting to forcing
13705 * maximum clocks following a vblank miss (see do_rps_boost()).
13707 if (!intel_state
->rps_interactive
) {
13708 intel_rps_mark_interactive(dev_priv
, true);
13709 intel_state
->rps_interactive
= true;
13716 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13717 * @plane: drm plane to clean up for
13718 * @old_state: the state from the previous modeset
13720 * Cleans up a framebuffer that has just been removed from a plane.
13722 * Must be called with struct_mutex held.
13725 intel_cleanup_plane_fb(struct drm_plane
*plane
,
13726 struct drm_plane_state
*old_state
)
13728 struct intel_atomic_state
*intel_state
=
13729 to_intel_atomic_state(old_state
->state
);
13730 struct drm_i915_private
*dev_priv
= to_i915(plane
->dev
);
13732 if (intel_state
->rps_interactive
) {
13733 intel_rps_mark_interactive(dev_priv
, false);
13734 intel_state
->rps_interactive
= false;
13737 /* Should only be called after a successful intel_prepare_plane_fb()! */
13738 mutex_lock(&dev_priv
->drm
.struct_mutex
);
13739 intel_plane_unpin_fb(to_intel_plane_state(old_state
));
13740 mutex_unlock(&dev_priv
->drm
.struct_mutex
);
13744 skl_max_scale(const struct intel_crtc_state
*crtc_state
,
13747 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
13748 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
13749 int max_scale
, mult
;
13750 int crtc_clock
, max_dotclk
, tmpclk1
, tmpclk2
;
13752 if (!crtc_state
->base
.enable
)
13753 return DRM_PLANE_HELPER_NO_SCALING
;
13755 crtc_clock
= crtc_state
->base
.adjusted_mode
.crtc_clock
;
13756 max_dotclk
= to_intel_atomic_state(crtc_state
->base
.state
)->cdclk
.logical
.cdclk
;
13758 if (IS_GEMINILAKE(dev_priv
) || INTEL_GEN(dev_priv
) >= 10)
13761 if (WARN_ON_ONCE(!crtc_clock
|| max_dotclk
< crtc_clock
))
13762 return DRM_PLANE_HELPER_NO_SCALING
;
13765 * skl max scale is lower of:
13766 * close to 3 but not 3, -1 is for that purpose
13770 mult
= pixel_format
== DRM_FORMAT_NV12
? 2 : 3;
13771 tmpclk1
= (1 << 16) * mult
- 1;
13772 tmpclk2
= (1 << 8) * ((max_dotclk
<< 8) / crtc_clock
);
13773 max_scale
= min(tmpclk1
, tmpclk2
);
13778 static void intel_begin_crtc_commit(struct drm_crtc
*crtc
,
13779 struct drm_crtc_state
*old_crtc_state
)
13781 struct drm_device
*dev
= crtc
->dev
;
13782 struct drm_i915_private
*dev_priv
= to_i915(dev
);
13783 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
13784 struct intel_crtc_state
*old_intel_cstate
=
13785 to_intel_crtc_state(old_crtc_state
);
13786 struct intel_atomic_state
*old_intel_state
=
13787 to_intel_atomic_state(old_crtc_state
->state
);
13788 struct intel_crtc_state
*intel_cstate
=
13789 intel_atomic_get_new_crtc_state(old_intel_state
, intel_crtc
);
13790 bool modeset
= needs_modeset(&intel_cstate
->base
);
13792 /* Perform vblank evasion around commit operation */
13793 intel_pipe_update_start(intel_cstate
);
13798 if (intel_cstate
->base
.color_mgmt_changed
||
13799 intel_cstate
->update_pipe
)
13800 intel_color_commit(intel_cstate
);
13802 if (intel_cstate
->update_pipe
)
13803 intel_update_pipe_config(old_intel_cstate
, intel_cstate
);
13804 else if (INTEL_GEN(dev_priv
) >= 9)
13805 skl_detach_scalers(intel_cstate
);
13808 if (dev_priv
->display
.atomic_update_watermarks
)
13809 dev_priv
->display
.atomic_update_watermarks(old_intel_state
,
13813 void intel_crtc_arm_fifo_underrun(struct intel_crtc
*crtc
,
13814 struct intel_crtc_state
*crtc_state
)
13816 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
13818 if (!IS_GEN(dev_priv
, 2))
13819 intel_set_cpu_fifo_underrun_reporting(dev_priv
, crtc
->pipe
, true);
13821 if (crtc_state
->has_pch_encoder
) {
13822 enum pipe pch_transcoder
=
13823 intel_crtc_pch_transcoder(crtc
);
13825 intel_set_pch_fifo_underrun_reporting(dev_priv
, pch_transcoder
, true);
13829 static void intel_finish_crtc_commit(struct drm_crtc
*crtc
,
13830 struct drm_crtc_state
*old_crtc_state
)
13832 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
13833 struct intel_atomic_state
*old_intel_state
=
13834 to_intel_atomic_state(old_crtc_state
->state
);
13835 struct intel_crtc_state
*new_crtc_state
=
13836 intel_atomic_get_new_crtc_state(old_intel_state
, intel_crtc
);
13838 intel_pipe_update_end(new_crtc_state
);
13840 if (new_crtc_state
->update_pipe
&&
13841 !needs_modeset(&new_crtc_state
->base
) &&
13842 old_crtc_state
->mode
.private_flags
& I915_MODE_FLAG_INHERITED
)
13843 intel_crtc_arm_fifo_underrun(intel_crtc
, new_crtc_state
);
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
13859 static bool i8xx_plane_format_mod_supported(struct drm_plane
*_plane
,
13860 u32 format
, u64 modifier
)
13862 switch (modifier
) {
13863 case DRM_FORMAT_MOD_LINEAR
:
13864 case I915_FORMAT_MOD_X_TILED
:
13871 case DRM_FORMAT_C8
:
13872 case DRM_FORMAT_RGB565
:
13873 case DRM_FORMAT_XRGB1555
:
13874 case DRM_FORMAT_XRGB8888
:
13875 return modifier
== DRM_FORMAT_MOD_LINEAR
||
13876 modifier
== I915_FORMAT_MOD_X_TILED
;
13882 static bool i965_plane_format_mod_supported(struct drm_plane
*_plane
,
13883 u32 format
, u64 modifier
)
13885 switch (modifier
) {
13886 case DRM_FORMAT_MOD_LINEAR
:
13887 case I915_FORMAT_MOD_X_TILED
:
13894 case DRM_FORMAT_C8
:
13895 case DRM_FORMAT_RGB565
:
13896 case DRM_FORMAT_XRGB8888
:
13897 case DRM_FORMAT_XBGR8888
:
13898 case DRM_FORMAT_XRGB2101010
:
13899 case DRM_FORMAT_XBGR2101010
:
13900 return modifier
== DRM_FORMAT_MOD_LINEAR
||
13901 modifier
== I915_FORMAT_MOD_X_TILED
;
13907 static bool intel_cursor_format_mod_supported(struct drm_plane
*_plane
,
13908 u32 format
, u64 modifier
)
13910 return modifier
== DRM_FORMAT_MOD_LINEAR
&&
13911 format
== DRM_FORMAT_ARGB8888
;
13914 static const struct drm_plane_funcs i965_plane_funcs
= {
13915 .update_plane
= drm_atomic_helper_update_plane
,
13916 .disable_plane
= drm_atomic_helper_disable_plane
,
13917 .destroy
= intel_plane_destroy
,
13918 .atomic_get_property
= intel_plane_atomic_get_property
,
13919 .atomic_set_property
= intel_plane_atomic_set_property
,
13920 .atomic_duplicate_state
= intel_plane_duplicate_state
,
13921 .atomic_destroy_state
= intel_plane_destroy_state
,
13922 .format_mod_supported
= i965_plane_format_mod_supported
,
13925 static const struct drm_plane_funcs i8xx_plane_funcs
= {
13926 .update_plane
= drm_atomic_helper_update_plane
,
13927 .disable_plane
= drm_atomic_helper_disable_plane
,
13928 .destroy
= intel_plane_destroy
,
13929 .atomic_get_property
= intel_plane_atomic_get_property
,
13930 .atomic_set_property
= intel_plane_atomic_set_property
,
13931 .atomic_duplicate_state
= intel_plane_duplicate_state
,
13932 .atomic_destroy_state
= intel_plane_destroy_state
,
13933 .format_mod_supported
= i8xx_plane_format_mod_supported
,
13937 intel_legacy_cursor_update(struct drm_plane
*plane
,
13938 struct drm_crtc
*crtc
,
13939 struct drm_framebuffer
*fb
,
13940 int crtc_x
, int crtc_y
,
13941 unsigned int crtc_w
, unsigned int crtc_h
,
13942 u32 src_x
, u32 src_y
,
13943 u32 src_w
, u32 src_h
,
13944 struct drm_modeset_acquire_ctx
*ctx
)
13946 struct drm_i915_private
*dev_priv
= to_i915(crtc
->dev
);
13948 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
13949 struct intel_plane
*intel_plane
= to_intel_plane(plane
);
13950 struct drm_framebuffer
*old_fb
;
13951 struct intel_crtc_state
*crtc_state
=
13952 to_intel_crtc_state(crtc
->state
);
13953 struct intel_crtc_state
*new_crtc_state
;
13956 * When crtc is inactive or there is a modeset pending,
13957 * wait for it to complete in the slowpath
13959 if (!crtc_state
->base
.active
|| needs_modeset(&crtc_state
->base
) ||
13960 crtc_state
->update_pipe
)
13963 old_plane_state
= plane
->state
;
13965 * Don't do an async update if there is an outstanding commit modifying
13966 * the plane. This prevents our async update's changes from getting
13967 * overridden by a previous synchronous update's state.
13969 if (old_plane_state
->commit
&&
13970 !try_wait_for_completion(&old_plane_state
->commit
->hw_done
))
13974 * If any parameters change that may affect watermarks,
13975 * take the slowpath. Only changing fb or position should be
13978 if (old_plane_state
->crtc
!= crtc
||
13979 old_plane_state
->src_w
!= src_w
||
13980 old_plane_state
->src_h
!= src_h
||
13981 old_plane_state
->crtc_w
!= crtc_w
||
13982 old_plane_state
->crtc_h
!= crtc_h
||
13983 !old_plane_state
->fb
!= !fb
)
13986 new_plane_state
= intel_plane_duplicate_state(plane
);
13987 if (!new_plane_state
)
13990 new_crtc_state
= to_intel_crtc_state(intel_crtc_duplicate_state(crtc
));
13991 if (!new_crtc_state
) {
13996 drm_atomic_set_fb_for_plane(new_plane_state
, fb
);
13998 new_plane_state
->src_x
= src_x
;
13999 new_plane_state
->src_y
= src_y
;
14000 new_plane_state
->src_w
= src_w
;
14001 new_plane_state
->src_h
= src_h
;
14002 new_plane_state
->crtc_x
= crtc_x
;
14003 new_plane_state
->crtc_y
= crtc_y
;
14004 new_plane_state
->crtc_w
= crtc_w
;
14005 new_plane_state
->crtc_h
= crtc_h
;
14007 ret
= intel_plane_atomic_check_with_state(crtc_state
, new_crtc_state
,
14008 to_intel_plane_state(old_plane_state
),
14009 to_intel_plane_state(new_plane_state
));
14013 ret
= mutex_lock_interruptible(&dev_priv
->drm
.struct_mutex
);
14017 ret
= intel_plane_pin_fb(to_intel_plane_state(new_plane_state
));
14021 intel_fb_obj_flush(intel_fb_obj(fb
), ORIGIN_FLIP
);
14023 old_fb
= old_plane_state
->fb
;
14024 i915_gem_track_fb(intel_fb_obj(old_fb
), intel_fb_obj(fb
),
14025 intel_plane
->frontbuffer_bit
);
14027 /* Swap plane state */
14028 plane
->state
= new_plane_state
;
14031 * We cannot swap crtc_state as it may be in use by an atomic commit or
14032 * page flip that's running simultaneously. If we swap crtc_state and
14033 * destroy the old state, we will cause a use-after-free there.
14035 * Only update active_planes, which is needed for our internal
14036 * bookkeeping. Either value will do the right thing when updating
14037 * planes atomically. If the cursor was part of the atomic update then
14038 * we would have taken the slowpath.
14040 crtc_state
->active_planes
= new_crtc_state
->active_planes
;
14042 if (plane
->state
->visible
) {
14043 trace_intel_update_plane(plane
, to_intel_crtc(crtc
));
14044 intel_plane
->update_plane(intel_plane
, crtc_state
,
14045 to_intel_plane_state(plane
->state
));
14047 trace_intel_disable_plane(plane
, to_intel_crtc(crtc
));
14048 intel_plane
->disable_plane(intel_plane
, crtc_state
);
14051 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state
));
14054 mutex_unlock(&dev_priv
->drm
.struct_mutex
);
14056 if (new_crtc_state
)
14057 intel_crtc_destroy_state(crtc
, &new_crtc_state
->base
);
14059 intel_plane_destroy_state(plane
, new_plane_state
);
14061 intel_plane_destroy_state(plane
, old_plane_state
);
14065 return drm_atomic_helper_update_plane(plane
, crtc
, fb
,
14066 crtc_x
, crtc_y
, crtc_w
, crtc_h
,
14067 src_x
, src_y
, src_w
, src_h
, ctx
);
14070 static const struct drm_plane_funcs intel_cursor_plane_funcs
= {
14071 .update_plane
= intel_legacy_cursor_update
,
14072 .disable_plane
= drm_atomic_helper_disable_plane
,
14073 .destroy
= intel_plane_destroy
,
14074 .atomic_get_property
= intel_plane_atomic_get_property
,
14075 .atomic_set_property
= intel_plane_atomic_set_property
,
14076 .atomic_duplicate_state
= intel_plane_duplicate_state
,
14077 .atomic_destroy_state
= intel_plane_destroy_state
,
14078 .format_mod_supported
= intel_cursor_format_mod_supported
,
14081 static bool i9xx_plane_has_fbc(struct drm_i915_private
*dev_priv
,
14082 enum i9xx_plane_id i9xx_plane
)
14084 if (!HAS_FBC(dev_priv
))
14087 if (IS_BROADWELL(dev_priv
) || IS_HASWELL(dev_priv
))
14088 return i9xx_plane
== PLANE_A
; /* tied to pipe A */
14089 else if (IS_IVYBRIDGE(dev_priv
))
14090 return i9xx_plane
== PLANE_A
|| i9xx_plane
== PLANE_B
||
14091 i9xx_plane
== PLANE_C
;
14092 else if (INTEL_GEN(dev_priv
) >= 4)
14093 return i9xx_plane
== PLANE_A
|| i9xx_plane
== PLANE_B
;
14095 return i9xx_plane
== PLANE_A
;
14098 static struct intel_plane
*
14099 intel_primary_plane_create(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
14101 struct intel_plane
*plane
;
14102 const struct drm_plane_funcs
*plane_funcs
;
14103 unsigned int supported_rotations
;
14104 unsigned int possible_crtcs
;
14105 const u64
*modifiers
;
14106 const u32
*formats
;
14110 if (INTEL_GEN(dev_priv
) >= 9)
14111 return skl_universal_plane_create(dev_priv
, pipe
,
14114 plane
= intel_plane_alloc();
14118 plane
->pipe
= pipe
;
14120 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14121 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
14123 if (HAS_FBC(dev_priv
) && INTEL_GEN(dev_priv
) < 4)
14124 plane
->i9xx_plane
= (enum i9xx_plane_id
) !pipe
;
14126 plane
->i9xx_plane
= (enum i9xx_plane_id
) pipe
;
14127 plane
->id
= PLANE_PRIMARY
;
14128 plane
->frontbuffer_bit
= INTEL_FRONTBUFFER(pipe
, plane
->id
);
14130 plane
->has_fbc
= i9xx_plane_has_fbc(dev_priv
, plane
->i9xx_plane
);
14131 if (plane
->has_fbc
) {
14132 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
14134 fbc
->possible_framebuffer_bits
|= plane
->frontbuffer_bit
;
14137 if (INTEL_GEN(dev_priv
) >= 4) {
14138 formats
= i965_primary_formats
;
14139 num_formats
= ARRAY_SIZE(i965_primary_formats
);
14140 modifiers
= i9xx_format_modifiers
;
14142 plane
->max_stride
= i9xx_plane_max_stride
;
14143 plane
->update_plane
= i9xx_update_plane
;
14144 plane
->disable_plane
= i9xx_disable_plane
;
14145 plane
->get_hw_state
= i9xx_plane_get_hw_state
;
14146 plane
->check_plane
= i9xx_plane_check
;
14148 plane_funcs
= &i965_plane_funcs
;
14150 formats
= i8xx_primary_formats
;
14151 num_formats
= ARRAY_SIZE(i8xx_primary_formats
);
14152 modifiers
= i9xx_format_modifiers
;
14154 plane
->max_stride
= i9xx_plane_max_stride
;
14155 plane
->update_plane
= i9xx_update_plane
;
14156 plane
->disable_plane
= i9xx_disable_plane
;
14157 plane
->get_hw_state
= i9xx_plane_get_hw_state
;
14158 plane
->check_plane
= i9xx_plane_check
;
14160 plane_funcs
= &i8xx_plane_funcs
;
14163 possible_crtcs
= BIT(pipe
);
14165 if (INTEL_GEN(dev_priv
) >= 5 || IS_G4X(dev_priv
))
14166 ret
= drm_universal_plane_init(&dev_priv
->drm
, &plane
->base
,
14167 possible_crtcs
, plane_funcs
,
14168 formats
, num_formats
, modifiers
,
14169 DRM_PLANE_TYPE_PRIMARY
,
14170 "primary %c", pipe_name(pipe
));
14172 ret
= drm_universal_plane_init(&dev_priv
->drm
, &plane
->base
,
14173 possible_crtcs
, plane_funcs
,
14174 formats
, num_formats
, modifiers
,
14175 DRM_PLANE_TYPE_PRIMARY
,
14177 plane_name(plane
->i9xx_plane
));
14181 if (IS_CHERRYVIEW(dev_priv
) && pipe
== PIPE_B
) {
14182 supported_rotations
=
14183 DRM_MODE_ROTATE_0
| DRM_MODE_ROTATE_180
|
14184 DRM_MODE_REFLECT_X
;
14185 } else if (INTEL_GEN(dev_priv
) >= 4) {
14186 supported_rotations
=
14187 DRM_MODE_ROTATE_0
| DRM_MODE_ROTATE_180
;
14189 supported_rotations
= DRM_MODE_ROTATE_0
;
14192 if (INTEL_GEN(dev_priv
) >= 4)
14193 drm_plane_create_rotation_property(&plane
->base
,
14195 supported_rotations
);
14197 drm_plane_helper_add(&plane
->base
, &intel_plane_helper_funcs
);
14202 intel_plane_free(plane
);
14204 return ERR_PTR(ret
);
14207 static struct intel_plane
*
14208 intel_cursor_plane_create(struct drm_i915_private
*dev_priv
,
14211 unsigned int possible_crtcs
;
14212 struct intel_plane
*cursor
;
14215 cursor
= intel_plane_alloc();
14216 if (IS_ERR(cursor
))
14219 cursor
->pipe
= pipe
;
14220 cursor
->i9xx_plane
= (enum i9xx_plane_id
) pipe
;
14221 cursor
->id
= PLANE_CURSOR
;
14222 cursor
->frontbuffer_bit
= INTEL_FRONTBUFFER(pipe
, cursor
->id
);
14224 if (IS_I845G(dev_priv
) || IS_I865G(dev_priv
)) {
14225 cursor
->max_stride
= i845_cursor_max_stride
;
14226 cursor
->update_plane
= i845_update_cursor
;
14227 cursor
->disable_plane
= i845_disable_cursor
;
14228 cursor
->get_hw_state
= i845_cursor_get_hw_state
;
14229 cursor
->check_plane
= i845_check_cursor
;
14231 cursor
->max_stride
= i9xx_cursor_max_stride
;
14232 cursor
->update_plane
= i9xx_update_cursor
;
14233 cursor
->disable_plane
= i9xx_disable_cursor
;
14234 cursor
->get_hw_state
= i9xx_cursor_get_hw_state
;
14235 cursor
->check_plane
= i9xx_check_cursor
;
14238 cursor
->cursor
.base
= ~0;
14239 cursor
->cursor
.cntl
= ~0;
14241 if (IS_I845G(dev_priv
) || IS_I865G(dev_priv
) || HAS_CUR_FBC(dev_priv
))
14242 cursor
->cursor
.size
= ~0;
14244 possible_crtcs
= BIT(pipe
);
14246 ret
= drm_universal_plane_init(&dev_priv
->drm
, &cursor
->base
,
14247 possible_crtcs
, &intel_cursor_plane_funcs
,
14248 intel_cursor_formats
,
14249 ARRAY_SIZE(intel_cursor_formats
),
14250 cursor_format_modifiers
,
14251 DRM_PLANE_TYPE_CURSOR
,
14252 "cursor %c", pipe_name(pipe
));
14256 if (INTEL_GEN(dev_priv
) >= 4)
14257 drm_plane_create_rotation_property(&cursor
->base
,
14259 DRM_MODE_ROTATE_0
|
14260 DRM_MODE_ROTATE_180
);
14262 drm_plane_helper_add(&cursor
->base
, &intel_plane_helper_funcs
);
14267 intel_plane_free(cursor
);
14269 return ERR_PTR(ret
);
14272 static void intel_crtc_init_scalers(struct intel_crtc
*crtc
,
14273 struct intel_crtc_state
*crtc_state
)
14275 struct intel_crtc_scaler_state
*scaler_state
=
14276 &crtc_state
->scaler_state
;
14277 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
14280 crtc
->num_scalers
= RUNTIME_INFO(dev_priv
)->num_scalers
[crtc
->pipe
];
14281 if (!crtc
->num_scalers
)
14284 for (i
= 0; i
< crtc
->num_scalers
; i
++) {
14285 struct intel_scaler
*scaler
= &scaler_state
->scalers
[i
];
14287 scaler
->in_use
= 0;
14291 scaler_state
->scaler_id
= -1;
14294 static int intel_crtc_init(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
14296 struct intel_crtc
*intel_crtc
;
14297 struct intel_crtc_state
*crtc_state
= NULL
;
14298 struct intel_plane
*primary
= NULL
;
14299 struct intel_plane
*cursor
= NULL
;
14302 intel_crtc
= kzalloc(sizeof(*intel_crtc
), GFP_KERNEL
);
14306 crtc_state
= kzalloc(sizeof(*crtc_state
), GFP_KERNEL
);
14311 intel_crtc
->config
= crtc_state
;
14312 intel_crtc
->base
.state
= &crtc_state
->base
;
14313 crtc_state
->base
.crtc
= &intel_crtc
->base
;
14315 primary
= intel_primary_plane_create(dev_priv
, pipe
);
14316 if (IS_ERR(primary
)) {
14317 ret
= PTR_ERR(primary
);
14320 intel_crtc
->plane_ids_mask
|= BIT(primary
->id
);
14322 for_each_sprite(dev_priv
, pipe
, sprite
) {
14323 struct intel_plane
*plane
;
14325 plane
= intel_sprite_plane_create(dev_priv
, pipe
, sprite
);
14326 if (IS_ERR(plane
)) {
14327 ret
= PTR_ERR(plane
);
14330 intel_crtc
->plane_ids_mask
|= BIT(plane
->id
);
14333 cursor
= intel_cursor_plane_create(dev_priv
, pipe
);
14334 if (IS_ERR(cursor
)) {
14335 ret
= PTR_ERR(cursor
);
14338 intel_crtc
->plane_ids_mask
|= BIT(cursor
->id
);
14340 ret
= drm_crtc_init_with_planes(&dev_priv
->drm
, &intel_crtc
->base
,
14341 &primary
->base
, &cursor
->base
,
14343 "pipe %c", pipe_name(pipe
));
14347 intel_crtc
->pipe
= pipe
;
14349 /* initialize shared scalers */
14350 intel_crtc_init_scalers(intel_crtc
, crtc_state
);
14352 BUG_ON(pipe
>= ARRAY_SIZE(dev_priv
->pipe_to_crtc_mapping
) ||
14353 dev_priv
->pipe_to_crtc_mapping
[pipe
] != NULL
);
14354 dev_priv
->pipe_to_crtc_mapping
[pipe
] = intel_crtc
;
14356 if (INTEL_GEN(dev_priv
) < 9) {
14357 enum i9xx_plane_id i9xx_plane
= primary
->i9xx_plane
;
14359 BUG_ON(i9xx_plane
>= ARRAY_SIZE(dev_priv
->plane_to_crtc_mapping
) ||
14360 dev_priv
->plane_to_crtc_mapping
[i9xx_plane
] != NULL
);
14361 dev_priv
->plane_to_crtc_mapping
[i9xx_plane
] = intel_crtc
;
14364 drm_crtc_helper_add(&intel_crtc
->base
, &intel_helper_funcs
);
14366 intel_color_init(intel_crtc
);
14368 WARN_ON(drm_crtc_index(&intel_crtc
->base
) != intel_crtc
->pipe
);
14374 * drm_mode_config_cleanup() will free up any
14375 * crtcs/planes already initialized.
14383 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device
*dev
, void *data
,
14384 struct drm_file
*file
)
14386 struct drm_i915_get_pipe_from_crtc_id
*pipe_from_crtc_id
= data
;
14387 struct drm_crtc
*drmmode_crtc
;
14388 struct intel_crtc
*crtc
;
14390 drmmode_crtc
= drm_crtc_find(dev
, file
, pipe_from_crtc_id
->crtc_id
);
14394 crtc
= to_intel_crtc(drmmode_crtc
);
14395 pipe_from_crtc_id
->pipe
= crtc
->pipe
;
14400 static int intel_encoder_clones(struct intel_encoder
*encoder
)
14402 struct drm_device
*dev
= encoder
->base
.dev
;
14403 struct intel_encoder
*source_encoder
;
14404 int index_mask
= 0;
14407 for_each_intel_encoder(dev
, source_encoder
) {
14408 if (encoders_cloneable(encoder
, source_encoder
))
14409 index_mask
|= (1 << entry
);
14417 static bool ilk_has_edp_a(struct drm_i915_private
*dev_priv
)
14419 if (!IS_MOBILE(dev_priv
))
14422 if ((I915_READ(DP_A
) & DP_DETECTED
) == 0)
14425 if (IS_GEN(dev_priv
, 5) && (I915_READ(FUSE_STRAP
) & ILK_eDP_A_DISABLE
))
14431 static bool intel_ddi_crt_present(struct drm_i915_private
*dev_priv
)
14433 if (INTEL_GEN(dev_priv
) >= 9)
14436 if (IS_HSW_ULT(dev_priv
) || IS_BDW_ULT(dev_priv
))
14439 if (HAS_PCH_LPT_H(dev_priv
) &&
14440 I915_READ(SFUSE_STRAP
) & SFUSE_STRAP_CRT_DISABLED
)
14443 /* DDI E can't be used if DDI A requires 4 lanes */
14444 if (I915_READ(DDI_BUF_CTL(PORT_A
)) & DDI_A_4_LANES
)
14447 if (!dev_priv
->vbt
.int_crt_support
)
14453 void intel_pps_unlock_regs_wa(struct drm_i915_private
*dev_priv
)
14458 if (HAS_DDI(dev_priv
))
14461 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14462 * everywhere where registers can be write protected.
14464 if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
))
14469 for (pps_idx
= 0; pps_idx
< pps_num
; pps_idx
++) {
14470 u32 val
= I915_READ(PP_CONTROL(pps_idx
));
14472 val
= (val
& ~PANEL_UNLOCK_MASK
) | PANEL_UNLOCK_REGS
;
14473 I915_WRITE(PP_CONTROL(pps_idx
), val
);
14477 static void intel_pps_init(struct drm_i915_private
*dev_priv
)
14479 if (HAS_PCH_SPLIT(dev_priv
) || IS_GEN9_LP(dev_priv
))
14480 dev_priv
->pps_mmio_base
= PCH_PPS_BASE
;
14481 else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
))
14482 dev_priv
->pps_mmio_base
= VLV_PPS_BASE
;
14484 dev_priv
->pps_mmio_base
= PPS_BASE
;
14486 intel_pps_unlock_regs_wa(dev_priv
);
14489 static void intel_setup_outputs(struct drm_i915_private
*dev_priv
)
14491 struct intel_encoder
*encoder
;
14492 bool dpd_is_edp
= false;
14494 intel_pps_init(dev_priv
);
14496 if (!HAS_DISPLAY(dev_priv
))
14499 if (IS_ICELAKE(dev_priv
)) {
14500 intel_ddi_init(dev_priv
, PORT_A
);
14501 intel_ddi_init(dev_priv
, PORT_B
);
14502 intel_ddi_init(dev_priv
, PORT_C
);
14503 intel_ddi_init(dev_priv
, PORT_D
);
14504 intel_ddi_init(dev_priv
, PORT_E
);
14506 * On some ICL SKUs port F is not present. No strap bits for
14507 * this, so rely on VBT.
14508 * Work around broken VBTs on SKUs known to have no port F.
14510 if (IS_ICL_WITH_PORT_F(dev_priv
) &&
14511 intel_bios_is_port_present(dev_priv
, PORT_F
))
14512 intel_ddi_init(dev_priv
, PORT_F
);
14514 icl_dsi_init(dev_priv
);
14515 } else if (IS_GEN9_LP(dev_priv
)) {
14517 * FIXME: Broxton doesn't support port detection via the
14518 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14519 * detect the ports.
14521 intel_ddi_init(dev_priv
, PORT_A
);
14522 intel_ddi_init(dev_priv
, PORT_B
);
14523 intel_ddi_init(dev_priv
, PORT_C
);
14525 vlv_dsi_init(dev_priv
);
14526 } else if (HAS_DDI(dev_priv
)) {
14529 if (intel_ddi_crt_present(dev_priv
))
14530 intel_crt_init(dev_priv
);
14533 * Haswell uses DDI functions to detect digital outputs.
14534 * On SKL pre-D0 the strap isn't connected, so we assume
14537 found
= I915_READ(DDI_BUF_CTL(PORT_A
)) & DDI_INIT_DISPLAY_DETECTED
;
14538 /* WaIgnoreDDIAStrap: skl */
14539 if (found
|| IS_GEN9_BC(dev_priv
))
14540 intel_ddi_init(dev_priv
, PORT_A
);
14542 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
14544 found
= I915_READ(SFUSE_STRAP
);
14546 if (found
& SFUSE_STRAP_DDIB_DETECTED
)
14547 intel_ddi_init(dev_priv
, PORT_B
);
14548 if (found
& SFUSE_STRAP_DDIC_DETECTED
)
14549 intel_ddi_init(dev_priv
, PORT_C
);
14550 if (found
& SFUSE_STRAP_DDID_DETECTED
)
14551 intel_ddi_init(dev_priv
, PORT_D
);
14552 if (found
& SFUSE_STRAP_DDIF_DETECTED
)
14553 intel_ddi_init(dev_priv
, PORT_F
);
14555 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14557 if (IS_GEN9_BC(dev_priv
) &&
14558 intel_bios_is_port_present(dev_priv
, PORT_E
))
14559 intel_ddi_init(dev_priv
, PORT_E
);
14561 } else if (HAS_PCH_SPLIT(dev_priv
)) {
14565 * intel_edp_init_connector() depends on this completing first,
14566 * to prevent the registration of both eDP and LVDS and the
14567 * incorrect sharing of the PPS.
14569 intel_lvds_init(dev_priv
);
14570 intel_crt_init(dev_priv
);
14572 dpd_is_edp
= intel_dp_is_port_edp(dev_priv
, PORT_D
);
14574 if (ilk_has_edp_a(dev_priv
))
14575 intel_dp_init(dev_priv
, DP_A
, PORT_A
);
14577 if (I915_READ(PCH_HDMIB
) & SDVO_DETECTED
) {
14578 /* PCH SDVOB multiplex with HDMIB */
14579 found
= intel_sdvo_init(dev_priv
, PCH_SDVOB
, PORT_B
);
14581 intel_hdmi_init(dev_priv
, PCH_HDMIB
, PORT_B
);
14582 if (!found
&& (I915_READ(PCH_DP_B
) & DP_DETECTED
))
14583 intel_dp_init(dev_priv
, PCH_DP_B
, PORT_B
);
14586 if (I915_READ(PCH_HDMIC
) & SDVO_DETECTED
)
14587 intel_hdmi_init(dev_priv
, PCH_HDMIC
, PORT_C
);
14589 if (!dpd_is_edp
&& I915_READ(PCH_HDMID
) & SDVO_DETECTED
)
14590 intel_hdmi_init(dev_priv
, PCH_HDMID
, PORT_D
);
14592 if (I915_READ(PCH_DP_C
) & DP_DETECTED
)
14593 intel_dp_init(dev_priv
, PCH_DP_C
, PORT_C
);
14595 if (I915_READ(PCH_DP_D
) & DP_DETECTED
)
14596 intel_dp_init(dev_priv
, PCH_DP_D
, PORT_D
);
14597 } else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
14598 bool has_edp
, has_port
;
14600 if (IS_VALLEYVIEW(dev_priv
) && dev_priv
->vbt
.int_crt_support
)
14601 intel_crt_init(dev_priv
);
14604 * The DP_DETECTED bit is the latched state of the DDC
14605 * SDA pin at boot. However since eDP doesn't require DDC
14606 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14607 * eDP ports may have been muxed to an alternate function.
14608 * Thus we can't rely on the DP_DETECTED bit alone to detect
14609 * eDP ports. Consult the VBT as well as DP_DETECTED to
14610 * detect eDP ports.
14612 * Sadly the straps seem to be missing sometimes even for HDMI
14613 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14614 * and VBT for the presence of the port. Additionally we can't
14615 * trust the port type the VBT declares as we've seen at least
14616 * HDMI ports that the VBT claim are DP or eDP.
14618 has_edp
= intel_dp_is_port_edp(dev_priv
, PORT_B
);
14619 has_port
= intel_bios_is_port_present(dev_priv
, PORT_B
);
14620 if (I915_READ(VLV_DP_B
) & DP_DETECTED
|| has_port
)
14621 has_edp
&= intel_dp_init(dev_priv
, VLV_DP_B
, PORT_B
);
14622 if ((I915_READ(VLV_HDMIB
) & SDVO_DETECTED
|| has_port
) && !has_edp
)
14623 intel_hdmi_init(dev_priv
, VLV_HDMIB
, PORT_B
);
14625 has_edp
= intel_dp_is_port_edp(dev_priv
, PORT_C
);
14626 has_port
= intel_bios_is_port_present(dev_priv
, PORT_C
);
14627 if (I915_READ(VLV_DP_C
) & DP_DETECTED
|| has_port
)
14628 has_edp
&= intel_dp_init(dev_priv
, VLV_DP_C
, PORT_C
);
14629 if ((I915_READ(VLV_HDMIC
) & SDVO_DETECTED
|| has_port
) && !has_edp
)
14630 intel_hdmi_init(dev_priv
, VLV_HDMIC
, PORT_C
);
14632 if (IS_CHERRYVIEW(dev_priv
)) {
14634 * eDP not supported on port D,
14635 * so no need to worry about it
14637 has_port
= intel_bios_is_port_present(dev_priv
, PORT_D
);
14638 if (I915_READ(CHV_DP_D
) & DP_DETECTED
|| has_port
)
14639 intel_dp_init(dev_priv
, CHV_DP_D
, PORT_D
);
14640 if (I915_READ(CHV_HDMID
) & SDVO_DETECTED
|| has_port
)
14641 intel_hdmi_init(dev_priv
, CHV_HDMID
, PORT_D
);
14644 vlv_dsi_init(dev_priv
);
14645 } else if (IS_PINEVIEW(dev_priv
)) {
14646 intel_lvds_init(dev_priv
);
14647 intel_crt_init(dev_priv
);
14648 } else if (IS_GEN_RANGE(dev_priv
, 3, 4)) {
14649 bool found
= false;
14651 if (IS_MOBILE(dev_priv
))
14652 intel_lvds_init(dev_priv
);
14654 intel_crt_init(dev_priv
);
14656 if (I915_READ(GEN3_SDVOB
) & SDVO_DETECTED
) {
14657 DRM_DEBUG_KMS("probing SDVOB\n");
14658 found
= intel_sdvo_init(dev_priv
, GEN3_SDVOB
, PORT_B
);
14659 if (!found
&& IS_G4X(dev_priv
)) {
14660 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14661 intel_hdmi_init(dev_priv
, GEN4_HDMIB
, PORT_B
);
14664 if (!found
&& IS_G4X(dev_priv
))
14665 intel_dp_init(dev_priv
, DP_B
, PORT_B
);
14668 /* Before G4X SDVOC doesn't have its own detect register */
14670 if (I915_READ(GEN3_SDVOB
) & SDVO_DETECTED
) {
14671 DRM_DEBUG_KMS("probing SDVOC\n");
14672 found
= intel_sdvo_init(dev_priv
, GEN3_SDVOC
, PORT_C
);
14675 if (!found
&& (I915_READ(GEN3_SDVOC
) & SDVO_DETECTED
)) {
14677 if (IS_G4X(dev_priv
)) {
14678 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14679 intel_hdmi_init(dev_priv
, GEN4_HDMIC
, PORT_C
);
14681 if (IS_G4X(dev_priv
))
14682 intel_dp_init(dev_priv
, DP_C
, PORT_C
);
14685 if (IS_G4X(dev_priv
) && (I915_READ(DP_D
) & DP_DETECTED
))
14686 intel_dp_init(dev_priv
, DP_D
, PORT_D
);
14688 if (SUPPORTS_TV(dev_priv
))
14689 intel_tv_init(dev_priv
);
14690 } else if (IS_GEN(dev_priv
, 2)) {
14691 if (IS_I85X(dev_priv
))
14692 intel_lvds_init(dev_priv
);
14694 intel_crt_init(dev_priv
);
14695 intel_dvo_init(dev_priv
);
14698 intel_psr_init(dev_priv
);
14700 for_each_intel_encoder(&dev_priv
->drm
, encoder
) {
14701 encoder
->base
.possible_crtcs
= encoder
->crtc_mask
;
14702 encoder
->base
.possible_clones
=
14703 intel_encoder_clones(encoder
);
14706 intel_init_pch_refclk(dev_priv
);
14708 drm_helper_move_panel_connectors_to_head(&dev_priv
->drm
);
14711 static void intel_user_framebuffer_destroy(struct drm_framebuffer
*fb
)
14713 struct intel_framebuffer
*intel_fb
= to_intel_framebuffer(fb
);
14714 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
14716 drm_framebuffer_cleanup(fb
);
14718 i915_gem_object_lock(obj
);
14719 WARN_ON(!obj
->framebuffer_references
--);
14720 i915_gem_object_unlock(obj
);
14722 i915_gem_object_put(obj
);
14727 static int intel_user_framebuffer_create_handle(struct drm_framebuffer
*fb
,
14728 struct drm_file
*file
,
14729 unsigned int *handle
)
14731 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
14733 if (obj
->userptr
.mm
) {
14734 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14738 return drm_gem_handle_create(file
, &obj
->base
, handle
);
14741 static int intel_user_framebuffer_dirty(struct drm_framebuffer
*fb
,
14742 struct drm_file
*file
,
14743 unsigned flags
, unsigned color
,
14744 struct drm_clip_rect
*clips
,
14745 unsigned num_clips
)
14747 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
14749 i915_gem_object_flush_if_display(obj
);
14750 intel_fb_obj_flush(obj
, ORIGIN_DIRTYFB
);
14755 static const struct drm_framebuffer_funcs intel_fb_funcs
= {
14756 .destroy
= intel_user_framebuffer_destroy
,
14757 .create_handle
= intel_user_framebuffer_create_handle
,
14758 .dirty
= intel_user_framebuffer_dirty
,
14762 u32
intel_fb_pitch_limit(struct drm_i915_private
*dev_priv
,
14763 u32 pixel_format
, u64 fb_modifier
)
14765 struct intel_crtc
*crtc
;
14766 struct intel_plane
*plane
;
14769 * We assume the primary plane for pipe A has
14770 * the highest stride limits of them all.
14772 crtc
= intel_get_crtc_for_pipe(dev_priv
, PIPE_A
);
14773 plane
= to_intel_plane(crtc
->base
.primary
);
14775 return plane
->max_stride(plane
, pixel_format
, fb_modifier
,
14776 DRM_MODE_ROTATE_0
);
14779 static int intel_framebuffer_init(struct intel_framebuffer
*intel_fb
,
14780 struct drm_i915_gem_object
*obj
,
14781 struct drm_mode_fb_cmd2
*mode_cmd
)
14783 struct drm_i915_private
*dev_priv
= to_i915(obj
->base
.dev
);
14784 struct drm_framebuffer
*fb
= &intel_fb
->base
;
14786 unsigned int tiling
, stride
;
14790 i915_gem_object_lock(obj
);
14791 obj
->framebuffer_references
++;
14792 tiling
= i915_gem_object_get_tiling(obj
);
14793 stride
= i915_gem_object_get_stride(obj
);
14794 i915_gem_object_unlock(obj
);
14796 if (mode_cmd
->flags
& DRM_MODE_FB_MODIFIERS
) {
14798 * If there's a fence, enforce that
14799 * the fb modifier and tiling mode match.
14801 if (tiling
!= I915_TILING_NONE
&&
14802 tiling
!= intel_fb_modifier_to_tiling(mode_cmd
->modifier
[0])) {
14803 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14807 if (tiling
== I915_TILING_X
) {
14808 mode_cmd
->modifier
[0] = I915_FORMAT_MOD_X_TILED
;
14809 } else if (tiling
== I915_TILING_Y
) {
14810 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14815 if (!drm_any_plane_has_format(&dev_priv
->drm
,
14816 mode_cmd
->pixel_format
,
14817 mode_cmd
->modifier
[0])) {
14818 struct drm_format_name_buf format_name
;
14820 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
14821 drm_get_format_name(mode_cmd
->pixel_format
,
14823 mode_cmd
->modifier
[0]);
14828 * gen2/3 display engine uses the fence if present,
14829 * so the tiling mode must match the fb modifier exactly.
14831 if (INTEL_GEN(dev_priv
) < 4 &&
14832 tiling
!= intel_fb_modifier_to_tiling(mode_cmd
->modifier
[0])) {
14833 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
14837 pitch_limit
= intel_fb_pitch_limit(dev_priv
, mode_cmd
->pixel_format
,
14838 mode_cmd
->modifier
[0]);
14839 if (mode_cmd
->pitches
[0] > pitch_limit
) {
14840 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14841 mode_cmd
->modifier
[0] != DRM_FORMAT_MOD_LINEAR
?
14842 "tiled" : "linear",
14843 mode_cmd
->pitches
[0], pitch_limit
);
14848 * If there's a fence, enforce that
14849 * the fb pitch and fence stride match.
14851 if (tiling
!= I915_TILING_NONE
&& mode_cmd
->pitches
[0] != stride
) {
14852 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14853 mode_cmd
->pitches
[0], stride
);
14857 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14858 if (mode_cmd
->offsets
[0] != 0)
14861 drm_helper_mode_fill_fb_struct(&dev_priv
->drm
, fb
, mode_cmd
);
14863 for (i
= 0; i
< fb
->format
->num_planes
; i
++) {
14864 u32 stride_alignment
;
14866 if (mode_cmd
->handles
[i
] != mode_cmd
->handles
[0]) {
14867 DRM_DEBUG_KMS("bad plane %d handle\n", i
);
14871 stride_alignment
= intel_fb_stride_alignment(fb
, i
);
14874 * Display WA #0531: skl,bxt,kbl,glk
14876 * Render decompression and plane width > 3840
14877 * combined with horizontal panning requires the
14878 * plane stride to be a multiple of 4. We'll just
14879 * require the entire fb to accommodate that to avoid
14880 * potential runtime errors at plane configuration time.
14882 if (IS_GEN(dev_priv
, 9) && i
== 0 && fb
->width
> 3840 &&
14883 is_ccs_modifier(fb
->modifier
))
14884 stride_alignment
*= 4;
14886 if (fb
->pitches
[i
] & (stride_alignment
- 1)) {
14887 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14888 i
, fb
->pitches
[i
], stride_alignment
);
14892 fb
->obj
[i
] = &obj
->base
;
14895 ret
= intel_fill_fb_info(dev_priv
, fb
);
14899 ret
= drm_framebuffer_init(&dev_priv
->drm
, fb
, &intel_fb_funcs
);
14901 DRM_ERROR("framebuffer init failed %d\n", ret
);
14908 i915_gem_object_lock(obj
);
14909 obj
->framebuffer_references
--;
14910 i915_gem_object_unlock(obj
);
14914 static struct drm_framebuffer
*
14915 intel_user_framebuffer_create(struct drm_device
*dev
,
14916 struct drm_file
*filp
,
14917 const struct drm_mode_fb_cmd2
*user_mode_cmd
)
14919 struct drm_framebuffer
*fb
;
14920 struct drm_i915_gem_object
*obj
;
14921 struct drm_mode_fb_cmd2 mode_cmd
= *user_mode_cmd
;
14923 obj
= i915_gem_object_lookup(filp
, mode_cmd
.handles
[0]);
14925 return ERR_PTR(-ENOENT
);
14927 fb
= intel_framebuffer_create(obj
, &mode_cmd
);
14929 i915_gem_object_put(obj
);
14934 static void intel_atomic_state_free(struct drm_atomic_state
*state
)
14936 struct intel_atomic_state
*intel_state
= to_intel_atomic_state(state
);
14938 drm_atomic_state_default_release(state
);
14940 i915_sw_fence_fini(&intel_state
->commit_ready
);
14945 static enum drm_mode_status
14946 intel_mode_valid(struct drm_device
*dev
,
14947 const struct drm_display_mode
*mode
)
14949 struct drm_i915_private
*dev_priv
= to_i915(dev
);
14950 int hdisplay_max
, htotal_max
;
14951 int vdisplay_max
, vtotal_max
;
14954 * Can't reject DBLSCAN here because Xorg ddxen can add piles
14955 * of DBLSCAN modes to the output's mode list when they detect
14956 * the scaling mode property on the connector. And they don't
14957 * ask the kernel to validate those modes in any way until
14958 * modeset time at which point the client gets a protocol error.
14959 * So in order to not upset those clients we silently ignore the
14960 * DBLSCAN flag on such connectors. For other connectors we will
14961 * reject modes with the DBLSCAN flag in encoder->compute_config().
14962 * And we always reject DBLSCAN modes in connector->mode_valid()
14963 * as we never want such modes on the connector's mode list.
14966 if (mode
->vscan
> 1)
14967 return MODE_NO_VSCAN
;
14969 if (mode
->flags
& DRM_MODE_FLAG_HSKEW
)
14970 return MODE_H_ILLEGAL
;
14972 if (mode
->flags
& (DRM_MODE_FLAG_CSYNC
|
14973 DRM_MODE_FLAG_NCSYNC
|
14974 DRM_MODE_FLAG_PCSYNC
))
14977 if (mode
->flags
& (DRM_MODE_FLAG_BCAST
|
14978 DRM_MODE_FLAG_PIXMUX
|
14979 DRM_MODE_FLAG_CLKDIV2
))
14982 if (INTEL_GEN(dev_priv
) >= 9 ||
14983 IS_BROADWELL(dev_priv
) || IS_HASWELL(dev_priv
)) {
14984 hdisplay_max
= 8192; /* FDI max 4096 handled elsewhere */
14985 vdisplay_max
= 4096;
14988 } else if (INTEL_GEN(dev_priv
) >= 3) {
14989 hdisplay_max
= 4096;
14990 vdisplay_max
= 4096;
14994 hdisplay_max
= 2048;
14995 vdisplay_max
= 2048;
15000 if (mode
->hdisplay
> hdisplay_max
||
15001 mode
->hsync_start
> htotal_max
||
15002 mode
->hsync_end
> htotal_max
||
15003 mode
->htotal
> htotal_max
)
15004 return MODE_H_ILLEGAL
;
15006 if (mode
->vdisplay
> vdisplay_max
||
15007 mode
->vsync_start
> vtotal_max
||
15008 mode
->vsync_end
> vtotal_max
||
15009 mode
->vtotal
> vtotal_max
)
15010 return MODE_V_ILLEGAL
;
15015 static const struct drm_mode_config_funcs intel_mode_funcs
= {
15016 .fb_create
= intel_user_framebuffer_create
,
15017 .get_format_info
= intel_get_format_info
,
15018 .output_poll_changed
= intel_fbdev_output_poll_changed
,
15019 .mode_valid
= intel_mode_valid
,
15020 .atomic_check
= intel_atomic_check
,
15021 .atomic_commit
= intel_atomic_commit
,
15022 .atomic_state_alloc
= intel_atomic_state_alloc
,
15023 .atomic_state_clear
= intel_atomic_state_clear
,
15024 .atomic_state_free
= intel_atomic_state_free
,
15028 * intel_init_display_hooks - initialize the display modesetting hooks
15029 * @dev_priv: device private
15031 void intel_init_display_hooks(struct drm_i915_private
*dev_priv
)
15033 intel_init_cdclk_hooks(dev_priv
);
15035 if (INTEL_GEN(dev_priv
) >= 9) {
15036 dev_priv
->display
.get_pipe_config
= haswell_get_pipe_config
;
15037 dev_priv
->display
.get_initial_plane_config
=
15038 skylake_get_initial_plane_config
;
15039 dev_priv
->display
.crtc_compute_clock
=
15040 haswell_crtc_compute_clock
;
15041 dev_priv
->display
.crtc_enable
= haswell_crtc_enable
;
15042 dev_priv
->display
.crtc_disable
= haswell_crtc_disable
;
15043 } else if (HAS_DDI(dev_priv
)) {
15044 dev_priv
->display
.get_pipe_config
= haswell_get_pipe_config
;
15045 dev_priv
->display
.get_initial_plane_config
=
15046 i9xx_get_initial_plane_config
;
15047 dev_priv
->display
.crtc_compute_clock
=
15048 haswell_crtc_compute_clock
;
15049 dev_priv
->display
.crtc_enable
= haswell_crtc_enable
;
15050 dev_priv
->display
.crtc_disable
= haswell_crtc_disable
;
15051 } else if (HAS_PCH_SPLIT(dev_priv
)) {
15052 dev_priv
->display
.get_pipe_config
= ironlake_get_pipe_config
;
15053 dev_priv
->display
.get_initial_plane_config
=
15054 i9xx_get_initial_plane_config
;
15055 dev_priv
->display
.crtc_compute_clock
=
15056 ironlake_crtc_compute_clock
;
15057 dev_priv
->display
.crtc_enable
= ironlake_crtc_enable
;
15058 dev_priv
->display
.crtc_disable
= ironlake_crtc_disable
;
15059 } else if (IS_CHERRYVIEW(dev_priv
)) {
15060 dev_priv
->display
.get_pipe_config
= i9xx_get_pipe_config
;
15061 dev_priv
->display
.get_initial_plane_config
=
15062 i9xx_get_initial_plane_config
;
15063 dev_priv
->display
.crtc_compute_clock
= chv_crtc_compute_clock
;
15064 dev_priv
->display
.crtc_enable
= valleyview_crtc_enable
;
15065 dev_priv
->display
.crtc_disable
= i9xx_crtc_disable
;
15066 } else if (IS_VALLEYVIEW(dev_priv
)) {
15067 dev_priv
->display
.get_pipe_config
= i9xx_get_pipe_config
;
15068 dev_priv
->display
.get_initial_plane_config
=
15069 i9xx_get_initial_plane_config
;
15070 dev_priv
->display
.crtc_compute_clock
= vlv_crtc_compute_clock
;
15071 dev_priv
->display
.crtc_enable
= valleyview_crtc_enable
;
15072 dev_priv
->display
.crtc_disable
= i9xx_crtc_disable
;
15073 } else if (IS_G4X(dev_priv
)) {
15074 dev_priv
->display
.get_pipe_config
= i9xx_get_pipe_config
;
15075 dev_priv
->display
.get_initial_plane_config
=
15076 i9xx_get_initial_plane_config
;
15077 dev_priv
->display
.crtc_compute_clock
= g4x_crtc_compute_clock
;
15078 dev_priv
->display
.crtc_enable
= i9xx_crtc_enable
;
15079 dev_priv
->display
.crtc_disable
= i9xx_crtc_disable
;
15080 } else if (IS_PINEVIEW(dev_priv
)) {
15081 dev_priv
->display
.get_pipe_config
= i9xx_get_pipe_config
;
15082 dev_priv
->display
.get_initial_plane_config
=
15083 i9xx_get_initial_plane_config
;
15084 dev_priv
->display
.crtc_compute_clock
= pnv_crtc_compute_clock
;
15085 dev_priv
->display
.crtc_enable
= i9xx_crtc_enable
;
15086 dev_priv
->display
.crtc_disable
= i9xx_crtc_disable
;
15087 } else if (!IS_GEN(dev_priv
, 2)) {
15088 dev_priv
->display
.get_pipe_config
= i9xx_get_pipe_config
;
15089 dev_priv
->display
.get_initial_plane_config
=
15090 i9xx_get_initial_plane_config
;
15091 dev_priv
->display
.crtc_compute_clock
= i9xx_crtc_compute_clock
;
15092 dev_priv
->display
.crtc_enable
= i9xx_crtc_enable
;
15093 dev_priv
->display
.crtc_disable
= i9xx_crtc_disable
;
15095 dev_priv
->display
.get_pipe_config
= i9xx_get_pipe_config
;
15096 dev_priv
->display
.get_initial_plane_config
=
15097 i9xx_get_initial_plane_config
;
15098 dev_priv
->display
.crtc_compute_clock
= i8xx_crtc_compute_clock
;
15099 dev_priv
->display
.crtc_enable
= i9xx_crtc_enable
;
15100 dev_priv
->display
.crtc_disable
= i9xx_crtc_disable
;
15103 if (IS_GEN(dev_priv
, 5)) {
15104 dev_priv
->display
.fdi_link_train
= ironlake_fdi_link_train
;
15105 } else if (IS_GEN(dev_priv
, 6)) {
15106 dev_priv
->display
.fdi_link_train
= gen6_fdi_link_train
;
15107 } else if (IS_IVYBRIDGE(dev_priv
)) {
15108 /* FIXME: detect B0+ stepping and use auto training */
15109 dev_priv
->display
.fdi_link_train
= ivb_manual_fdi_link_train
;
15110 } else if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) {
15111 dev_priv
->display
.fdi_link_train
= hsw_fdi_link_train
;
15114 if (INTEL_GEN(dev_priv
) >= 9)
15115 dev_priv
->display
.update_crtcs
= skl_update_crtcs
;
15117 dev_priv
->display
.update_crtcs
= intel_update_crtcs
;
15120 /* Disable the VGA plane that we never use */
15121 static void i915_disable_vga(struct drm_i915_private
*dev_priv
)
15123 struct pci_dev
*pdev
= dev_priv
->drm
.pdev
;
15125 i915_reg_t vga_reg
= i915_vgacntrl_reg(dev_priv
);
15127 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15128 vga_get_uninterruptible(pdev
, VGA_RSRC_LEGACY_IO
);
15129 outb(SR01
, VGA_SR_INDEX
);
15130 sr1
= inb(VGA_SR_DATA
);
15131 outb(sr1
| 1<<5, VGA_SR_DATA
);
15132 vga_put(pdev
, VGA_RSRC_LEGACY_IO
);
15135 I915_WRITE(vga_reg
, VGA_DISP_DISABLE
);
15136 POSTING_READ(vga_reg
);
15139 void intel_modeset_init_hw(struct drm_device
*dev
)
15141 struct drm_i915_private
*dev_priv
= to_i915(dev
);
15143 intel_update_cdclk(dev_priv
);
15144 intel_dump_cdclk_state(&dev_priv
->cdclk
.hw
, "Current CDCLK");
15145 dev_priv
->cdclk
.logical
= dev_priv
->cdclk
.actual
= dev_priv
->cdclk
.hw
;
15149 * Calculate what we think the watermarks should be for the state we've read
15150 * out of the hardware and then immediately program those watermarks so that
15151 * we ensure the hardware settings match our internal state.
15153 * We can calculate what we think WM's should be by creating a duplicate of the
15154 * current state (which was constructed during hardware readout) and running it
15155 * through the atomic check code to calculate new watermark values in the
15158 static void sanitize_watermarks(struct drm_device
*dev
)
15160 struct drm_i915_private
*dev_priv
= to_i915(dev
);
15161 struct drm_atomic_state
*state
;
15162 struct intel_atomic_state
*intel_state
;
15163 struct drm_crtc
*crtc
;
15164 struct drm_crtc_state
*cstate
;
15165 struct drm_modeset_acquire_ctx ctx
;
15169 /* Only supported on platforms that use atomic watermark design */
15170 if (!dev_priv
->display
.optimize_watermarks
)
15174 * We need to hold connection_mutex before calling duplicate_state so
15175 * that the connector loop is protected.
15177 drm_modeset_acquire_init(&ctx
, 0);
15179 ret
= drm_modeset_lock_all_ctx(dev
, &ctx
);
15180 if (ret
== -EDEADLK
) {
15181 drm_modeset_backoff(&ctx
);
15183 } else if (WARN_ON(ret
)) {
15187 state
= drm_atomic_helper_duplicate_state(dev
, &ctx
);
15188 if (WARN_ON(IS_ERR(state
)))
15191 intel_state
= to_intel_atomic_state(state
);
15194 * Hardware readout is the only time we don't want to calculate
15195 * intermediate watermarks (since we don't trust the current
15198 if (!HAS_GMCH(dev_priv
))
15199 intel_state
->skip_intermediate_wm
= true;
15201 ret
= intel_atomic_check(dev
, state
);
15204 * If we fail here, it means that the hardware appears to be
15205 * programmed in a way that shouldn't be possible, given our
15206 * understanding of watermark requirements. This might mean a
15207 * mistake in the hardware readout code or a mistake in the
15208 * watermark calculations for a given platform. Raise a WARN
15209 * so that this is noticeable.
15211 * If this actually happens, we'll have to just leave the
15212 * BIOS-programmed watermarks untouched and hope for the best.
15214 WARN(true, "Could not determine valid watermarks for inherited state\n");
15218 /* Write calculated watermark values back */
15219 for_each_new_crtc_in_state(state
, crtc
, cstate
, i
) {
15220 struct intel_crtc_state
*cs
= to_intel_crtc_state(cstate
);
15222 cs
->wm
.need_postvbl_update
= true;
15223 dev_priv
->display
.optimize_watermarks(intel_state
, cs
);
15225 to_intel_crtc_state(crtc
->state
)->wm
= cs
->wm
;
15229 drm_atomic_state_put(state
);
15231 drm_modeset_drop_locks(&ctx
);
15232 drm_modeset_acquire_fini(&ctx
);
15235 static void intel_update_fdi_pll_freq(struct drm_i915_private
*dev_priv
)
15237 if (IS_GEN(dev_priv
, 5)) {
15239 I915_READ(FDI_PLL_BIOS_0
) & FDI_PLL_FB_CLOCK_MASK
;
15241 dev_priv
->fdi_pll_freq
= (fdi_pll_clk
+ 2) * 10000;
15242 } else if (IS_GEN(dev_priv
, 6) || IS_IVYBRIDGE(dev_priv
)) {
15243 dev_priv
->fdi_pll_freq
= 270000;
15248 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv
->fdi_pll_freq
);
15251 static int intel_initial_commit(struct drm_device
*dev
)
15253 struct drm_atomic_state
*state
= NULL
;
15254 struct drm_modeset_acquire_ctx ctx
;
15255 struct drm_crtc
*crtc
;
15256 struct drm_crtc_state
*crtc_state
;
15259 state
= drm_atomic_state_alloc(dev
);
15263 drm_modeset_acquire_init(&ctx
, 0);
15266 state
->acquire_ctx
= &ctx
;
15268 drm_for_each_crtc(crtc
, dev
) {
15269 crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
15270 if (IS_ERR(crtc_state
)) {
15271 ret
= PTR_ERR(crtc_state
);
15275 if (crtc_state
->active
) {
15276 ret
= drm_atomic_add_affected_planes(state
, crtc
);
15281 * FIXME hack to force a LUT update to avoid the
15282 * plane update forcing the pipe gamma on without
15283 * having a proper LUT loaded. Remove once we
15284 * have readout for pipe gamma enable.
15286 crtc_state
->color_mgmt_changed
= true;
15290 ret
= drm_atomic_commit(state
);
15293 if (ret
== -EDEADLK
) {
15294 drm_atomic_state_clear(state
);
15295 drm_modeset_backoff(&ctx
);
15299 drm_atomic_state_put(state
);
15301 drm_modeset_drop_locks(&ctx
);
15302 drm_modeset_acquire_fini(&ctx
);
15307 int intel_modeset_init(struct drm_device
*dev
)
15309 struct drm_i915_private
*dev_priv
= to_i915(dev
);
15310 struct i915_ggtt
*ggtt
= &dev_priv
->ggtt
;
15312 struct intel_crtc
*crtc
;
15315 dev_priv
->modeset_wq
= alloc_ordered_workqueue("i915_modeset", 0);
15317 drm_mode_config_init(dev
);
15319 dev
->mode_config
.min_width
= 0;
15320 dev
->mode_config
.min_height
= 0;
15322 dev
->mode_config
.preferred_depth
= 24;
15323 dev
->mode_config
.prefer_shadow
= 1;
15325 dev
->mode_config
.allow_fb_modifiers
= true;
15327 dev
->mode_config
.funcs
= &intel_mode_funcs
;
15329 init_llist_head(&dev_priv
->atomic_helper
.free_list
);
15330 INIT_WORK(&dev_priv
->atomic_helper
.free_work
,
15331 intel_atomic_helper_free_state_worker
);
15333 intel_init_quirks(dev_priv
);
15335 intel_fbc_init(dev_priv
);
15337 intel_init_pm(dev_priv
);
15340 * There may be no VBT; and if the BIOS enabled SSC we can
15341 * just keep using it to avoid unnecessary flicker. Whereas if the
15342 * BIOS isn't using it, don't assume it will work even if the VBT
15343 * indicates as much.
15345 if (HAS_PCH_IBX(dev_priv
) || HAS_PCH_CPT(dev_priv
)) {
15346 bool bios_lvds_use_ssc
= !!(I915_READ(PCH_DREF_CONTROL
) &
15349 if (dev_priv
->vbt
.lvds_use_ssc
!= bios_lvds_use_ssc
) {
15350 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15351 bios_lvds_use_ssc
? "en" : "dis",
15352 dev_priv
->vbt
.lvds_use_ssc
? "en" : "dis");
15353 dev_priv
->vbt
.lvds_use_ssc
= bios_lvds_use_ssc
;
15357 /* maximum framebuffer dimensions */
15358 if (IS_GEN(dev_priv
, 2)) {
15359 dev
->mode_config
.max_width
= 2048;
15360 dev
->mode_config
.max_height
= 2048;
15361 } else if (IS_GEN(dev_priv
, 3)) {
15362 dev
->mode_config
.max_width
= 4096;
15363 dev
->mode_config
.max_height
= 4096;
15365 dev
->mode_config
.max_width
= 8192;
15366 dev
->mode_config
.max_height
= 8192;
15369 if (IS_I845G(dev_priv
) || IS_I865G(dev_priv
)) {
15370 dev
->mode_config
.cursor_width
= IS_I845G(dev_priv
) ? 64 : 512;
15371 dev
->mode_config
.cursor_height
= 1023;
15372 } else if (IS_GEN(dev_priv
, 2)) {
15373 dev
->mode_config
.cursor_width
= 64;
15374 dev
->mode_config
.cursor_height
= 64;
15376 dev
->mode_config
.cursor_width
= 256;
15377 dev
->mode_config
.cursor_height
= 256;
15380 dev
->mode_config
.fb_base
= ggtt
->gmadr
.start
;
15382 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15383 INTEL_INFO(dev_priv
)->num_pipes
,
15384 INTEL_INFO(dev_priv
)->num_pipes
> 1 ? "s" : "");
15386 for_each_pipe(dev_priv
, pipe
) {
15387 ret
= intel_crtc_init(dev_priv
, pipe
);
15389 drm_mode_config_cleanup(dev
);
15394 intel_shared_dpll_init(dev
);
15395 intel_update_fdi_pll_freq(dev_priv
);
15397 intel_update_czclk(dev_priv
);
15398 intel_modeset_init_hw(dev
);
15400 if (dev_priv
->max_cdclk_freq
== 0)
15401 intel_update_max_cdclk(dev_priv
);
15403 /* Just disable it once at startup */
15404 i915_disable_vga(dev_priv
);
15405 intel_setup_outputs(dev_priv
);
15407 drm_modeset_lock_all(dev
);
15408 intel_modeset_setup_hw_state(dev
, dev
->mode_config
.acquire_ctx
);
15409 drm_modeset_unlock_all(dev
);
15411 for_each_intel_crtc(dev
, crtc
) {
15412 struct intel_initial_plane_config plane_config
= {};
15418 * Note that reserving the BIOS fb up front prevents us
15419 * from stuffing other stolen allocations like the ring
15420 * on top. This prevents some ugliness at boot time, and
15421 * can even allow for smooth boot transitions if the BIOS
15422 * fb is large enough for the active pipe configuration.
15424 dev_priv
->display
.get_initial_plane_config(crtc
,
15428 * If the fb is shared between multiple heads, we'll
15429 * just get the first one.
15431 intel_find_initial_plane_obj(crtc
, &plane_config
);
15435 * Make sure hardware watermarks really match the state we read out.
15436 * Note that we need to do this after reconstructing the BIOS fb's
15437 * since the watermark calculation done here will use pstate->fb.
15439 if (!HAS_GMCH(dev_priv
))
15440 sanitize_watermarks(dev
);
15443 * Force all active planes to recompute their states. So that on
15444 * mode_setcrtc after probe, all the intel_plane_state variables
15445 * are already calculated and there is no assert_plane warnings
15448 ret
= intel_initial_commit(dev
);
15450 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
/*
 * i830_enable_pipe - force a pipe on for the i830 "force pipe on" quirk.
 *
 * Programs a fixed 640x480@60Hz (~25175 kHz) timing, enables the DPLL
 * (writing it several times, as the code comments below require), then
 * enables the pipe and waits for the scanline to start moving.
 *
 * NOTE(review): this fragment is a garbled extraction — statements are
 * split across physical lines, interior lines are missing (e.g. the
 * local dpll/fp/i declarations and braces), and the leading integers
 * are stale line numbers from the original file. Restore from pristine
 * source before compiling.
 */
15455 void i830_enable_pipe(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
15457 struct intel_crtc
*crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
15458 /* 640x480@60Hz, ~25175 kHz */
15459 struct dpll clock
= {
/* Sanity-check the chosen divider values actually produce ~25.2 MHz. */
15469 WARN_ON(i9xx_calc_dpll_params(48000, &clock
) != 25154);
15471 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
15472 pipe_name(pipe
), clock
.vco
, clock
.dot
);
/* Compute FP divisor and assemble the DPLL control value. */
15474 fp
= i9xx_dpll_compute_fp(&clock
);
15475 dpll
= (I915_READ(DPLL(pipe
)) & DPLL_DVO_2X_MODE
) |
15476 DPLL_VGA_MODE_DIS
|
15477 ((clock
.p1
- 2) << DPLL_FPA01_P1_POST_DIV_SHIFT
) |
15478 PLL_P2_DIVIDE_BY_4
|
15479 PLL_REF_INPUT_DREFCLK
|
15482 I915_WRITE(FP0(pipe
), fp
);
15483 I915_WRITE(FP1(pipe
), fp
);
/* Fixed 640x480 timings: active | total-1, per-register encoding. */
15485 I915_WRITE(HTOTAL(pipe
), (640 - 1) | ((800 - 1) << 16));
15486 I915_WRITE(HBLANK(pipe
), (640 - 1) | ((800 - 1) << 16));
15487 I915_WRITE(HSYNC(pipe
), (656 - 1) | ((752 - 1) << 16));
15488 I915_WRITE(VTOTAL(pipe
), (480 - 1) | ((525 - 1) << 16));
15489 I915_WRITE(VBLANK(pipe
), (480 - 1) | ((525 - 1) << 16));
15490 I915_WRITE(VSYNC(pipe
), (490 - 1) | ((492 - 1) << 16));
15491 I915_WRITE(PIPESRC(pipe
), ((640 - 1) << 16) | (480 - 1));
15494 * Apparently we need to have VGA mode enabled prior to changing
15495 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
15496 * dividers, even though the register value does change.
15498 I915_WRITE(DPLL(pipe
), dpll
& ~DPLL_VGA_MODE_DIS
);
15499 I915_WRITE(DPLL(pipe
), dpll
);
15501 /* Wait for the clocks to stabilize. */
15502 POSTING_READ(DPLL(pipe
));
15505 /* The pixel multiplier can only be updated once the
15506 * DPLL is enabled and the clocks are stable.
15508 * So write it again.
15510 I915_WRITE(DPLL(pipe
), dpll
);
15512 /* We do this three times for luck */
15513 for (i
= 0; i
< 3 ; i
++) {
15514 I915_WRITE(DPLL(pipe
), dpll
);
15515 POSTING_READ(DPLL(pipe
));
15516 udelay(150); /* wait for warmup */
/* Finally enable the pipe (progressive scan) and wait for scanout. */
15519 I915_WRITE(PIPECONF(pipe
), PIPECONF_ENABLE
| PIPECONF_PROGRESSIVE
);
15520 POSTING_READ(PIPECONF(pipe
));
15522 intel_wait_for_pipe_scanline_moving(crtc
);
/*
 * i830_disable_pipe - counterpart of i830_enable_pipe(): turns the
 * force-quirk pipe back off. Sanity-checks that no plane/cursor is
 * still scanning out before disabling the pipe and the DPLL.
 *
 * NOTE(review): garbled extraction — lines split/missing, leading
 * integers are stale line numbers from the original file.
 */
15525 void i830_disable_pipe(struct drm_i915_private
*dev_priv
, enum pipe pipe
)
15527 struct intel_crtc
*crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
15529 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
/* No plane or cursor may still be enabled on this hardware here. */
15532 WARN_ON(I915_READ(DSPCNTR(PLANE_A
)) & DISPLAY_PLANE_ENABLE
);
15533 WARN_ON(I915_READ(DSPCNTR(PLANE_B
)) & DISPLAY_PLANE_ENABLE
);
15534 WARN_ON(I915_READ(DSPCNTR(PLANE_C
)) & DISPLAY_PLANE_ENABLE
);
15535 WARN_ON(I915_READ(CURCNTR(PIPE_A
)) & MCURSOR_MODE
);
15536 WARN_ON(I915_READ(CURCNTR(PIPE_B
)) & MCURSOR_MODE
);
/* Disable the pipe, wait until scanout has actually stopped, ... */
15538 I915_WRITE(PIPECONF(pipe
), 0);
15539 POSTING_READ(PIPECONF(pipe
));
15541 intel_wait_for_pipe_scanline_stopped(crtc
);
/* ... then shut down the DPLL (leaving VGA mode disabled). */
15543 I915_WRITE(DPLL(pipe
), DPLL_VGA_MODE_DIS
);
15544 POSTING_READ(DPLL(pipe
));
/*
 * intel_sanitize_plane_mapping - disable primary planes the BIOS left
 * attached to the wrong pipe. Only relevant on pre-gen4 hardware
 * (note the INTEL_GEN >= 4 early bail below); on gen2/3 a plane can
 * scan out from a pipe other than the one its crtc owns.
 *
 * NOTE(review): garbled extraction — the early-return/continue lines
 * and braces are missing; leading integers are stale line numbers.
 */
15548 intel_sanitize_plane_mapping(struct drm_i915_private
*dev_priv
)
15550 struct intel_crtc
*crtc
;
15552 if (INTEL_GEN(dev_priv
) >= 4)
15555 for_each_intel_crtc(&dev_priv
->drm
, crtc
) {
15556 struct intel_plane
*plane
=
15557 to_intel_plane(crtc
->base
.primary
);
15558 struct intel_crtc
*plane_crtc
;
/* Ask the hardware which pipe this primary plane is feeding. */
15561 if (!plane
->get_hw_state(plane
, &pipe
))
15564 if (pipe
== crtc
->pipe
)
/* Mismatch: plane scans out from a different pipe — turn it off. */
15567 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15568 plane
->base
.base
.id
, plane
->base
.name
);
15570 plane_crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
15571 intel_plane_disable_noatomic(plane_crtc
, plane
);
/*
 * intel_crtc_has_encoders - report whether any encoder is attached to
 * this crtc (iterates encoders on the crtc; the loop body with the
 * return statements is missing from this fragment).
 *
 * NOTE(review): garbled extraction — leading integers are stale line
 * numbers; restore loop body/returns from pristine source.
 */
15575 static bool intel_crtc_has_encoders(struct intel_crtc
*crtc
)
15577 struct drm_device
*dev
= crtc
->base
.dev
;
15578 struct intel_encoder
*encoder
;
15580 for_each_encoder_on_crtc(dev
, &crtc
->base
, encoder
)
/*
 * intel_encoder_find_connector - return the first connector currently
 * attached to this encoder (iterates connectors on the encoder; the
 * loop body with the return statements is missing from this fragment).
 *
 * NOTE(review): garbled extraction — leading integers are stale line
 * numbers; restore loop body/returns from pristine source.
 */
15586 static struct intel_connector
*intel_encoder_find_connector(struct intel_encoder
*encoder
)
15588 struct drm_device
*dev
= encoder
->base
.dev
;
15589 struct intel_connector
*connector
;
15591 for_each_connector_on_encoder(dev
, &encoder
->base
, connector
)
15597 static bool has_pch_trancoder(struct drm_i915_private
*dev_priv
,
15598 enum pipe pch_transcoder
)
15600 return HAS_PCH_IBX(dev_priv
) || HAS_PCH_CPT(dev_priv
) ||
15601 (HAS_PCH_LPT_H(dev_priv
) && pch_transcoder
== PIPE_A
);
/*
 * intel_sanitize_crtc - bring one crtc's BIOS-inherited hardware state
 * in line with what the driver expects: clear frame-start delays,
 * disable all non-primary planes, reset the background color (gen9+),
 * turn off active pipes with no encoders, and mark FIFO underrun
 * reporting as disabled for bookkeeping.
 *
 * NOTE(review): garbled extraction — lines split/missing, leading
 * integers are stale line numbers from the original file.
 */
15604 static void intel_sanitize_crtc(struct intel_crtc
*crtc
,
15605 struct drm_modeset_acquire_ctx
*ctx
)
15607 struct drm_device
*dev
= crtc
->base
.dev
;
15608 struct drm_i915_private
*dev_priv
= to_i915(dev
);
15609 struct intel_crtc_state
*crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
15610 enum transcoder cpu_transcoder
= crtc_state
->cpu_transcoder
;
15612 /* Clear any frame start delays used for debugging left by the BIOS */
15613 if (crtc
->active
&& !transcoder_is_dsi(cpu_transcoder
)) {
15614 i915_reg_t reg
= PIPECONF(cpu_transcoder
);
15617 I915_READ(reg
) & ~PIPECONF_FRAME_START_DELAY_MASK
);
15620 if (crtc_state
->base
.active
) {
15621 struct intel_plane
*plane
;
15623 /* Disable everything but the primary plane */
15624 for_each_intel_plane_on_crtc(dev
, crtc
, plane
) {
15625 const struct intel_plane_state
*plane_state
=
15626 to_intel_plane_state(plane
->base
.state
);
15628 if (plane_state
->base
.visible
&&
15629 plane
->base
.type
!= DRM_PLANE_TYPE_PRIMARY
)
15630 intel_plane_disable_noatomic(crtc
, plane
);
15634 * Disable any background color set by the BIOS, but enable the
15635 * gamma and CSC to match how we program our planes.
15637 if (INTEL_GEN(dev_priv
) >= 9)
15638 I915_WRITE(SKL_BOTTOM_COLOR(crtc
->pipe
),
15639 SKL_BOTTOM_COLOR_GAMMA_ENABLE
|
15640 SKL_BOTTOM_COLOR_CSC_ENABLE
);
15643 /* Adjust the state of the output pipe according to whether we
15644 * have active connectors/encoders. */
15645 if (crtc_state
->base
.active
&& !intel_crtc_has_encoders(crtc
))
15646 intel_crtc_disable_noatomic(&crtc
->base
, ctx
);
15648 if (crtc_state
->base
.active
|| HAS_GMCH(dev_priv
)) {
15650 * We start out with underrun reporting disabled to avoid races.
15651 * For correct bookkeeping mark this on active crtcs.
15653 * Also on gmch platforms we dont have any hardware bits to
15654 * disable the underrun reporting. Which means we need to start
15655 * out with underrun reporting disabled also on inactive pipes,
15656 * since otherwise we'll complain about the garbage we read when
15657 * e.g. coming up after runtime pm.
15659 * No protection against concurrent access is required - at
15660 * worst a fifo underrun happens which also sets this to false.
15662 crtc
->cpu_fifo_underrun_disabled
= true;
15664 * We track the PCH trancoder underrun reporting state
15665 * within the crtc. With crtc for pipe A housing the underrun
15666 * reporting state for PCH transcoder A, crtc for pipe B housing
15667 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
15668 * and marking underrun reporting as disabled for the non-existing
15669 * PCH transcoders B and C would prevent enabling the south
15670 * error interrupt (see cpt_can_enable_serr_int()).
15672 if (has_pch_trancoder(dev_priv
, crtc
->pipe
))
15673 crtc
->pch_fifo_underrun_disabled
= true;
/*
 * has_bogus_dpll_config - detect SNB BIOSes that misprogrammed the
 * DPLL (active crtc, shared DPLL assigned, but port_clock == 0).
 * Used by intel_sanitize_encoder() to force the pipe off instead.
 *
 * NOTE(review): garbled extraction — braces and some comment lines
 * are missing; leading integers are stale line numbers.
 */
15677 static bool has_bogus_dpll_config(const struct intel_crtc_state
*crtc_state
)
15679 struct drm_i915_private
*dev_priv
= to_i915(crtc_state
->base
.crtc
->dev
);
15682 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
15683 * the hardware when a high res displays plugged in. DPLL P
15684 * divider is zero, and the pipe timings are bonkers. We'll
15685 * try to disable everything in that case.
15687 * FIXME would be nice to be able to sanitize this state
15688 * without several WARNs, but for now let's take the easy
15691 return IS_GEN(dev_priv
, 6) &&
15692 crtc_state
->base
.active
&&
15693 crtc_state
->shared_dpll
&&
15694 crtc_state
->port_clock
== 0;
/*
 * intel_sanitize_encoder - reconcile an encoder's BIOS-inherited state:
 * if a connector is active but the encoder has no active pipe (resume
 * register-restore fallout, or a bogus SNB DPLL config), manually run
 * the encoder's disable hooks and clamp connector state to off. Also
 * notifies opregion and (gen11+) sanitizes the PLL mapping.
 *
 * NOTE(review): garbled extraction — lines split/missing, leading
 * integers are stale line numbers from the original file.
 */
15697 static void intel_sanitize_encoder(struct intel_encoder
*encoder
)
15699 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
15700 struct intel_connector
*connector
;
15701 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
15702 struct intel_crtc_state
*crtc_state
= crtc
?
15703 to_intel_crtc_state(crtc
->base
.state
) : NULL
;
15705 /* We need to check both for a crtc link (meaning that the
15706 * encoder is active and trying to read from a pipe) and the
15707 * pipe itself being active. */
15708 bool has_active_crtc
= crtc_state
&&
15709 crtc_state
->base
.active
;
15711 if (crtc_state
&& has_bogus_dpll_config(crtc_state
)) {
15712 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
15713 pipe_name(crtc
->pipe
));
15714 has_active_crtc
= false;
15717 connector
= intel_encoder_find_connector(encoder
);
15718 if (connector
&& !has_active_crtc
) {
15719 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15720 encoder
->base
.base
.id
,
15721 encoder
->base
.name
);
15723 /* Connector is active, but has no active pipe. This is
15724 * fallout from our resume register restoring. Disable
15725 * the encoder manually again. */
15727 struct drm_encoder
*best_encoder
;
15729 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15730 encoder
->base
.base
.id
,
15731 encoder
->base
.name
);
15733 /* avoid oopsing in case the hooks consult best_encoder */
15734 best_encoder
= connector
->base
.state
->best_encoder
;
15735 connector
->base
.state
->best_encoder
= &encoder
->base
;
/* disable/post_disable hooks are optional — call only if present. */
15737 if (encoder
->disable
)
15738 encoder
->disable(encoder
, crtc_state
,
15739 connector
->base
.state
);
15740 if (encoder
->post_disable
)
15741 encoder
->post_disable(encoder
, crtc_state
,
15742 connector
->base
.state
);
15744 connector
->base
.state
->best_encoder
= best_encoder
;
15746 encoder
->base
.crtc
= NULL
;
15748 /* Inconsistent output/port/pipe state happens presumably due to
15749 * a bug in one of the get_hw_state functions. Or someplace else
15750 * in our code, like the register restore mess on resume. Clamp
15751 * things to off as a safer default. */
15753 connector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
15754 connector
->base
.encoder
= NULL
;
15757 /* notify opregion of the sanitized encoder state */
15758 intel_opregion_notify_encoder(encoder
, connector
&& has_active_crtc
);
15760 if (INTEL_GEN(dev_priv
) >= 11)
15761 icl_sanitize_encoder_pll_mapping(encoder
);
/*
 * i915_redisable_vga_power_on - if something re-enabled the VGA plane,
 * disable it again. Caller must guarantee the VGA power well is on
 * (see i915_redisable_vga() for the checked variant).
 *
 * NOTE(review): garbled extraction — braces missing; leading integers
 * are stale line numbers from the original file.
 */
15764 void i915_redisable_vga_power_on(struct drm_i915_private
*dev_priv
)
15766 i915_reg_t vga_reg
= i915_vgacntrl_reg(dev_priv
);
15768 if (!(I915_READ(vga_reg
) & VGA_DISP_DISABLE
)) {
15769 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15770 i915_disable_vga(dev_priv
);
/*
 * i915_redisable_vga - power-well-checked wrapper around
 * i915_redisable_vga_power_on(): only touches the hardware if the VGA
 * power well is already enabled, and drops the wakeref afterwards.
 *
 * NOTE(review): garbled extraction — the power-domain argument line
 * and the wakeref early-return are missing; leading integers are
 * stale line numbers from the original file.
 */
15774 void i915_redisable_vga(struct drm_i915_private
*dev_priv
)
15776 intel_wakeref_t wakeref
;
15779 * This function can be called both from intel_modeset_setup_hw_state or
15780 * at a very early point in our resume sequence, where the power well
15781 * structures are not yet restored. Since this function is at a very
15782 * paranoid "someone might have enabled VGA while we were not looking"
15783 * level, just check if the power well is enabled instead of trying to
15784 * follow the "don't touch the power well if we don't need it" policy
15785 * the rest of the driver uses.
15787 wakeref
= intel_display_power_get_if_enabled(dev_priv
,
15792 i915_redisable_vga_power_on(dev_priv
);
15794 intel_display_power_put(dev_priv
, POWER_DOMAIN_VGA
, wakeref
);
/*
 * readout_plane_state - read each plane's enable state from hardware
 * into its atomic plane state, then fix up the per-crtc active-plane
 * bookkeeping via fixup_active_planes().
 *
 * NOTE(review): garbled extraction — the 'bool visible;' declaration
 * and braces are missing; leading integers are stale line numbers.
 */
15797 /* FIXME read out full plane state for all planes */
15798 static void readout_plane_state(struct drm_i915_private
*dev_priv
)
15800 struct intel_plane
*plane
;
15801 struct intel_crtc
*crtc
;
15803 for_each_intel_plane(&dev_priv
->drm
, plane
) {
15804 struct intel_plane_state
*plane_state
=
15805 to_intel_plane_state(plane
->base
.state
);
15806 struct intel_crtc_state
*crtc_state
;
15807 enum pipe pipe
= PIPE_A
;
/* Hardware tells us whether the plane is on and which pipe feeds it. */
15810 visible
= plane
->get_hw_state(plane
, &pipe
);
15812 crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
15813 crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
15815 intel_set_plane_visible(crtc_state
, plane_state
, visible
);
15817 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
15818 plane
->base
.base
.id
, plane
->base
.name
,
15819 enableddisabled(visible
), pipe_name(pipe
));
15822 for_each_intel_crtc(&dev_priv
->drm
, crtc
) {
15823 struct intel_crtc_state
*crtc_state
=
15824 to_intel_crtc_state(crtc
->base
.state
);
15826 fixup_active_planes(crtc_state
);
/*
 * intel_modeset_readout_hw_state - reconstruct the driver's software
 * state from the hardware: per-crtc pipe config, plane state, shared
 * DPLL usage, encoder->crtc links, connector->encoder links, and the
 * derived mode/cdclk/voltage bookkeeping. The read-out states are
 * flagged I915_MODE_FLAG_INHERITED so the first commit recalculates
 * everything.
 *
 * NOTE(review): garbled extraction — loop-index/local declarations,
 * 'else' branches and braces are missing; the leading integers are
 * stale line numbers from the original file.
 */
15830 static void intel_modeset_readout_hw_state(struct drm_device
*dev
)
15832 struct drm_i915_private
*dev_priv
= to_i915(dev
);
15834 struct intel_crtc
*crtc
;
15835 struct intel_encoder
*encoder
;
15836 struct intel_connector
*connector
;
15837 struct drm_connector_list_iter conn_iter
;
15840 dev_priv
->active_crtcs
= 0;
/* Pass 1: wipe each crtc state and read the pipe config from HW. */
15842 for_each_intel_crtc(dev
, crtc
) {
15843 struct intel_crtc_state
*crtc_state
=
15844 to_intel_crtc_state(crtc
->base
.state
);
15846 __drm_atomic_helper_crtc_destroy_state(&crtc_state
->base
);
15847 memset(crtc_state
, 0, sizeof(*crtc_state
));
15848 crtc_state
->base
.crtc
= &crtc
->base
;
15850 crtc_state
->base
.active
= crtc_state
->base
.enable
=
15851 dev_priv
->display
.get_pipe_config(crtc
, crtc_state
);
15853 crtc
->base
.enabled
= crtc_state
->base
.enable
;
15854 crtc
->active
= crtc_state
->base
.active
;
15856 if (crtc_state
->base
.active
)
15857 dev_priv
->active_crtcs
|= 1 << crtc
->pipe
;
15859 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15860 crtc
->base
.base
.id
, crtc
->base
.name
,
15861 enableddisabled(crtc_state
->base
.active
));
15864 readout_plane_state(dev_priv
);
/* Pass 2: read shared DPLL state and rebuild each PLL's crtc_mask. */
15866 for (i
= 0; i
< dev_priv
->num_shared_dpll
; i
++) {
15867 struct intel_shared_dpll
*pll
= &dev_priv
->shared_dplls
[i
];
15869 pll
->on
= pll
->info
->funcs
->get_hw_state(dev_priv
, pll
,
15870 &pll
->state
.hw_state
);
15871 pll
->state
.crtc_mask
= 0;
15872 for_each_intel_crtc(dev
, crtc
) {
15873 struct intel_crtc_state
*crtc_state
=
15874 to_intel_crtc_state(crtc
->base
.state
);
15876 if (crtc_state
->base
.active
&&
15877 crtc_state
->shared_dpll
== pll
)
15878 pll
->state
.crtc_mask
|= 1 << crtc
->pipe
;
15880 pll
->active_mask
= pll
->state
.crtc_mask
;
15882 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15883 pll
->info
->name
, pll
->state
.crtc_mask
, pll
->on
);
/* Pass 3: link encoders to their crtcs and read encoder config. */
15886 for_each_intel_encoder(dev
, encoder
) {
15889 if (encoder
->get_hw_state(encoder
, &pipe
)) {
15890 struct intel_crtc_state
*crtc_state
;
15892 crtc
= intel_get_crtc_for_pipe(dev_priv
, pipe
);
15893 crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
15895 encoder
->base
.crtc
= &crtc
->base
;
15896 encoder
->get_config(encoder
, crtc_state
);
15898 encoder
->base
.crtc
= NULL
;
15901 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15902 encoder
->base
.base
.id
, encoder
->base
.name
,
15903 enableddisabled(encoder
->base
.crtc
),
/* Pass 4: link connectors to encoders and update crtc masks. */
15907 drm_connector_list_iter_begin(dev
, &conn_iter
);
15908 for_each_intel_connector_iter(connector
, &conn_iter
) {
15909 if (connector
->get_hw_state(connector
)) {
15910 connector
->base
.dpms
= DRM_MODE_DPMS_ON
;
15912 encoder
= connector
->encoder
;
15913 connector
->base
.encoder
= &encoder
->base
;
15915 if (encoder
->base
.crtc
&&
15916 encoder
->base
.crtc
->state
->active
) {
15918 * This has to be done during hardware readout
15919 * because anything calling .crtc_disable may
15920 * rely on the connector_mask being accurate.
15922 encoder
->base
.crtc
->state
->connector_mask
|=
15923 drm_connector_mask(&connector
->base
);
15924 encoder
->base
.crtc
->state
->encoder_mask
|=
15925 drm_encoder_mask(&encoder
->base
);
15929 connector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
15930 connector
->base
.encoder
= NULL
;
15932 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15933 connector
->base
.base
.id
, connector
->base
.name
,
15934 enableddisabled(connector
->base
.encoder
));
15936 drm_connector_list_iter_end(&conn_iter
);
/* Pass 5: derive modes, pixel rate, min cdclk and voltage level. */
15938 for_each_intel_crtc(dev
, crtc
) {
15939 struct intel_crtc_state
*crtc_state
=
15940 to_intel_crtc_state(crtc
->base
.state
);
15943 memset(&crtc
->base
.mode
, 0, sizeof(crtc
->base
.mode
));
15944 if (crtc_state
->base
.active
) {
15945 intel_mode_from_pipe_config(&crtc
->base
.mode
, crtc_state
);
15946 crtc
->base
.mode
.hdisplay
= crtc_state
->pipe_src_w
;
15947 crtc
->base
.mode
.vdisplay
= crtc_state
->pipe_src_h
;
15948 intel_mode_from_pipe_config(&crtc_state
->base
.adjusted_mode
, crtc_state
);
15949 WARN_ON(drm_atomic_set_mode_for_crtc(crtc
->base
.state
, &crtc
->base
.mode
));
15952 * The initial mode needs to be set in order to keep
15953 * the atomic core happy. It wants a valid mode if the
15954 * crtc's enabled, so we do the above call.
15956 * But we don't set all the derived state fully, hence
15957 * set a flag to indicate that a full recalculation is
15958 * needed on the next commit.
15960 crtc_state
->base
.mode
.private_flags
= I915_MODE_FLAG_INHERITED
;
15962 intel_crtc_compute_pixel_rate(crtc_state
);
15964 if (dev_priv
->display
.modeset_calc_cdclk
) {
15965 min_cdclk
= intel_crtc_compute_min_cdclk(crtc_state
);
15966 if (WARN_ON(min_cdclk
< 0))
15970 drm_calc_timestamping_constants(&crtc
->base
,
15971 &crtc_state
->base
.adjusted_mode
);
15972 update_scanline_offset(crtc_state
);
15975 dev_priv
->min_cdclk
[crtc
->pipe
] = min_cdclk
;
15976 dev_priv
->min_voltage_level
[crtc
->pipe
] =
15977 crtc_state
->min_voltage_level
;
15979 intel_pipe_config_sanity_check(dev_priv
, crtc_state
);
/*
 * get_encoder_power_domains - grab power-domain references for every
 * active encoder that implements ->get_power_domains(). Encoders
 * without a crtc (MST-primary / inactive) are skipped.
 *
 * NOTE(review): garbled extraction — the 'continue;' lines and braces
 * are missing; leading integers are stale line numbers.
 */
15984 get_encoder_power_domains(struct drm_i915_private
*dev_priv
)
15986 struct intel_encoder
*encoder
;
15988 for_each_intel_encoder(&dev_priv
->drm
, encoder
) {
15989 struct intel_crtc_state
*crtc_state
;
15991 if (!encoder
->get_power_domains
)
15995 * MST-primary and inactive encoders don't have a crtc state
15996 * and neither of these require any power domain references.
15998 if (!encoder
->base
.crtc
)
16001 crtc_state
= to_intel_crtc_state(encoder
->base
.crtc
->state
);
16002 encoder
->get_power_domains(encoder
, crtc_state
);
/*
 * intel_early_display_was - apply display workarounds that must run
 * before any other display hardware access: WA #1185 (CNL/GLK DARBF
 * clock gating) and WaRsPkgCStateDisplayPMReq (HSW).
 *
 * NOTE(review): garbled extraction — the DARBF_GATING_DIS OR-operand
 * line is missing; leading integers are stale line numbers.
 */
16006 static void intel_early_display_was(struct drm_i915_private
*dev_priv
)
16008 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16009 if (IS_CANNONLAKE(dev_priv
) || IS_GEMINILAKE(dev_priv
))
16010 I915_WRITE(GEN9_CLKGATE_DIS_0
, I915_READ(GEN9_CLKGATE_DIS_0
) |
16013 if (IS_HASWELL(dev_priv
)) {
16015 * WaRsPkgCStateDisplayPMReq:hsw
16016 * System hang if this isn't done before disabling all planes!
16018 I915_WRITE(CHICKEN_PAR1_1
,
16019 I915_READ(CHICKEN_PAR1_1
) | FORCE_ARB_IDLE_PLANES
);
/*
 * ibx_sanitize_pch_hdmi_port - if a disabled PCH HDMI/SDVO port has a
 * transcoder other than A selected by the BIOS, rewrite the pipe
 * select bits to transcoder A. Ports that are enabled, or already
 * selecting pipe A, are left alone (early return — the 'return;' line
 * is missing from this fragment).
 *
 * NOTE(review): garbled extraction — leading integers are stale line
 * numbers from the original file.
 */
16023 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private
*dev_priv
,
16024 enum port port
, i915_reg_t hdmi_reg
)
16026 u32 val
= I915_READ(hdmi_reg
);
16028 if (val
& SDVO_ENABLE
||
16029 (val
& SDVO_PIPE_SEL_MASK
) == SDVO_PIPE_SEL(PIPE_A
))
16032 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16035 val
&= ~SDVO_PIPE_SEL_MASK
;
16036 val
|= SDVO_PIPE_SEL(PIPE_A
);
16038 I915_WRITE(hdmi_reg
, val
);
/*
 * ibx_sanitize_pch_dp_port - DP variant of ibx_sanitize_pch_hdmi_port:
 * force the pipe select of a disabled PCH DP port back to pipe A if
 * the BIOS left another transcoder selected (early return when the
 * port is enabled or already selecting pipe A — 'return;' line missing
 * from this fragment).
 *
 * NOTE(review): garbled extraction — leading integers are stale line
 * numbers from the original file.
 */
16041 static void ibx_sanitize_pch_dp_port(struct drm_i915_private
*dev_priv
,
16042 enum port port
, i915_reg_t dp_reg
)
16044 u32 val
= I915_READ(dp_reg
);
16046 if (val
& DP_PORT_EN
||
16047 (val
& DP_PIPE_SEL_MASK
) == DP_PIPE_SEL(PIPE_A
))
16050 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16053 val
&= ~DP_PIPE_SEL_MASK
;
16054 val
|= DP_PIPE_SEL(PIPE_A
);
16056 I915_WRITE(dp_reg
, val
);
/*
 * ibx_sanitize_pch_ports - sanitize the transcoder select bits of all
 * PCH DP and HDMI ports (B/C/D) on IBX; see the in-body comment for
 * why only the select bits are touched.
 *
 * NOTE(review): garbled extraction — braces missing; leading integers
 * are stale line numbers from the original file.
 */
16059 static void ibx_sanitize_pch_ports(struct drm_i915_private
*dev_priv
)
16062 * The BIOS may select transcoder B on some of the PCH
16063 * ports even it doesn't enable the port. This would trip
16064 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16065 * Sanitize the transcoder select bits to prevent that. We
16066 * assume that the BIOS never actually enabled the port,
16067 * because if it did we'd actually have to toggle the port
16068 * on and back off to make the transcoder A select stick
16069 * (see. intel_dp_link_down(), intel_disable_hdmi(),
16070 * intel_disable_sdvo()).
16072 ibx_sanitize_pch_dp_port(dev_priv
, PORT_B
, PCH_DP_B
);
16073 ibx_sanitize_pch_dp_port(dev_priv
, PORT_C
, PCH_DP_C
);
16074 ibx_sanitize_pch_dp_port(dev_priv
, PORT_D
, PCH_DP_D
);
16076 /* PCH SDVOB multiplex with HDMIB */
16077 ibx_sanitize_pch_hdmi_port(dev_priv
, PORT_B
, PCH_HDMIB
);
16078 ibx_sanitize_pch_hdmi_port(dev_priv
, PORT_C
, PCH_HDMIC
);
16079 ibx_sanitize_pch_hdmi_port(dev_priv
, PORT_D
, PCH_HDMID
);
/*
 * intel_modeset_setup_hw_state - read out the current hardware modeset
 * state and sanitize it: early workarounds, HW readout, encoder power
 * domains, PCH port select fixup (IBX), vblank restore, plane/encoder/
 * crtc sanitization, unused-DPLL disable, watermark readout per
 * platform, and a power-domain consistency check per crtc. Holds a
 * POWER_DOMAIN_INIT wakeref for the duration.
 *
 * NOTE(review): garbled extraction — loop-index/put_domains
 * declarations, 'continue;' lines and braces are missing; leading
 * integers are stale line numbers from the original file.
 */
16082 /* Scan out the current hw modeset state,
16083 * and sanitizes it to the current state
16086 intel_modeset_setup_hw_state(struct drm_device
*dev
,
16087 struct drm_modeset_acquire_ctx
*ctx
)
16089 struct drm_i915_private
*dev_priv
= to_i915(dev
);
16090 struct intel_crtc_state
*crtc_state
;
16091 struct intel_encoder
*encoder
;
16092 struct intel_crtc
*crtc
;
16093 intel_wakeref_t wakeref
;
16096 wakeref
= intel_display_power_get(dev_priv
, POWER_DOMAIN_INIT
);
16098 intel_early_display_was(dev_priv
);
16099 intel_modeset_readout_hw_state(dev
);
16101 /* HW state is read out, now we need to sanitize this mess. */
16102 get_encoder_power_domains(dev_priv
);
16104 if (HAS_PCH_IBX(dev_priv
))
16105 ibx_sanitize_pch_ports(dev_priv
);
16108 * intel_sanitize_plane_mapping() may need to do vblank
16109 * waits, so we need vblank interrupts restored beforehand.
16111 for_each_intel_crtc(&dev_priv
->drm
, crtc
) {
16112 crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
16114 drm_crtc_vblank_reset(&crtc
->base
);
16116 if (crtc_state
->base
.active
)
16117 intel_crtc_vblank_on(crtc_state
);
16120 intel_sanitize_plane_mapping(dev_priv
);
16122 for_each_intel_encoder(dev
, encoder
)
16123 intel_sanitize_encoder(encoder
);
16125 for_each_intel_crtc(&dev_priv
->drm
, crtc
) {
16126 crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
16127 intel_sanitize_crtc(crtc
, ctx
);
16128 intel_dump_pipe_config(crtc
, crtc_state
,
16129 "[setup_hw_state]");
16132 intel_modeset_update_connector_atomic_state(dev
);
/* Turn off any shared DPLL that is on but has no user. */
16134 for (i
= 0; i
< dev_priv
->num_shared_dpll
; i
++) {
16135 struct intel_shared_dpll
*pll
= &dev_priv
->shared_dplls
[i
];
16137 if (!pll
->on
|| pll
->active_mask
)
16140 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16143 pll
->info
->funcs
->disable(dev_priv
, pll
);
/* Watermark state readout/sanitize, per platform generation. */
16147 if (IS_G4X(dev_priv
)) {
16148 g4x_wm_get_hw_state(dev_priv
);
16149 g4x_wm_sanitize(dev_priv
);
16150 } else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
16151 vlv_wm_get_hw_state(dev_priv
);
16152 vlv_wm_sanitize(dev_priv
);
16153 } else if (INTEL_GEN(dev_priv
) >= 9) {
16154 skl_wm_get_hw_state(dev_priv
);
16155 } else if (HAS_PCH_SPLIT(dev_priv
)) {
16156 ilk_wm_get_hw_state(dev_priv
);
16159 for_each_intel_crtc(dev
, crtc
) {
16162 crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
16163 put_domains
= modeset_get_crtc_power_domains(&crtc
->base
, crtc_state
);
16164 if (WARN_ON(put_domains
))
16165 modeset_put_power_domains(dev_priv
, put_domains
);
16168 intel_display_power_put(dev_priv
, POWER_DOMAIN_INIT
, wakeref
);
16170 intel_fbc_init_pipe_state(dev_priv
);
/*
 * intel_display_resume - restore the modeset state saved at suspend
 * (dev_priv->modeset_restore_state). Takes all modeset locks with
 * deadlock backoff (the retry-loop lines are missing from this
 * fragment), commits the state via __intel_display_resume(), then
 * re-enables IPC and drops locks/state reference.
 *
 * NOTE(review): garbled extraction — 'int ret;', retry loop and braces
 * are missing; leading integers are stale line numbers.
 */
16173 void intel_display_resume(struct drm_device
*dev
)
16175 struct drm_i915_private
*dev_priv
= to_i915(dev
);
16176 struct drm_atomic_state
*state
= dev_priv
->modeset_restore_state
;
16177 struct drm_modeset_acquire_ctx ctx
;
16180 dev_priv
->modeset_restore_state
= NULL
;
16182 state
->acquire_ctx
= &ctx
;
16184 drm_modeset_acquire_init(&ctx
, 0);
16187 ret
= drm_modeset_lock_all_ctx(dev
, &ctx
);
16188 if (ret
!= -EDEADLK
)
16191 drm_modeset_backoff(&ctx
);
16195 ret
= __intel_display_resume(dev
, state
, &ctx
);
16197 intel_enable_ipc(dev_priv
);
16198 drm_modeset_drop_locks(&ctx
);
16199 drm_modeset_acquire_fini(&ctx
);
16202 DRM_ERROR("Restoring old state failed with %i\n", ret
);
16204 drm_atomic_state_put(state
);
/*
 * intel_hpd_poll_fini - cancel all per-connector work items that the
 * hotplug machinery may have queued: modeset retry work and, for
 * HDCP-capable connectors, the check/prop work.
 *
 * NOTE(review): garbled extraction — braces missing; leading integers
 * are stale line numbers from the original file.
 */
16207 static void intel_hpd_poll_fini(struct drm_device
*dev
)
16209 struct intel_connector
*connector
;
16210 struct drm_connector_list_iter conn_iter
;
16212 /* Kill all the work that may have been queued by hpd. */
16213 drm_connector_list_iter_begin(dev
, &conn_iter
);
16214 for_each_intel_connector_iter(connector
, &conn_iter
) {
16215 if (connector
->modeset_retry_work
.func
)
16216 cancel_work_sync(&connector
->modeset_retry_work
);
16217 if (connector
->hdcp
.shim
) {
16218 cancel_delayed_work_sync(&connector
->hdcp
.check_work
);
16219 cancel_work_sync(&connector
->hdcp
.prop_work
);
16222 drm_connector_list_iter_end(&conn_iter
);
/*
 * intel_modeset_cleanup - driver-unload teardown of the display side.
 * Ordering matters and is documented inline: flush pending work, kill
 * interrupts before touching connectors, stop hpd polling before
 * fbdev, then tear down mode config, overlay, gmbus, workqueue, FBC.
 *
 * NOTE(review): garbled extraction — braces missing; leading integers
 * are stale line numbers from the original file.
 */
16225 void intel_modeset_cleanup(struct drm_device
*dev
)
16227 struct drm_i915_private
*dev_priv
= to_i915(dev
);
16229 flush_workqueue(dev_priv
->modeset_wq
);
16231 flush_work(&dev_priv
->atomic_helper
.free_work
);
16232 WARN_ON(!llist_empty(&dev_priv
->atomic_helper
.free_list
));
16235 * Interrupts and polling as the first thing to avoid creating havoc.
16236 * Too much stuff here (turning of connectors, ...) would
16237 * experience fancy races otherwise.
16239 intel_irq_uninstall(dev_priv
);
16242 * Due to the hpd irq storm handling the hotplug work can re-arm the
16243 * poll handlers. Hence disable polling after hpd handling is shut down.
16245 intel_hpd_poll_fini(dev
);
16247 /* poll work can call into fbdev, hence clean that up afterwards */
16248 intel_fbdev_fini(dev_priv
);
16250 intel_unregister_dsm_handler();
16252 intel_fbc_global_disable(dev_priv
);
16254 /* flush any delayed tasks or pending work */
16255 flush_scheduled_work();
16257 drm_mode_config_cleanup(dev
);
16259 intel_overlay_cleanup(dev_priv
);
16261 intel_teardown_gmbus(dev_priv
);
16263 destroy_workqueue(dev_priv
->modeset_wq
);
16265 intel_fbc_cleanup_cfb(dev_priv
);
/*
 * intel_modeset_vga_set_state - toggle legacy VGA decode in the GMCH
 * control word via PCI config space on the bridge device. Returns 0 on
 * success / no-op, non-zero on PCI access failure (the return lines
 * are missing from this fragment).
 *
 * NOTE(review): garbled extraction — 'u16 gmch_ctrl;', returns, else
 * and braces are missing; leading integers are stale line numbers.
 */
16269 * set vga decode state - true == enable VGA decode
16271 int intel_modeset_vga_set_state(struct drm_i915_private
*dev_priv
, bool state
)
16273 unsigned reg
= INTEL_GEN(dev_priv
) >= 6 ? SNB_GMCH_CTRL
: INTEL_GMCH_CTRL
;
16276 if (pci_read_config_word(dev_priv
->bridge_dev
, reg
, &gmch_ctrl
)) {
16277 DRM_ERROR("failed to read control word\n");
/* Already in the requested state — nothing to write. */
16281 if (!!(gmch_ctrl
& INTEL_GMCH_VGA_DISABLE
) == !state
)
16285 gmch_ctrl
&= ~INTEL_GMCH_VGA_DISABLE
;
16287 gmch_ctrl
|= INTEL_GMCH_VGA_DISABLE
;
16289 if (pci_write_config_word(dev_priv
->bridge_dev
, reg
, gmch_ctrl
)) {
16290 DRM_ERROR("failed to write control word\n");
/*
 * struct intel_display_error_state - snapshot of display registers
 * captured at GPU-error time (only built with
 * CONFIG_DRM_I915_CAPTURE_ERROR). Holds per-pipe cursor, pipe and
 * plane register values plus per-transcoder timing registers.
 *
 * NOTE(review): garbled extraction — most member declarations and the
 * transcoder array are missing from this fragment; leading integers
 * are stale line numbers from the original file.
 */
16297 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16299 struct intel_display_error_state
{
16301 u32 power_well_driver
;
16303 int num_transcoders
;
16305 struct intel_cursor_error_state
{
16310 } cursor
[I915_MAX_PIPES
];
16312 struct intel_pipe_error_state
{
16313 bool power_domain_on
;
16316 } pipe
[I915_MAX_PIPES
];
16318 struct intel_plane_error_state
{
16326 } plane
[I915_MAX_PIPES
];
16328 struct intel_transcoder_error_state
{
16329 bool power_domain_on
;
16330 enum transcoder cpu_transcoder
;
/*
 * intel_display_capture_error_state - allocate (GFP_ATOMIC — may run
 * from error-handling context) and fill an intel_display_error_state
 * snapshot: power-well state, per-pipe cursor/plane/pipe registers
 * (skipping pipes whose power domain is off), and per-transcoder
 * timing registers including eDP on DDI platforms.
 *
 * NOTE(review): garbled extraction — transcoder list entries, 'int i;',
 * NULL-checks/returns and braces are missing; leading integers are
 * stale line numbers from the original file.
 */
16343 struct intel_display_error_state
*
16344 intel_display_capture_error_state(struct drm_i915_private
*dev_priv
)
16346 struct intel_display_error_state
*error
;
16347 int transcoders
[] = {
16355 if (!HAS_DISPLAY(dev_priv
))
16358 error
= kzalloc(sizeof(*error
), GFP_ATOMIC
);
16362 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
16363 error
->power_well_driver
= I915_READ(HSW_PWR_WELL_CTL2
);
/* Per-pipe capture; skipped entirely if the pipe power domain is off. */
16365 for_each_pipe(dev_priv
, i
) {
16366 error
->pipe
[i
].power_domain_on
=
16367 __intel_display_power_is_enabled(dev_priv
,
16368 POWER_DOMAIN_PIPE(i
));
16369 if (!error
->pipe
[i
].power_domain_on
)
16372 error
->cursor
[i
].control
= I915_READ(CURCNTR(i
));
16373 error
->cursor
[i
].position
= I915_READ(CURPOS(i
));
16374 error
->cursor
[i
].base
= I915_READ(CURBASE(i
));
16376 error
->plane
[i
].control
= I915_READ(DSPCNTR(i
));
16377 error
->plane
[i
].stride
= I915_READ(DSPSTRIDE(i
));
16378 if (INTEL_GEN(dev_priv
) <= 3) {
16379 error
->plane
[i
].size
= I915_READ(DSPSIZE(i
));
16380 error
->plane
[i
].pos
= I915_READ(DSPPOS(i
));
16382 if (INTEL_GEN(dev_priv
) <= 7 && !IS_HASWELL(dev_priv
))
16383 error
->plane
[i
].addr
= I915_READ(DSPADDR(i
));
16384 if (INTEL_GEN(dev_priv
) >= 4) {
16385 error
->plane
[i
].surface
= I915_READ(DSPSURF(i
));
16386 error
->plane
[i
].tile_offset
= I915_READ(DSPTILEOFF(i
));
16389 error
->pipe
[i
].source
= I915_READ(PIPESRC(i
));
16391 if (HAS_GMCH(dev_priv
))
16392 error
->pipe
[i
].stat
= I915_READ(PIPESTAT(i
));
16395 /* Note: this does not include DSI transcoders. */
16396 error
->num_transcoders
= INTEL_INFO(dev_priv
)->num_pipes
;
16397 if (HAS_DDI(dev_priv
))
16398 error
->num_transcoders
++; /* Account for eDP. */
/* Per-transcoder capture; also gated on its power domain. */
16400 for (i
= 0; i
< error
->num_transcoders
; i
++) {
16401 enum transcoder cpu_transcoder
= transcoders
[i
];
16403 error
->transcoder
[i
].power_domain_on
=
16404 __intel_display_power_is_enabled(dev_priv
,
16405 POWER_DOMAIN_TRANSCODER(cpu_transcoder
));
16406 if (!error
->transcoder
[i
].power_domain_on
)
16409 error
->transcoder
[i
].cpu_transcoder
= cpu_transcoder
;
16411 error
->transcoder
[i
].conf
= I915_READ(PIPECONF(cpu_transcoder
));
16412 error
->transcoder
[i
].htotal
= I915_READ(HTOTAL(cpu_transcoder
));
16413 error
->transcoder
[i
].hblank
= I915_READ(HBLANK(cpu_transcoder
));
16414 error
->transcoder
[i
].hsync
= I915_READ(HSYNC(cpu_transcoder
));
16415 error
->transcoder
[i
].vtotal
= I915_READ(VTOTAL(cpu_transcoder
));
16416 error
->transcoder
[i
].vblank
= I915_READ(VBLANK(cpu_transcoder
));
16417 error
->transcoder
[i
].vsync
= I915_READ(VSYNC(cpu_transcoder
));
/* Shorthand for writing into the i915 error-state buffer. */
16423 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16426 intel_display_print_error_state(struct drm_i915_error_state_buf
*m
,
16427 struct intel_display_error_state
*error
)
16429 struct drm_i915_private
*dev_priv
= m
->i915
;
16435 err_printf(m
, "Num Pipes: %d\n", INTEL_INFO(dev_priv
)->num_pipes
);
16436 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
16437 err_printf(m
, "PWR_WELL_CTL2: %08x\n",
16438 error
->power_well_driver
);
16439 for_each_pipe(dev_priv
, i
) {
16440 err_printf(m
, "Pipe [%d]:\n", i
);
16441 err_printf(m
, " Power: %s\n",
16442 onoff(error
->pipe
[i
].power_domain_on
));
16443 err_printf(m
, " SRC: %08x\n", error
->pipe
[i
].source
);
16444 err_printf(m
, " STAT: %08x\n", error
->pipe
[i
].stat
);
16446 err_printf(m
, "Plane [%d]:\n", i
);
16447 err_printf(m
, " CNTR: %08x\n", error
->plane
[i
].control
);
16448 err_printf(m
, " STRIDE: %08x\n", error
->plane
[i
].stride
);
16449 if (INTEL_GEN(dev_priv
) <= 3) {
16450 err_printf(m
, " SIZE: %08x\n", error
->plane
[i
].size
);
16451 err_printf(m
, " POS: %08x\n", error
->plane
[i
].pos
);
16453 if (INTEL_GEN(dev_priv
) <= 7 && !IS_HASWELL(dev_priv
))
16454 err_printf(m
, " ADDR: %08x\n", error
->plane
[i
].addr
);
16455 if (INTEL_GEN(dev_priv
) >= 4) {
16456 err_printf(m
, " SURF: %08x\n", error
->plane
[i
].surface
);
16457 err_printf(m
, " TILEOFF: %08x\n", error
->plane
[i
].tile_offset
);
16460 err_printf(m
, "Cursor [%d]:\n", i
);
16461 err_printf(m
, " CNTR: %08x\n", error
->cursor
[i
].control
);
16462 err_printf(m
, " POS: %08x\n", error
->cursor
[i
].position
);
16463 err_printf(m
, " BASE: %08x\n", error
->cursor
[i
].base
);
16466 for (i
= 0; i
< error
->num_transcoders
; i
++) {
16467 err_printf(m
, "CPU transcoder: %s\n",
16468 transcoder_name(error
->transcoder
[i
].cpu_transcoder
));
16469 err_printf(m
, " Power: %s\n",
16470 onoff(error
->transcoder
[i
].power_domain_on
));
16471 err_printf(m
, " CONF: %08x\n", error
->transcoder
[i
].conf
);
16472 err_printf(m
, " HTOTAL: %08x\n", error
->transcoder
[i
].htotal
);
16473 err_printf(m
, " HBLANK: %08x\n", error
->transcoder
[i
].hblank
);
16474 err_printf(m
, " HSYNC: %08x\n", error
->transcoder
[i
].hsync
);
16475 err_printf(m
, " VTOTAL: %08x\n", error
->transcoder
[i
].vtotal
);
16476 err_printf(m
, " VBLANK: %08x\n", error
->transcoder
[i
].vblank
);
16477 err_printf(m
, " VSYNC: %08x\n", error
->transcoder
[i
].vsync
);