2 * Copyright © 2012-2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
32 #include <drm/drm_print.h>
35 #include "intel_drv.h"
40 * The i915 driver supports dynamic enabling and disabling of entire hardware
41 * blocks at runtime. This is especially important on the display side where
42 * software is supposed to control many power gates manually on recent hardware,
43 * since on the GT side a lot of the power management is done by the hardware.
44 * But even there some manual control at the device level is required.
46 * Since i915 supports a diverse set of platforms with a unified codebase and
47 * hardware engineers just love to shuffle functionality around between power
48 * domains there's a sizeable amount of indirection required. This file provides
49 * generic functions to the driver for grabbing and releasing references for
50 * abstract power domains. It then maps those to the actual power wells
51 * present for a given platform.
54 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
56 #include <linux/sort.h>
60 static noinline depot_stack_handle_t
__save_depot_stack(void)
62 unsigned long entries
[STACKDEPTH
];
63 struct stack_trace trace
= {
65 .max_entries
= ARRAY_SIZE(entries
),
69 save_stack_trace(&trace
);
70 if (trace
.nr_entries
&&
71 trace
.entries
[trace
.nr_entries
- 1] == ULONG_MAX
)
74 return depot_save_stack(&trace
, GFP_NOWAIT
| __GFP_NOWARN
);
77 static void __print_depot_stack(depot_stack_handle_t stack
,
78 char *buf
, int sz
, int indent
)
80 unsigned long entries
[STACKDEPTH
];
81 struct stack_trace trace
= {
83 .max_entries
= ARRAY_SIZE(entries
),
86 depot_fetch_stack(stack
, &trace
);
87 snprint_stack_trace(buf
, sz
, &trace
, indent
);
90 static void init_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
)
92 struct i915_runtime_pm
*rpm
= &i915
->runtime_pm
;
94 spin_lock_init(&rpm
->debug
.lock
);
97 static noinline depot_stack_handle_t
98 track_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
)
100 struct i915_runtime_pm
*rpm
= &i915
->runtime_pm
;
101 depot_stack_handle_t stack
, *stacks
;
104 atomic_inc(&rpm
->wakeref_count
);
105 assert_rpm_wakelock_held(i915
);
107 if (!HAS_RUNTIME_PM(i915
))
110 stack
= __save_depot_stack();
114 spin_lock_irqsave(&rpm
->debug
.lock
, flags
);
116 if (!rpm
->debug
.count
)
117 rpm
->debug
.last_acquire
= stack
;
119 stacks
= krealloc(rpm
->debug
.owners
,
120 (rpm
->debug
.count
+ 1) * sizeof(*stacks
),
121 GFP_NOWAIT
| __GFP_NOWARN
);
123 stacks
[rpm
->debug
.count
++] = stack
;
124 rpm
->debug
.owners
= stacks
;
129 spin_unlock_irqrestore(&rpm
->debug
.lock
, flags
);
134 static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
,
135 depot_stack_handle_t stack
)
137 struct i915_runtime_pm
*rpm
= &i915
->runtime_pm
;
138 unsigned long flags
, n
;
141 if (unlikely(stack
== -1))
144 spin_lock_irqsave(&rpm
->debug
.lock
, flags
);
145 for (n
= rpm
->debug
.count
; n
--; ) {
146 if (rpm
->debug
.owners
[n
] == stack
) {
147 memmove(rpm
->debug
.owners
+ n
,
148 rpm
->debug
.owners
+ n
+ 1,
149 (--rpm
->debug
.count
- n
) * sizeof(stack
));
154 spin_unlock_irqrestore(&rpm
->debug
.lock
, flags
);
157 "Unmatched wakeref (tracking %lu), count %u\n",
158 rpm
->debug
.count
, atomic_read(&rpm
->wakeref_count
))) {
161 buf
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
165 __print_depot_stack(stack
, buf
, PAGE_SIZE
, 2);
166 DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack
, buf
);
168 stack
= READ_ONCE(rpm
->debug
.last_release
);
170 __print_depot_stack(stack
, buf
, PAGE_SIZE
, 2);
171 DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf
);
178 static int cmphandle(const void *_a
, const void *_b
)
180 const depot_stack_handle_t
* const a
= _a
, * const b
= _b
;
191 __print_intel_runtime_pm_wakeref(struct drm_printer
*p
,
192 const struct intel_runtime_pm_debug
*dbg
)
197 buf
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
201 if (dbg
->last_acquire
) {
202 __print_depot_stack(dbg
->last_acquire
, buf
, PAGE_SIZE
, 2);
203 drm_printf(p
, "Wakeref last acquired:\n%s", buf
);
206 if (dbg
->last_release
) {
207 __print_depot_stack(dbg
->last_release
, buf
, PAGE_SIZE
, 2);
208 drm_printf(p
, "Wakeref last released:\n%s", buf
);
211 drm_printf(p
, "Wakeref count: %lu\n", dbg
->count
);
213 sort(dbg
->owners
, dbg
->count
, sizeof(*dbg
->owners
), cmphandle
, NULL
);
215 for (i
= 0; i
< dbg
->count
; i
++) {
216 depot_stack_handle_t stack
= dbg
->owners
[i
];
220 while (i
+ 1 < dbg
->count
&& dbg
->owners
[i
+ 1] == stack
)
222 __print_depot_stack(stack
, buf
, PAGE_SIZE
, 2);
223 drm_printf(p
, "Wakeref x%lu taken at:\n%s", rep
, buf
);
230 untrack_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
)
232 struct i915_runtime_pm
*rpm
= &i915
->runtime_pm
;
233 struct intel_runtime_pm_debug dbg
= {};
234 struct drm_printer p
;
237 assert_rpm_wakelock_held(i915
);
238 if (atomic_dec_and_lock_irqsave(&rpm
->wakeref_count
,
243 rpm
->debug
.owners
= NULL
;
244 rpm
->debug
.count
= 0;
245 rpm
->debug
.last_release
= __save_depot_stack();
247 spin_unlock_irqrestore(&rpm
->debug
.lock
, flags
);
252 p
= drm_debug_printer("i915");
253 __print_intel_runtime_pm_wakeref(&p
, &dbg
);
258 void print_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
,
259 struct drm_printer
*p
)
261 struct intel_runtime_pm_debug dbg
= {};
264 struct i915_runtime_pm
*rpm
= &i915
->runtime_pm
;
265 unsigned long alloc
= dbg
.count
;
266 depot_stack_handle_t
*s
;
268 spin_lock_irq(&rpm
->debug
.lock
);
269 dbg
.count
= rpm
->debug
.count
;
270 if (dbg
.count
<= alloc
) {
273 dbg
.count
* sizeof(*s
));
275 dbg
.last_acquire
= rpm
->debug
.last_acquire
;
276 dbg
.last_release
= rpm
->debug
.last_release
;
277 spin_unlock_irq(&rpm
->debug
.lock
);
278 if (dbg
.count
<= alloc
)
281 s
= krealloc(dbg
.owners
, dbg
.count
* sizeof(*s
), GFP_KERNEL
);
288 __print_intel_runtime_pm_wakeref(p
, &dbg
);
296 static void init_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
)
300 static depot_stack_handle_t
301 track_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
)
303 atomic_inc(&i915
->runtime_pm
.wakeref_count
);
304 assert_rpm_wakelock_held(i915
);
308 static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private
*i915
)
310 assert_rpm_wakelock_held(i915
);
311 atomic_dec(&i915
->runtime_pm
.wakeref_count
);
316 bool intel_display_power_well_is_enabled(struct drm_i915_private
*dev_priv
,
317 enum i915_power_well_id power_well_id
);
320 intel_display_power_domain_str(enum intel_display_power_domain domain
)
323 case POWER_DOMAIN_PIPE_A
:
325 case POWER_DOMAIN_PIPE_B
:
327 case POWER_DOMAIN_PIPE_C
:
329 case POWER_DOMAIN_PIPE_A_PANEL_FITTER
:
330 return "PIPE_A_PANEL_FITTER";
331 case POWER_DOMAIN_PIPE_B_PANEL_FITTER
:
332 return "PIPE_B_PANEL_FITTER";
333 case POWER_DOMAIN_PIPE_C_PANEL_FITTER
:
334 return "PIPE_C_PANEL_FITTER";
335 case POWER_DOMAIN_TRANSCODER_A
:
336 return "TRANSCODER_A";
337 case POWER_DOMAIN_TRANSCODER_B
:
338 return "TRANSCODER_B";
339 case POWER_DOMAIN_TRANSCODER_C
:
340 return "TRANSCODER_C";
341 case POWER_DOMAIN_TRANSCODER_EDP
:
342 return "TRANSCODER_EDP";
343 case POWER_DOMAIN_TRANSCODER_EDP_VDSC
:
344 return "TRANSCODER_EDP_VDSC";
345 case POWER_DOMAIN_TRANSCODER_DSI_A
:
346 return "TRANSCODER_DSI_A";
347 case POWER_DOMAIN_TRANSCODER_DSI_C
:
348 return "TRANSCODER_DSI_C";
349 case POWER_DOMAIN_PORT_DDI_A_LANES
:
350 return "PORT_DDI_A_LANES";
351 case POWER_DOMAIN_PORT_DDI_B_LANES
:
352 return "PORT_DDI_B_LANES";
353 case POWER_DOMAIN_PORT_DDI_C_LANES
:
354 return "PORT_DDI_C_LANES";
355 case POWER_DOMAIN_PORT_DDI_D_LANES
:
356 return "PORT_DDI_D_LANES";
357 case POWER_DOMAIN_PORT_DDI_E_LANES
:
358 return "PORT_DDI_E_LANES";
359 case POWER_DOMAIN_PORT_DDI_F_LANES
:
360 return "PORT_DDI_F_LANES";
361 case POWER_DOMAIN_PORT_DDI_A_IO
:
362 return "PORT_DDI_A_IO";
363 case POWER_DOMAIN_PORT_DDI_B_IO
:
364 return "PORT_DDI_B_IO";
365 case POWER_DOMAIN_PORT_DDI_C_IO
:
366 return "PORT_DDI_C_IO";
367 case POWER_DOMAIN_PORT_DDI_D_IO
:
368 return "PORT_DDI_D_IO";
369 case POWER_DOMAIN_PORT_DDI_E_IO
:
370 return "PORT_DDI_E_IO";
371 case POWER_DOMAIN_PORT_DDI_F_IO
:
372 return "PORT_DDI_F_IO";
373 case POWER_DOMAIN_PORT_DSI
:
375 case POWER_DOMAIN_PORT_CRT
:
377 case POWER_DOMAIN_PORT_OTHER
:
379 case POWER_DOMAIN_VGA
:
381 case POWER_DOMAIN_AUDIO
:
383 case POWER_DOMAIN_PLLS
:
385 case POWER_DOMAIN_AUX_A
:
387 case POWER_DOMAIN_AUX_B
:
389 case POWER_DOMAIN_AUX_C
:
391 case POWER_DOMAIN_AUX_D
:
393 case POWER_DOMAIN_AUX_E
:
395 case POWER_DOMAIN_AUX_F
:
397 case POWER_DOMAIN_AUX_IO_A
:
399 case POWER_DOMAIN_AUX_TBT1
:
401 case POWER_DOMAIN_AUX_TBT2
:
403 case POWER_DOMAIN_AUX_TBT3
:
405 case POWER_DOMAIN_AUX_TBT4
:
407 case POWER_DOMAIN_GMBUS
:
409 case POWER_DOMAIN_INIT
:
411 case POWER_DOMAIN_MODESET
:
413 case POWER_DOMAIN_GT_IRQ
:
416 MISSING_CASE(domain
);
421 static void intel_power_well_enable(struct drm_i915_private
*dev_priv
,
422 struct i915_power_well
*power_well
)
424 DRM_DEBUG_KMS("enabling %s\n", power_well
->desc
->name
);
425 power_well
->desc
->ops
->enable(dev_priv
, power_well
);
426 power_well
->hw_enabled
= true;
429 static void intel_power_well_disable(struct drm_i915_private
*dev_priv
,
430 struct i915_power_well
*power_well
)
432 DRM_DEBUG_KMS("disabling %s\n", power_well
->desc
->name
);
433 power_well
->hw_enabled
= false;
434 power_well
->desc
->ops
->disable(dev_priv
, power_well
);
437 static void intel_power_well_get(struct drm_i915_private
*dev_priv
,
438 struct i915_power_well
*power_well
)
440 if (!power_well
->count
++)
441 intel_power_well_enable(dev_priv
, power_well
);
444 static void intel_power_well_put(struct drm_i915_private
*dev_priv
,
445 struct i915_power_well
*power_well
)
447 WARN(!power_well
->count
, "Use count on power well %s is already zero",
448 power_well
->desc
->name
);
450 if (!--power_well
->count
)
451 intel_power_well_disable(dev_priv
, power_well
);
455 * __intel_display_power_is_enabled - unlocked check for a power domain
456 * @dev_priv: i915 device instance
457 * @domain: power domain to check
459 * This is the unlocked version of intel_display_power_is_enabled() and should
460 * only be used from error capture and recovery code where deadlocks are
464 * True when the power domain is enabled, false otherwise.
466 bool __intel_display_power_is_enabled(struct drm_i915_private
*dev_priv
,
467 enum intel_display_power_domain domain
)
469 struct i915_power_well
*power_well
;
472 if (dev_priv
->runtime_pm
.suspended
)
477 for_each_power_domain_well_reverse(dev_priv
, power_well
, BIT_ULL(domain
)) {
478 if (power_well
->desc
->always_on
)
481 if (!power_well
->hw_enabled
) {
491 * intel_display_power_is_enabled - check for a power domain
492 * @dev_priv: i915 device instance
493 * @domain: power domain to check
495 * This function can be used to check the hw power domain state. It is mostly
496 * used in hardware state readout functions. Everywhere else code should rely
497 * upon explicit power domain reference counting to ensure that the hardware
498 * block is powered up before accessing it.
500 * Callers must hold the relevant modesetting locks to ensure that concurrent
501 * threads can't disable the power well while the caller tries to read a few
505 * True when the power domain is enabled, false otherwise.
507 bool intel_display_power_is_enabled(struct drm_i915_private
*dev_priv
,
508 enum intel_display_power_domain domain
)
510 struct i915_power_domains
*power_domains
;
513 power_domains
= &dev_priv
->power_domains
;
515 mutex_lock(&power_domains
->lock
);
516 ret
= __intel_display_power_is_enabled(dev_priv
, domain
);
517 mutex_unlock(&power_domains
->lock
);
523 * Starting with Haswell, we have a "Power Down Well" that can be turned off
524 * when not needed anymore. We have 4 registers that can request the power well
525 * to be enabled, and it will only be disabled if none of the registers is
526 * requesting it to be enabled.
528 static void hsw_power_well_post_enable(struct drm_i915_private
*dev_priv
,
529 u8 irq_pipe_mask
, bool has_vga
)
531 struct pci_dev
*pdev
= dev_priv
->drm
.pdev
;
534 * After we re-enable the power well, if we touch VGA register 0x3d5
535 * we'll get unclaimed register interrupts. This stops after we write
536 * anything to the VGA MSR register. The vgacon module uses this
537 * register all the time, so if we unbind our driver and, as a
538 * consequence, bind vgacon, we'll get stuck in an infinite loop at
539 * console_unlock(). So make here we touch the VGA MSR register, making
540 * sure vgacon can keep working normally without triggering interrupts
541 * and error messages.
544 vga_get_uninterruptible(pdev
, VGA_RSRC_LEGACY_IO
);
545 outb(inb(VGA_MSR_READ
), VGA_MSR_WRITE
);
546 vga_put(pdev
, VGA_RSRC_LEGACY_IO
);
550 gen8_irq_power_well_post_enable(dev_priv
, irq_pipe_mask
);
553 static void hsw_power_well_pre_disable(struct drm_i915_private
*dev_priv
,
557 gen8_irq_power_well_pre_disable(dev_priv
, irq_pipe_mask
);
561 static void hsw_wait_for_power_well_enable(struct drm_i915_private
*dev_priv
,
562 struct i915_power_well
*power_well
)
564 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
565 int pw_idx
= power_well
->desc
->hsw
.idx
;
567 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
568 WARN_ON(intel_wait_for_register(dev_priv
,
570 HSW_PWR_WELL_CTL_STATE(pw_idx
),
571 HSW_PWR_WELL_CTL_STATE(pw_idx
),
575 static u32
hsw_power_well_requesters(struct drm_i915_private
*dev_priv
,
576 const struct i915_power_well_regs
*regs
,
579 u32 req_mask
= HSW_PWR_WELL_CTL_REQ(pw_idx
);
582 ret
= I915_READ(regs
->bios
) & req_mask
? 1 : 0;
583 ret
|= I915_READ(regs
->driver
) & req_mask
? 2 : 0;
585 ret
|= I915_READ(regs
->kvmr
) & req_mask
? 4 : 0;
586 ret
|= I915_READ(regs
->debug
) & req_mask
? 8 : 0;
591 static void hsw_wait_for_power_well_disable(struct drm_i915_private
*dev_priv
,
592 struct i915_power_well
*power_well
)
594 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
595 int pw_idx
= power_well
->desc
->hsw
.idx
;
600 * Bspec doesn't require waiting for PWs to get disabled, but still do
601 * this for paranoia. The known cases where a PW will be forced on:
602 * - a KVMR request on any power well via the KVMR request register
603 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
604 * DEBUG request registers
605 * Skip the wait in case any of the request bits are set and print a
606 * diagnostic message.
608 wait_for((disabled
= !(I915_READ(regs
->driver
) &
609 HSW_PWR_WELL_CTL_STATE(pw_idx
))) ||
610 (reqs
= hsw_power_well_requesters(dev_priv
, regs
, pw_idx
)), 1);
614 DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
615 power_well
->desc
->name
,
616 !!(reqs
& 1), !!(reqs
& 2), !!(reqs
& 4), !!(reqs
& 8));
619 static void gen9_wait_for_power_well_fuses(struct drm_i915_private
*dev_priv
,
620 enum skl_power_gate pg
)
622 /* Timeout 5us for PG#0, for other PGs 1us */
623 WARN_ON(intel_wait_for_register(dev_priv
, SKL_FUSE_STATUS
,
624 SKL_FUSE_PG_DIST_STATUS(pg
),
625 SKL_FUSE_PG_DIST_STATUS(pg
), 1));
628 static void hsw_power_well_enable(struct drm_i915_private
*dev_priv
,
629 struct i915_power_well
*power_well
)
631 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
632 int pw_idx
= power_well
->desc
->hsw
.idx
;
633 bool wait_fuses
= power_well
->desc
->hsw
.has_fuses
;
634 enum skl_power_gate
uninitialized_var(pg
);
638 pg
= INTEL_GEN(dev_priv
) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx
) :
639 SKL_PW_CTL_IDX_TO_PG(pw_idx
);
641 * For PW1 we have to wait both for the PW0/PG0 fuse state
642 * before enabling the power well and PW1/PG1's own fuse
643 * state after the enabling. For all other power wells with
644 * fuses we only have to wait for that PW/PG's fuse state
645 * after the enabling.
648 gen9_wait_for_power_well_fuses(dev_priv
, SKL_PG0
);
651 val
= I915_READ(regs
->driver
);
652 I915_WRITE(regs
->driver
, val
| HSW_PWR_WELL_CTL_REQ(pw_idx
));
653 hsw_wait_for_power_well_enable(dev_priv
, power_well
);
655 /* Display WA #1178: cnl */
656 if (IS_CANNONLAKE(dev_priv
) &&
657 pw_idx
>= GLK_PW_CTL_IDX_AUX_B
&&
658 pw_idx
<= CNL_PW_CTL_IDX_AUX_F
) {
659 val
= I915_READ(CNL_AUX_ANAOVRD1(pw_idx
));
660 val
|= CNL_AUX_ANAOVRD1_ENABLE
| CNL_AUX_ANAOVRD1_LDO_BYPASS
;
661 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx
), val
);
665 gen9_wait_for_power_well_fuses(dev_priv
, pg
);
667 hsw_power_well_post_enable(dev_priv
,
668 power_well
->desc
->hsw
.irq_pipe_mask
,
669 power_well
->desc
->hsw
.has_vga
);
672 static void hsw_power_well_disable(struct drm_i915_private
*dev_priv
,
673 struct i915_power_well
*power_well
)
675 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
676 int pw_idx
= power_well
->desc
->hsw
.idx
;
679 hsw_power_well_pre_disable(dev_priv
,
680 power_well
->desc
->hsw
.irq_pipe_mask
);
682 val
= I915_READ(regs
->driver
);
683 I915_WRITE(regs
->driver
, val
& ~HSW_PWR_WELL_CTL_REQ(pw_idx
));
684 hsw_wait_for_power_well_disable(dev_priv
, power_well
);
687 #define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
690 icl_combo_phy_aux_power_well_enable(struct drm_i915_private
*dev_priv
,
691 struct i915_power_well
*power_well
)
693 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
694 int pw_idx
= power_well
->desc
->hsw
.idx
;
695 enum port port
= ICL_AUX_PW_TO_PORT(pw_idx
);
698 val
= I915_READ(regs
->driver
);
699 I915_WRITE(regs
->driver
, val
| HSW_PWR_WELL_CTL_REQ(pw_idx
));
701 val
= I915_READ(ICL_PORT_CL_DW12(port
));
702 I915_WRITE(ICL_PORT_CL_DW12(port
), val
| ICL_LANE_ENABLE_AUX
);
704 hsw_wait_for_power_well_enable(dev_priv
, power_well
);
706 /* Display WA #1178: icl */
707 if (IS_ICELAKE(dev_priv
) &&
708 pw_idx
>= ICL_PW_CTL_IDX_AUX_A
&& pw_idx
<= ICL_PW_CTL_IDX_AUX_B
&&
709 !intel_bios_is_port_edp(dev_priv
, port
)) {
710 val
= I915_READ(ICL_AUX_ANAOVRD1(pw_idx
));
711 val
|= ICL_AUX_ANAOVRD1_ENABLE
| ICL_AUX_ANAOVRD1_LDO_BYPASS
;
712 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx
), val
);
717 icl_combo_phy_aux_power_well_disable(struct drm_i915_private
*dev_priv
,
718 struct i915_power_well
*power_well
)
720 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
721 int pw_idx
= power_well
->desc
->hsw
.idx
;
722 enum port port
= ICL_AUX_PW_TO_PORT(pw_idx
);
725 val
= I915_READ(ICL_PORT_CL_DW12(port
));
726 I915_WRITE(ICL_PORT_CL_DW12(port
), val
& ~ICL_LANE_ENABLE_AUX
);
728 val
= I915_READ(regs
->driver
);
729 I915_WRITE(regs
->driver
, val
& ~HSW_PWR_WELL_CTL_REQ(pw_idx
));
731 hsw_wait_for_power_well_disable(dev_priv
, power_well
);
734 #define ICL_AUX_PW_TO_CH(pw_idx) \
735 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
738 icl_tc_phy_aux_power_well_enable(struct drm_i915_private
*dev_priv
,
739 struct i915_power_well
*power_well
)
741 enum aux_ch aux_ch
= ICL_AUX_PW_TO_CH(power_well
->desc
->hsw
.idx
);
744 val
= I915_READ(DP_AUX_CH_CTL(aux_ch
));
745 val
&= ~DP_AUX_CH_CTL_TBT_IO
;
746 if (power_well
->desc
->hsw
.is_tc_tbt
)
747 val
|= DP_AUX_CH_CTL_TBT_IO
;
748 I915_WRITE(DP_AUX_CH_CTL(aux_ch
), val
);
750 hsw_power_well_enable(dev_priv
, power_well
);
754 * We should only use the power well if we explicitly asked the hardware to
755 * enable it, so check if it's enabled and also check if we've requested it to
758 static bool hsw_power_well_enabled(struct drm_i915_private
*dev_priv
,
759 struct i915_power_well
*power_well
)
761 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
762 enum i915_power_well_id id
= power_well
->desc
->id
;
763 int pw_idx
= power_well
->desc
->hsw
.idx
;
764 u32 mask
= HSW_PWR_WELL_CTL_REQ(pw_idx
) |
765 HSW_PWR_WELL_CTL_STATE(pw_idx
);
768 val
= I915_READ(regs
->driver
);
771 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
772 * and the MISC_IO PW will be not restored, so check instead for the
773 * BIOS's own request bits, which are forced-on for these power wells
774 * when exiting DC5/6.
776 if (IS_GEN(dev_priv
, 9) && !IS_GEN9_LP(dev_priv
) &&
777 (id
== SKL_DISP_PW_1
|| id
== SKL_DISP_PW_MISC_IO
))
778 val
|= I915_READ(regs
->bios
);
780 return (val
& mask
) == mask
;
783 static void assert_can_enable_dc9(struct drm_i915_private
*dev_priv
)
785 WARN_ONCE((I915_READ(DC_STATE_EN
) & DC_STATE_EN_DC9
),
786 "DC9 already programmed to be enabled.\n");
787 WARN_ONCE(I915_READ(DC_STATE_EN
) & DC_STATE_EN_UPTO_DC5
,
788 "DC5 still not disabled to enable DC9.\n");
789 WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2
) &
790 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2
),
791 "Power well 2 on.\n");
792 WARN_ONCE(intel_irqs_enabled(dev_priv
),
793 "Interrupts not disabled yet.\n");
796 * TODO: check for the following to verify the conditions to enter DC9
797 * state are satisfied:
798 * 1] Check relevant display engine registers to verify if mode set
799 * disable sequence was followed.
800 * 2] Check if display uninitialize sequence is initialized.
804 static void assert_can_disable_dc9(struct drm_i915_private
*dev_priv
)
806 WARN_ONCE(intel_irqs_enabled(dev_priv
),
807 "Interrupts not disabled yet.\n");
808 WARN_ONCE(I915_READ(DC_STATE_EN
) & DC_STATE_EN_UPTO_DC5
,
809 "DC5 still not disabled.\n");
812 * TODO: check for the following to verify DC9 state was indeed
813 * entered before programming to disable it:
814 * 1] Check relevant display engine registers to verify if mode
815 * set disable sequence was followed.
816 * 2] Check if display uninitialize sequence is initialized.
820 static void gen9_write_dc_state(struct drm_i915_private
*dev_priv
,
827 I915_WRITE(DC_STATE_EN
, state
);
829 /* It has been observed that disabling the dc6 state sometimes
830 * doesn't stick and dmc keeps returning old value. Make sure
831 * the write really sticks enough times and also force rewrite until
832 * we are confident that state is exactly what we want.
835 v
= I915_READ(DC_STATE_EN
);
838 I915_WRITE(DC_STATE_EN
, state
);
841 } else if (rereads
++ > 5) {
845 } while (rewrites
< 100);
848 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
851 /* Most of the times we need one retry, avoid spam */
853 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
857 static u32
gen9_dc_mask(struct drm_i915_private
*dev_priv
)
861 mask
= DC_STATE_EN_UPTO_DC5
;
862 if (INTEL_GEN(dev_priv
) >= 11)
863 mask
|= DC_STATE_EN_UPTO_DC6
| DC_STATE_EN_DC9
;
864 else if (IS_GEN9_LP(dev_priv
))
865 mask
|= DC_STATE_EN_DC9
;
867 mask
|= DC_STATE_EN_UPTO_DC6
;
872 void gen9_sanitize_dc_state(struct drm_i915_private
*dev_priv
)
876 val
= I915_READ(DC_STATE_EN
) & gen9_dc_mask(dev_priv
);
878 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
879 dev_priv
->csr
.dc_state
, val
);
880 dev_priv
->csr
.dc_state
= val
;
884 * gen9_set_dc_state - set target display C power state
885 * @dev_priv: i915 device instance
886 * @state: target DC power state
888 * - DC_STATE_EN_UPTO_DC5
889 * - DC_STATE_EN_UPTO_DC6
892 * Signal to DMC firmware/HW the target DC power state passed in @state.
893 * DMC/HW can turn off individual display clocks and power rails when entering
894 * a deeper DC power state (higher in number) and turns these back when exiting
895 * that state to a shallower power state (lower in number). The HW will decide
896 * when to actually enter a given state on an on-demand basis, for instance
897 * depending on the active state of display pipes. The state of display
898 * registers backed by affected power rails are saved/restored as needed.
900 * Based on the above enabling a deeper DC power state is asynchronous wrt.
901 * enabling it. Disabling a deeper power state is synchronous: for instance
902 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
903 * back on and register state is restored. This is guaranteed by the MMIO write
904 * to DC_STATE_EN blocking until the state is restored.
906 static void gen9_set_dc_state(struct drm_i915_private
*dev_priv
, u32 state
)
911 if (WARN_ON_ONCE(state
& ~dev_priv
->csr
.allowed_dc_mask
))
912 state
&= dev_priv
->csr
.allowed_dc_mask
;
914 val
= I915_READ(DC_STATE_EN
);
915 mask
= gen9_dc_mask(dev_priv
);
916 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
919 /* Check if DMC is ignoring our DC state requests */
920 if ((val
& mask
) != dev_priv
->csr
.dc_state
)
921 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
922 dev_priv
->csr
.dc_state
, val
& mask
);
927 gen9_write_dc_state(dev_priv
, val
);
929 dev_priv
->csr
.dc_state
= val
& mask
;
932 void bxt_enable_dc9(struct drm_i915_private
*dev_priv
)
934 assert_can_enable_dc9(dev_priv
);
936 DRM_DEBUG_KMS("Enabling DC9\n");
938 * Power sequencer reset is not needed on
939 * platforms with South Display Engine on PCH,
940 * because PPS registers are always on.
942 if (!HAS_PCH_SPLIT(dev_priv
))
943 intel_power_sequencer_reset(dev_priv
);
944 gen9_set_dc_state(dev_priv
, DC_STATE_EN_DC9
);
947 void bxt_disable_dc9(struct drm_i915_private
*dev_priv
)
949 assert_can_disable_dc9(dev_priv
);
951 DRM_DEBUG_KMS("Disabling DC9\n");
953 gen9_set_dc_state(dev_priv
, DC_STATE_DISABLE
);
955 intel_pps_unlock_regs_wa(dev_priv
);
958 static void assert_csr_loaded(struct drm_i915_private
*dev_priv
)
960 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
961 "CSR program storage start is NULL\n");
962 WARN_ONCE(!I915_READ(CSR_SSP_BASE
), "CSR SSP Base Not fine\n");
963 WARN_ONCE(!I915_READ(CSR_HTP_SKL
), "CSR HTP Not fine\n");
966 static struct i915_power_well
*
967 lookup_power_well(struct drm_i915_private
*dev_priv
,
968 enum i915_power_well_id power_well_id
)
970 struct i915_power_well
*power_well
;
972 for_each_power_well(dev_priv
, power_well
)
973 if (power_well
->desc
->id
== power_well_id
)
977 * It's not feasible to add error checking code to the callers since
978 * this condition really shouldn't happen and it doesn't even make sense
979 * to abort things like display initialization sequences. Just return
980 * the first power well and hope the WARN gets reported so we can fix
983 WARN(1, "Power well %d not defined for this platform\n", power_well_id
);
984 return &dev_priv
->power_domains
.power_wells
[0];
987 static void assert_can_enable_dc5(struct drm_i915_private
*dev_priv
)
989 bool pg2_enabled
= intel_display_power_well_is_enabled(dev_priv
,
992 WARN_ONCE(pg2_enabled
, "PG2 not disabled to enable DC5.\n");
994 WARN_ONCE((I915_READ(DC_STATE_EN
) & DC_STATE_EN_UPTO_DC5
),
995 "DC5 already programmed to be enabled.\n");
996 assert_rpm_wakelock_held(dev_priv
);
998 assert_csr_loaded(dev_priv
);
1001 void gen9_enable_dc5(struct drm_i915_private
*dev_priv
)
1003 assert_can_enable_dc5(dev_priv
);
1005 DRM_DEBUG_KMS("Enabling DC5\n");
1007 /* Wa Display #1183: skl,kbl,cfl */
1008 if (IS_GEN9_BC(dev_priv
))
1009 I915_WRITE(GEN8_CHICKEN_DCPR_1
, I915_READ(GEN8_CHICKEN_DCPR_1
) |
1010 SKL_SELECT_ALTERNATE_DC_EXIT
);
1012 gen9_set_dc_state(dev_priv
, DC_STATE_EN_UPTO_DC5
);
1015 static void assert_can_enable_dc6(struct drm_i915_private
*dev_priv
)
1017 WARN_ONCE(I915_READ(UTIL_PIN_CTL
) & UTIL_PIN_ENABLE
,
1018 "Backlight is not disabled.\n");
1019 WARN_ONCE((I915_READ(DC_STATE_EN
) & DC_STATE_EN_UPTO_DC6
),
1020 "DC6 already programmed to be enabled.\n");
1022 assert_csr_loaded(dev_priv
);
1025 void skl_enable_dc6(struct drm_i915_private
*dev_priv
)
1027 assert_can_enable_dc6(dev_priv
);
1029 DRM_DEBUG_KMS("Enabling DC6\n");
1031 /* Wa Display #1183: skl,kbl,cfl */
1032 if (IS_GEN9_BC(dev_priv
))
1033 I915_WRITE(GEN8_CHICKEN_DCPR_1
, I915_READ(GEN8_CHICKEN_DCPR_1
) |
1034 SKL_SELECT_ALTERNATE_DC_EXIT
);
1036 gen9_set_dc_state(dev_priv
, DC_STATE_EN_UPTO_DC6
);
1039 static void hsw_power_well_sync_hw(struct drm_i915_private
*dev_priv
,
1040 struct i915_power_well
*power_well
)
1042 const struct i915_power_well_regs
*regs
= power_well
->desc
->hsw
.regs
;
1043 int pw_idx
= power_well
->desc
->hsw
.idx
;
1044 u32 mask
= HSW_PWR_WELL_CTL_REQ(pw_idx
);
1045 u32 bios_req
= I915_READ(regs
->bios
);
1047 /* Take over the request bit if set by BIOS. */
1048 if (bios_req
& mask
) {
1049 u32 drv_req
= I915_READ(regs
->driver
);
1051 if (!(drv_req
& mask
))
1052 I915_WRITE(regs
->driver
, drv_req
| mask
);
1053 I915_WRITE(regs
->bios
, bios_req
& ~mask
);
1057 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private
*dev_priv
,
1058 struct i915_power_well
*power_well
)
1060 bxt_ddi_phy_init(dev_priv
, power_well
->desc
->bxt
.phy
);
1063 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private
*dev_priv
,
1064 struct i915_power_well
*power_well
)
1066 bxt_ddi_phy_uninit(dev_priv
, power_well
->desc
->bxt
.phy
);
1069 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private
*dev_priv
,
1070 struct i915_power_well
*power_well
)
1072 return bxt_ddi_phy_is_enabled(dev_priv
, power_well
->desc
->bxt
.phy
);
1075 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private
*dev_priv
)
1077 struct i915_power_well
*power_well
;
1079 power_well
= lookup_power_well(dev_priv
, BXT_DISP_PW_DPIO_CMN_A
);
1080 if (power_well
->count
> 0)
1081 bxt_ddi_phy_verify_state(dev_priv
, power_well
->desc
->bxt
.phy
);
1083 power_well
= lookup_power_well(dev_priv
, VLV_DISP_PW_DPIO_CMN_BC
);
1084 if (power_well
->count
> 0)
1085 bxt_ddi_phy_verify_state(dev_priv
, power_well
->desc
->bxt
.phy
);
1087 if (IS_GEMINILAKE(dev_priv
)) {
1088 power_well
= lookup_power_well(dev_priv
,
1089 GLK_DISP_PW_DPIO_CMN_C
);
1090 if (power_well
->count
> 0)
1091 bxt_ddi_phy_verify_state(dev_priv
,
1092 power_well
->desc
->bxt
.phy
);
1096 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private
*dev_priv
,
1097 struct i915_power_well
*power_well
)
1099 return (I915_READ(DC_STATE_EN
) & DC_STATE_EN_UPTO_DC5_DC6_MASK
) == 0;
1102 static void gen9_assert_dbuf_enabled(struct drm_i915_private
*dev_priv
)
1104 u32 tmp
= I915_READ(DBUF_CTL
);
1106 WARN((tmp
& (DBUF_POWER_STATE
| DBUF_POWER_REQUEST
)) !=
1107 (DBUF_POWER_STATE
| DBUF_POWER_REQUEST
),
1108 "Unexpected DBuf power power state (0x%08x)\n", tmp
);
1111 static void gen9_dc_off_power_well_enable(struct drm_i915_private
*dev_priv
,
1112 struct i915_power_well
*power_well
)
1114 struct intel_cdclk_state cdclk_state
= {};
1116 gen9_set_dc_state(dev_priv
, DC_STATE_DISABLE
);
1118 dev_priv
->display
.get_cdclk(dev_priv
, &cdclk_state
);
1119 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1120 WARN_ON(intel_cdclk_needs_modeset(&dev_priv
->cdclk
.hw
, &cdclk_state
));
1122 gen9_assert_dbuf_enabled(dev_priv
);
1124 if (IS_GEN9_LP(dev_priv
))
1125 bxt_verify_ddi_phy_power_wells(dev_priv
);
1127 if (INTEL_GEN(dev_priv
) >= 11)
1129 * DMC retains HW context only for port A, the other combo
1130 * PHY's HW context for port B is lost after DC transitions,
1131 * so we need to restore it manually.
1133 icl_combo_phys_init(dev_priv
);
1136 static void gen9_dc_off_power_well_disable(struct drm_i915_private
*dev_priv
,
1137 struct i915_power_well
*power_well
)
1139 if (!dev_priv
->csr
.dmc_payload
)
1142 if (dev_priv
->csr
.allowed_dc_mask
& DC_STATE_EN_UPTO_DC6
)
1143 skl_enable_dc6(dev_priv
);
1144 else if (dev_priv
->csr
.allowed_dc_mask
& DC_STATE_EN_UPTO_DC5
)
1145 gen9_enable_dc5(dev_priv
);
/* No HW state to resync for fixed-function wells on old platforms. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
/* Always-on wells cannot be toggled; enable/disable are no-ops. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1158 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private
*dev_priv
,
1159 struct i915_power_well
*power_well
)
1164 static void i830_pipes_power_well_enable(struct drm_i915_private
*dev_priv
,
1165 struct i915_power_well
*power_well
)
1167 if ((I915_READ(PIPECONF(PIPE_A
)) & PIPECONF_ENABLE
) == 0)
1168 i830_enable_pipe(dev_priv
, PIPE_A
);
1169 if ((I915_READ(PIPECONF(PIPE_B
)) & PIPECONF_ENABLE
) == 0)
1170 i830_enable_pipe(dev_priv
, PIPE_B
);
1173 static void i830_pipes_power_well_disable(struct drm_i915_private
*dev_priv
,
1174 struct i915_power_well
*power_well
)
1176 i830_disable_pipe(dev_priv
, PIPE_B
);
1177 i830_disable_pipe(dev_priv
, PIPE_A
);
1180 static bool i830_pipes_power_well_enabled(struct drm_i915_private
*dev_priv
,
1181 struct i915_power_well
*power_well
)
1183 return I915_READ(PIPECONF(PIPE_A
)) & PIPECONF_ENABLE
&&
1184 I915_READ(PIPECONF(PIPE_B
)) & PIPECONF_ENABLE
;
1187 static void i830_pipes_power_well_sync_hw(struct drm_i915_private
*dev_priv
,
1188 struct i915_power_well
*power_well
)
1190 if (power_well
->count
> 0)
1191 i830_pipes_power_well_enable(dev_priv
, power_well
);
1193 i830_pipes_power_well_disable(dev_priv
, power_well
);
1196 static void vlv_set_power_well(struct drm_i915_private
*dev_priv
,
1197 struct i915_power_well
*power_well
, bool enable
)
1199 int pw_idx
= power_well
->desc
->vlv
.idx
;
1204 mask
= PUNIT_PWRGT_MASK(pw_idx
);
1205 state
= enable
? PUNIT_PWRGT_PWR_ON(pw_idx
) :
1206 PUNIT_PWRGT_PWR_GATE(pw_idx
);
1208 mutex_lock(&dev_priv
->pcu_lock
);
1211 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1216 ctrl
= vlv_punit_read(dev_priv
, PUNIT_REG_PWRGT_CTRL
);
1219 vlv_punit_write(dev_priv
, PUNIT_REG_PWRGT_CTRL
, ctrl
);
1221 if (wait_for(COND
, 100))
1222 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1224 vlv_punit_read(dev_priv
, PUNIT_REG_PWRGT_CTRL
));
1229 mutex_unlock(&dev_priv
->pcu_lock
);
1232 static void vlv_power_well_enable(struct drm_i915_private
*dev_priv
,
1233 struct i915_power_well
*power_well
)
1235 vlv_set_power_well(dev_priv
, power_well
, true);
1238 static void vlv_power_well_disable(struct drm_i915_private
*dev_priv
,
1239 struct i915_power_well
*power_well
)
1241 vlv_set_power_well(dev_priv
, power_well
, false);
1244 static bool vlv_power_well_enabled(struct drm_i915_private
*dev_priv
,
1245 struct i915_power_well
*power_well
)
1247 int pw_idx
= power_well
->desc
->vlv
.idx
;
1248 bool enabled
= false;
1253 mask
= PUNIT_PWRGT_MASK(pw_idx
);
1254 ctrl
= PUNIT_PWRGT_PWR_ON(pw_idx
);
1256 mutex_lock(&dev_priv
->pcu_lock
);
1258 state
= vlv_punit_read(dev_priv
, PUNIT_REG_PWRGT_STATUS
) & mask
;
1260 * We only ever set the power-on and power-gate states, anything
1261 * else is unexpected.
1263 WARN_ON(state
!= PUNIT_PWRGT_PWR_ON(pw_idx
) &&
1264 state
!= PUNIT_PWRGT_PWR_GATE(pw_idx
));
1269 * A transient state at this point would mean some unexpected party
1270 * is poking at the power controls too.
1272 ctrl
= vlv_punit_read(dev_priv
, PUNIT_REG_PWRGT_CTRL
) & mask
;
1273 WARN_ON(ctrl
!= state
);
1275 mutex_unlock(&dev_priv
->pcu_lock
);
1280 static void vlv_init_display_clock_gating(struct drm_i915_private
*dev_priv
)
1285 * On driver load, a pipe may be active and driving a DSI display.
1286 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1287 * (and never recovering) in this case. intel_dsi_post_disable() will
1288 * clear it when we turn off the display.
1290 val
= I915_READ(DSPCLK_GATE_D
);
1291 val
&= DPOUNIT_CLOCK_GATE_DISABLE
;
1292 val
|= VRHUNIT_CLOCK_GATE_DISABLE
;
1293 I915_WRITE(DSPCLK_GATE_D
, val
);
1296 * Disable trickle feed and enable pnd deadline calculation
1298 I915_WRITE(MI_ARB_VLV
, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE
);
1299 I915_WRITE(CBR1_VLV
, 0);
1301 WARN_ON(dev_priv
->rawclk_freq
== 0);
1303 I915_WRITE(RAWCLK_FREQ_VLV
,
1304 DIV_ROUND_CLOSEST(dev_priv
->rawclk_freq
, 1000));
1307 static void vlv_display_power_well_init(struct drm_i915_private
*dev_priv
)
1309 struct intel_encoder
*encoder
;
1313 * Enable the CRI clock source so we can get at the
1314 * display and the reference clock for VGA
1315 * hotplug / manual detection. Supposedly DSI also
1316 * needs the ref clock up and running.
1318 * CHV DPLL B/C have some issues if VGA mode is enabled.
1320 for_each_pipe(dev_priv
, pipe
) {
1321 u32 val
= I915_READ(DPLL(pipe
));
1323 val
|= DPLL_REF_CLK_ENABLE_VLV
| DPLL_VGA_MODE_DIS
;
1325 val
|= DPLL_INTEGRATED_CRI_CLK_VLV
;
1327 I915_WRITE(DPLL(pipe
), val
);
1330 vlv_init_display_clock_gating(dev_priv
);
1332 spin_lock_irq(&dev_priv
->irq_lock
);
1333 valleyview_enable_display_irqs(dev_priv
);
1334 spin_unlock_irq(&dev_priv
->irq_lock
);
1337 * During driver initialization/resume we can avoid restoring the
1338 * part of the HW/SW state that will be inited anyway explicitly.
1340 if (dev_priv
->power_domains
.initializing
)
1343 intel_hpd_init(dev_priv
);
1345 /* Re-enable the ADPA, if we have one */
1346 for_each_intel_encoder(&dev_priv
->drm
, encoder
) {
1347 if (encoder
->type
== INTEL_OUTPUT_ANALOG
)
1348 intel_crt_reset(&encoder
->base
);
1351 i915_redisable_vga_power_on(dev_priv
);
1353 intel_pps_unlock_regs_wa(dev_priv
);
1356 static void vlv_display_power_well_deinit(struct drm_i915_private
*dev_priv
)
1358 spin_lock_irq(&dev_priv
->irq_lock
);
1359 valleyview_disable_display_irqs(dev_priv
);
1360 spin_unlock_irq(&dev_priv
->irq_lock
);
1362 /* make sure we're done processing display irqs */
1363 synchronize_irq(dev_priv
->drm
.irq
);
1365 intel_power_sequencer_reset(dev_priv
);
1367 /* Prevent us from re-enabling polling on accident in late suspend */
1368 if (!dev_priv
->drm
.dev
->power
.is_suspended
)
1369 intel_hpd_poll_init(dev_priv
);
1372 static void vlv_display_power_well_enable(struct drm_i915_private
*dev_priv
,
1373 struct i915_power_well
*power_well
)
1375 vlv_set_power_well(dev_priv
, power_well
, true);
1377 vlv_display_power_well_init(dev_priv
);
1380 static void vlv_display_power_well_disable(struct drm_i915_private
*dev_priv
,
1381 struct i915_power_well
*power_well
)
1383 vlv_display_power_well_deinit(dev_priv
);
1385 vlv_set_power_well(dev_priv
, power_well
, false);
1388 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private
*dev_priv
,
1389 struct i915_power_well
*power_well
)
1391 /* since ref/cri clock was enabled */
1392 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1394 vlv_set_power_well(dev_priv
, power_well
, true);
1397 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1398 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1399 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1400 * b. The other bits such as sfr settings / modesel may all
1403 * This should only be done on init and resume from S3 with
1404 * both PLLs disabled, or we risk losing DPIO and PLL
1407 I915_WRITE(DPIO_CTL
, I915_READ(DPIO_CTL
) | DPIO_CMNRST
);
1410 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private
*dev_priv
,
1411 struct i915_power_well
*power_well
)
1415 for_each_pipe(dev_priv
, pipe
)
1416 assert_pll_disabled(dev_priv
, pipe
);
1418 /* Assert common reset */
1419 I915_WRITE(DPIO_CTL
, I915_READ(DPIO_CTL
) & ~DPIO_CMNRST
);
1421 vlv_set_power_well(dev_priv
, power_well
, false);
/* Mask covering every defined display power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True iff all bits in @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1428 static void assert_chv_phy_status(struct drm_i915_private
*dev_priv
)
1430 struct i915_power_well
*cmn_bc
=
1431 lookup_power_well(dev_priv
, VLV_DISP_PW_DPIO_CMN_BC
);
1432 struct i915_power_well
*cmn_d
=
1433 lookup_power_well(dev_priv
, CHV_DISP_PW_DPIO_CMN_D
);
1434 u32 phy_control
= dev_priv
->chv_phy_control
;
1436 u32 phy_status_mask
= 0xffffffff;
1439 * The BIOS can leave the PHY is some weird state
1440 * where it doesn't fully power down some parts.
1441 * Disable the asserts until the PHY has been fully
1442 * reset (ie. the power well has been disabled at
1445 if (!dev_priv
->chv_phy_assert
[DPIO_PHY0
])
1446 phy_status_mask
&= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0
, DPIO_CH0
) |
1447 PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH0
, 0) |
1448 PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH0
, 1) |
1449 PHY_STATUS_CMN_LDO(DPIO_PHY0
, DPIO_CH1
) |
1450 PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH1
, 0) |
1451 PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH1
, 1));
1453 if (!dev_priv
->chv_phy_assert
[DPIO_PHY1
])
1454 phy_status_mask
&= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1
, DPIO_CH0
) |
1455 PHY_STATUS_SPLINE_LDO(DPIO_PHY1
, DPIO_CH0
, 0) |
1456 PHY_STATUS_SPLINE_LDO(DPIO_PHY1
, DPIO_CH0
, 1));
1458 if (cmn_bc
->desc
->ops
->is_enabled(dev_priv
, cmn_bc
)) {
1459 phy_status
|= PHY_POWERGOOD(DPIO_PHY0
);
1461 /* this assumes override is only used to enable lanes */
1462 if ((phy_control
& PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0
, DPIO_CH0
)) == 0)
1463 phy_control
|= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0
, DPIO_CH0
);
1465 if ((phy_control
& PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0
, DPIO_CH1
)) == 0)
1466 phy_control
|= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0
, DPIO_CH1
);
1468 /* CL1 is on whenever anything is on in either channel */
1469 if (BITS_SET(phy_control
,
1470 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0
, DPIO_CH0
) |
1471 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0
, DPIO_CH1
)))
1472 phy_status
|= PHY_STATUS_CMN_LDO(DPIO_PHY0
, DPIO_CH0
);
1475 * The DPLLB check accounts for the pipe B + port A usage
1476 * with CL2 powered up but all the lanes in the second channel
1479 if (BITS_SET(phy_control
,
1480 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0
, DPIO_CH1
)) &&
1481 (I915_READ(DPLL(PIPE_B
)) & DPLL_VCO_ENABLE
) == 0)
1482 phy_status
|= PHY_STATUS_CMN_LDO(DPIO_PHY0
, DPIO_CH1
);
1484 if (BITS_SET(phy_control
,
1485 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0
, DPIO_CH0
)))
1486 phy_status
|= PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH0
, 0);
1487 if (BITS_SET(phy_control
,
1488 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0
, DPIO_CH0
)))
1489 phy_status
|= PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH0
, 1);
1491 if (BITS_SET(phy_control
,
1492 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0
, DPIO_CH1
)))
1493 phy_status
|= PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH1
, 0);
1494 if (BITS_SET(phy_control
,
1495 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0
, DPIO_CH1
)))
1496 phy_status
|= PHY_STATUS_SPLINE_LDO(DPIO_PHY0
, DPIO_CH1
, 1);
1499 if (cmn_d
->desc
->ops
->is_enabled(dev_priv
, cmn_d
)) {
1500 phy_status
|= PHY_POWERGOOD(DPIO_PHY1
);
1502 /* this assumes override is only used to enable lanes */
1503 if ((phy_control
& PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1
, DPIO_CH0
)) == 0)
1504 phy_control
|= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1
, DPIO_CH0
);
1506 if (BITS_SET(phy_control
,
1507 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1
, DPIO_CH0
)))
1508 phy_status
|= PHY_STATUS_CMN_LDO(DPIO_PHY1
, DPIO_CH0
);
1510 if (BITS_SET(phy_control
,
1511 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1
, DPIO_CH0
)))
1512 phy_status
|= PHY_STATUS_SPLINE_LDO(DPIO_PHY1
, DPIO_CH0
, 0);
1513 if (BITS_SET(phy_control
,
1514 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1
, DPIO_CH0
)))
1515 phy_status
|= PHY_STATUS_SPLINE_LDO(DPIO_PHY1
, DPIO_CH0
, 1);
1518 phy_status
&= phy_status_mask
;
1521 * The PHY may be busy with some initial calibration and whatnot,
1522 * so the power state can take a while to actually change.
1524 if (intel_wait_for_register(dev_priv
,
1529 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1530 I915_READ(DISPLAY_PHY_STATUS
) & phy_status_mask
,
1531 phy_status
, dev_priv
->chv_phy_control
);
1536 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private
*dev_priv
,
1537 struct i915_power_well
*power_well
)
1543 WARN_ON_ONCE(power_well
->desc
->id
!= VLV_DISP_PW_DPIO_CMN_BC
&&
1544 power_well
->desc
->id
!= CHV_DISP_PW_DPIO_CMN_D
);
1546 if (power_well
->desc
->id
== VLV_DISP_PW_DPIO_CMN_BC
) {
1554 /* since ref/cri clock was enabled */
1555 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1556 vlv_set_power_well(dev_priv
, power_well
, true);
1558 /* Poll for phypwrgood signal */
1559 if (intel_wait_for_register(dev_priv
,
1564 DRM_ERROR("Display PHY %d is not power up\n", phy
);
1566 mutex_lock(&dev_priv
->sb_lock
);
1568 /* Enable dynamic power down */
1569 tmp
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW28
);
1570 tmp
|= DPIO_DYNPWRDOWNEN_CH0
| DPIO_CL1POWERDOWNEN
|
1571 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ
;
1572 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW28
, tmp
);
1574 if (power_well
->desc
->id
== VLV_DISP_PW_DPIO_CMN_BC
) {
1575 tmp
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW6_CH1
);
1576 tmp
|= DPIO_DYNPWRDOWNEN_CH1
;
1577 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW6_CH1
, tmp
);
1580 * Force the non-existing CL2 off. BXT does this
1581 * too, so maybe it saves some power even though
1582 * CL2 doesn't exist?
1584 tmp
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW30
);
1585 tmp
|= DPIO_CL2_LDOFUSE_PWRENB
;
1586 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW30
, tmp
);
1589 mutex_unlock(&dev_priv
->sb_lock
);
1591 dev_priv
->chv_phy_control
|= PHY_COM_LANE_RESET_DEASSERT(phy
);
1592 I915_WRITE(DISPLAY_PHY_CONTROL
, dev_priv
->chv_phy_control
);
1594 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1595 phy
, dev_priv
->chv_phy_control
);
1597 assert_chv_phy_status(dev_priv
);
1600 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private
*dev_priv
,
1601 struct i915_power_well
*power_well
)
1605 WARN_ON_ONCE(power_well
->desc
->id
!= VLV_DISP_PW_DPIO_CMN_BC
&&
1606 power_well
->desc
->id
!= CHV_DISP_PW_DPIO_CMN_D
);
1608 if (power_well
->desc
->id
== VLV_DISP_PW_DPIO_CMN_BC
) {
1610 assert_pll_disabled(dev_priv
, PIPE_A
);
1611 assert_pll_disabled(dev_priv
, PIPE_B
);
1614 assert_pll_disabled(dev_priv
, PIPE_C
);
1617 dev_priv
->chv_phy_control
&= ~PHY_COM_LANE_RESET_DEASSERT(phy
);
1618 I915_WRITE(DISPLAY_PHY_CONTROL
, dev_priv
->chv_phy_control
);
1620 vlv_set_power_well(dev_priv
, power_well
, false);
1622 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1623 phy
, dev_priv
->chv_phy_control
);
1625 /* PHY is fully reset now, so we can enable the PHY state asserts */
1626 dev_priv
->chv_phy_assert
[phy
] = true;
1628 assert_chv_phy_status(dev_priv
);
1631 static void assert_chv_phy_powergate(struct drm_i915_private
*dev_priv
, enum dpio_phy phy
,
1632 enum dpio_channel ch
, bool override
, unsigned int mask
)
1634 enum pipe pipe
= phy
== DPIO_PHY0
? PIPE_A
: PIPE_C
;
1635 u32 reg
, val
, expected
, actual
;
1638 * The BIOS can leave the PHY is some weird state
1639 * where it doesn't fully power down some parts.
1640 * Disable the asserts until the PHY has been fully
1641 * reset (ie. the power well has been disabled at
1644 if (!dev_priv
->chv_phy_assert
[phy
])
1648 reg
= _CHV_CMN_DW0_CH0
;
1650 reg
= _CHV_CMN_DW6_CH1
;
1652 mutex_lock(&dev_priv
->sb_lock
);
1653 val
= vlv_dpio_read(dev_priv
, pipe
, reg
);
1654 mutex_unlock(&dev_priv
->sb_lock
);
1657 * This assumes !override is only used when the port is disabled.
1658 * All lanes should power down even without the override when
1659 * the port is disabled.
1661 if (!override
|| mask
== 0xf) {
1662 expected
= DPIO_ALLDL_POWERDOWN
| DPIO_ANYDL_POWERDOWN
;
1664 * If CH1 common lane is not active anymore
1665 * (eg. for pipe B DPLL) the entire channel will
1666 * shut down, which causes the common lane registers
1667 * to read as 0. That means we can't actually check
1668 * the lane power down status bits, but as the entire
1669 * register reads as 0 it's a good indication that the
1670 * channel is indeed entirely powered down.
1672 if (ch
== DPIO_CH1
&& val
== 0)
1674 } else if (mask
!= 0x0) {
1675 expected
= DPIO_ANYDL_POWERDOWN
;
1681 actual
= val
>> DPIO_ANYDL_POWERDOWN_SHIFT_CH0
;
1683 actual
= val
>> DPIO_ANYDL_POWERDOWN_SHIFT_CH1
;
1684 actual
&= DPIO_ALLDL_POWERDOWN
| DPIO_ANYDL_POWERDOWN
;
1686 WARN(actual
!= expected
,
1687 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1688 !!(actual
& DPIO_ALLDL_POWERDOWN
), !!(actual
& DPIO_ANYDL_POWERDOWN
),
1689 !!(expected
& DPIO_ALLDL_POWERDOWN
), !!(expected
& DPIO_ANYDL_POWERDOWN
),
1693 bool chv_phy_powergate_ch(struct drm_i915_private
*dev_priv
, enum dpio_phy phy
,
1694 enum dpio_channel ch
, bool override
)
1696 struct i915_power_domains
*power_domains
= &dev_priv
->power_domains
;
1699 mutex_lock(&power_domains
->lock
);
1701 was_override
= dev_priv
->chv_phy_control
& PHY_CH_POWER_DOWN_OVRD_EN(phy
, ch
);
1703 if (override
== was_override
)
1707 dev_priv
->chv_phy_control
|= PHY_CH_POWER_DOWN_OVRD_EN(phy
, ch
);
1709 dev_priv
->chv_phy_control
&= ~PHY_CH_POWER_DOWN_OVRD_EN(phy
, ch
);
1711 I915_WRITE(DISPLAY_PHY_CONTROL
, dev_priv
->chv_phy_control
);
1713 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1714 phy
, ch
, dev_priv
->chv_phy_control
);
1716 assert_chv_phy_status(dev_priv
);
1719 mutex_unlock(&power_domains
->lock
);
1721 return was_override
;
1724 void chv_phy_powergate_lanes(struct intel_encoder
*encoder
,
1725 bool override
, unsigned int mask
)
1727 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
1728 struct i915_power_domains
*power_domains
= &dev_priv
->power_domains
;
1729 enum dpio_phy phy
= vlv_dport_to_phy(enc_to_dig_port(&encoder
->base
));
1730 enum dpio_channel ch
= vlv_dport_to_channel(enc_to_dig_port(&encoder
->base
));
1732 mutex_lock(&power_domains
->lock
);
1734 dev_priv
->chv_phy_control
&= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy
, ch
);
1735 dev_priv
->chv_phy_control
|= PHY_CH_POWER_DOWN_OVRD(mask
, phy
, ch
);
1738 dev_priv
->chv_phy_control
|= PHY_CH_POWER_DOWN_OVRD_EN(phy
, ch
);
1740 dev_priv
->chv_phy_control
&= ~PHY_CH_POWER_DOWN_OVRD_EN(phy
, ch
);
1742 I915_WRITE(DISPLAY_PHY_CONTROL
, dev_priv
->chv_phy_control
);
1744 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1745 phy
, ch
, mask
, dev_priv
->chv_phy_control
);
1747 assert_chv_phy_status(dev_priv
);
1749 assert_chv_phy_powergate(dev_priv
, phy
, ch
, override
, mask
);
1751 mutex_unlock(&power_domains
->lock
);
1754 static bool chv_pipe_power_well_enabled(struct drm_i915_private
*dev_priv
,
1755 struct i915_power_well
*power_well
)
1757 enum pipe pipe
= PIPE_A
;
1761 mutex_lock(&dev_priv
->pcu_lock
);
1763 state
= vlv_punit_read(dev_priv
, PUNIT_REG_DSPFREQ
) & DP_SSS_MASK(pipe
);
1765 * We only ever set the power-on and power-gate states, anything
1766 * else is unexpected.
1768 WARN_ON(state
!= DP_SSS_PWR_ON(pipe
) && state
!= DP_SSS_PWR_GATE(pipe
));
1769 enabled
= state
== DP_SSS_PWR_ON(pipe
);
1772 * A transient state at this point would mean some unexpected party
1773 * is poking at the power controls too.
1775 ctrl
= vlv_punit_read(dev_priv
, PUNIT_REG_DSPFREQ
) & DP_SSC_MASK(pipe
);
1776 WARN_ON(ctrl
<< 16 != state
);
1778 mutex_unlock(&dev_priv
->pcu_lock
);
1783 static void chv_set_pipe_power_well(struct drm_i915_private
*dev_priv
,
1784 struct i915_power_well
*power_well
,
1787 enum pipe pipe
= PIPE_A
;
1791 state
= enable
? DP_SSS_PWR_ON(pipe
) : DP_SSS_PWR_GATE(pipe
);
1793 mutex_lock(&dev_priv
->pcu_lock
);
1796 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1801 ctrl
= vlv_punit_read(dev_priv
, PUNIT_REG_DSPFREQ
);
1802 ctrl
&= ~DP_SSC_MASK(pipe
);
1803 ctrl
|= enable
? DP_SSC_PWR_ON(pipe
) : DP_SSC_PWR_GATE(pipe
);
1804 vlv_punit_write(dev_priv
, PUNIT_REG_DSPFREQ
, ctrl
);
1806 if (wait_for(COND
, 100))
1807 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1809 vlv_punit_read(dev_priv
, PUNIT_REG_DSPFREQ
));
1814 mutex_unlock(&dev_priv
->pcu_lock
);
1817 static void chv_pipe_power_well_enable(struct drm_i915_private
*dev_priv
,
1818 struct i915_power_well
*power_well
)
1820 chv_set_pipe_power_well(dev_priv
, power_well
, true);
1822 vlv_display_power_well_init(dev_priv
);
1825 static void chv_pipe_power_well_disable(struct drm_i915_private
*dev_priv
,
1826 struct i915_power_well
*power_well
)
1828 vlv_display_power_well_deinit(dev_priv
);
1830 chv_set_pipe_power_well(dev_priv
, power_well
, false);
1834 __intel_display_power_get_domain(struct drm_i915_private
*dev_priv
,
1835 enum intel_display_power_domain domain
)
1837 struct i915_power_domains
*power_domains
= &dev_priv
->power_domains
;
1838 struct i915_power_well
*power_well
;
1840 for_each_power_domain_well(dev_priv
, power_well
, BIT_ULL(domain
))
1841 intel_power_well_get(dev_priv
, power_well
);
1843 power_domains
->domain_use_count
[domain
]++;
1847 * intel_display_power_get - grab a power domain reference
1848 * @dev_priv: i915 device instance
1849 * @domain: power domain to reference
1851 * This function grabs a power domain reference for @domain and ensures that the
1852 * power domain and all its parents are powered up. Therefore users should only
1853 * grab a reference to the innermost power domain they need.
1855 * Any power domain reference obtained by this function must have a symmetric
1856 * call to intel_display_power_put() to release the reference again.
1858 intel_wakeref_t
intel_display_power_get(struct drm_i915_private
*dev_priv
,
1859 enum intel_display_power_domain domain
)
1861 struct i915_power_domains
*power_domains
= &dev_priv
->power_domains
;
1862 intel_wakeref_t wakeref
= intel_runtime_pm_get(dev_priv
);
1864 mutex_lock(&power_domains
->lock
);
1866 __intel_display_power_get_domain(dev_priv
, domain
);
1868 mutex_unlock(&power_domains
->lock
);
1874 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1875 * @dev_priv: i915 device instance
1876 * @domain: power domain to reference
1878 * This function grabs a power domain reference for @domain and ensures that the
1879 * power domain and all its parents are powered up. Therefore users should only
1880 * grab a reference to the innermost power domain they need.
1882 * Any power domain reference obtained by this function must have a symmetric
1883 * call to intel_display_power_put() to release the reference again.
1886 intel_display_power_get_if_enabled(struct drm_i915_private
*dev_priv
,
1887 enum intel_display_power_domain domain
)
1889 struct i915_power_domains
*power_domains
= &dev_priv
->power_domains
;
1890 intel_wakeref_t wakeref
;
1893 wakeref
= intel_runtime_pm_get_if_in_use(dev_priv
);
1897 mutex_lock(&power_domains
->lock
);
1899 if (__intel_display_power_is_enabled(dev_priv
, domain
)) {
1900 __intel_display_power_get_domain(dev_priv
, domain
);
1906 mutex_unlock(&power_domains
->lock
);
1909 intel_runtime_pm_put(dev_priv
, wakeref
);
1916 static void __intel_display_power_put(struct drm_i915_private
*dev_priv
,
1917 enum intel_display_power_domain domain
)
1919 struct i915_power_domains
*power_domains
;
1920 struct i915_power_well
*power_well
;
1922 power_domains
= &dev_priv
->power_domains
;
1924 mutex_lock(&power_domains
->lock
);
1926 WARN(!power_domains
->domain_use_count
[domain
],
1927 "Use count on domain %s is already zero\n",
1928 intel_display_power_domain_str(domain
));
1929 power_domains
->domain_use_count
[domain
]--;
1931 for_each_power_domain_well_reverse(dev_priv
, power_well
, BIT_ULL(domain
))
1932 intel_power_well_put(dev_priv
, power_well
);
1934 mutex_unlock(&power_domains
->lock
);
1938 * intel_display_power_put - release a power domain reference
1939 * @dev_priv: i915 device instance
1940 * @domain: power domain to reference
1942 * This function drops the power domain reference obtained by
1943 * intel_display_power_get() and might power down the corresponding hardware
1944 * block right away if this is the last reference.
1946 void intel_display_power_put_unchecked(struct drm_i915_private
*dev_priv
,
1947 enum intel_display_power_domain domain
)
1949 __intel_display_power_put(dev_priv
, domain
);
1950 intel_runtime_pm_put_unchecked(dev_priv
);
1953 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1954 void intel_display_power_put(struct drm_i915_private
*dev_priv
,
1955 enum intel_display_power_domain domain
,
1956 intel_wakeref_t wakeref
)
1958 __intel_display_power_put(dev_priv
, domain
);
1959 intel_runtime_pm_put(dev_priv
, wakeref
);
1963 #define I830_PIPES_POWER_DOMAINS ( \
1964 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1965 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1966 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1967 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1968 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1969 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1970 BIT_ULL(POWER_DOMAIN_INIT))
1972 #define VLV_DISPLAY_POWER_DOMAINS ( \
1973 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1974 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1975 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1976 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1977 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1978 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1979 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1980 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1981 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1982 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1983 BIT_ULL(POWER_DOMAIN_VGA) | \
1984 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1985 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1986 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1987 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1988 BIT_ULL(POWER_DOMAIN_INIT))
1990 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
1991 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1992 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1993 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1994 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1995 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1996 BIT_ULL(POWER_DOMAIN_INIT))
1998 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
1999 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2000 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2001 BIT_ULL(POWER_DOMAIN_INIT))
2003 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
2004 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2005 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2006 BIT_ULL(POWER_DOMAIN_INIT))
2008 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
2009 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2010 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2011 BIT_ULL(POWER_DOMAIN_INIT))
2013 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
2014 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2015 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2016 BIT_ULL(POWER_DOMAIN_INIT))
2018 #define CHV_DISPLAY_POWER_DOMAINS ( \
2019 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2020 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2021 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2022 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2023 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2024 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2025 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2026 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2027 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2028 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2029 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2030 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2031 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2032 BIT_ULL(POWER_DOMAIN_VGA) | \
2033 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2034 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2035 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2036 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2037 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2038 BIT_ULL(POWER_DOMAIN_INIT))
2040 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
2041 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2042 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2043 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2044 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2045 BIT_ULL(POWER_DOMAIN_INIT))
2047 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
2048 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2049 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2050 BIT_ULL(POWER_DOMAIN_INIT))
2052 #define HSW_DISPLAY_POWER_DOMAINS ( \
2053 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2054 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2055 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2056 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2057 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2058 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2059 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2060 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2061 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2062 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2063 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2064 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2065 BIT_ULL(POWER_DOMAIN_VGA) | \
2066 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2067 BIT_ULL(POWER_DOMAIN_INIT))
2069 #define BDW_DISPLAY_POWER_DOMAINS ( \
2070 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2071 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2072 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2073 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2074 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2075 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2076 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2077 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2078 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2079 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2080 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2081 BIT_ULL(POWER_DOMAIN_VGA) | \
2082 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2083 BIT_ULL(POWER_DOMAIN_INIT))
2085 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2086 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2087 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2088 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2089 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2090 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2091 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2092 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2093 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2094 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2095 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2096 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2097 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2098 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2099 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2100 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2101 BIT_ULL(POWER_DOMAIN_VGA) | \
2102 BIT_ULL(POWER_DOMAIN_INIT))
2103 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
2104 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2105 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2106 BIT_ULL(POWER_DOMAIN_INIT))
2107 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2108 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2109 BIT_ULL(POWER_DOMAIN_INIT))
2110 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2111 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2112 BIT_ULL(POWER_DOMAIN_INIT))
2113 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
2114 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2115 BIT_ULL(POWER_DOMAIN_INIT))
2116 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2117 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2118 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2119 BIT_ULL(POWER_DOMAIN_MODESET) | \
2120 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2121 BIT_ULL(POWER_DOMAIN_INIT))
/* Broxton power-well domain masks. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Note: unlike SKL, BXT's DC-off mask also covers GMBUS. */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Geminilake power-well domain masks. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* GLK DDI IO wells carry no POWER_DOMAIN_INIT bit, unlike SKL/BXT. */
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Cannonlake power-well domain masks. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 *
 * NOTE(review): some bullet lines above were lost in the source dump and
 * were reconstructed from the upstream i915 driver -- verify against the
 * kernel tree.
 */
#define ICL_PW_4_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (			\
	ICL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (			\
	ICL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	ICL_PW_2_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2364 static const struct i915_power_well_ops i9xx_always_on_power_well_ops
= {
2365 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2366 .enable
= i9xx_always_on_power_well_noop
,
2367 .disable
= i9xx_always_on_power_well_noop
,
2368 .is_enabled
= i9xx_always_on_power_well_enabled
,
2371 static const struct i915_power_well_ops chv_pipe_power_well_ops
= {
2372 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2373 .enable
= chv_pipe_power_well_enable
,
2374 .disable
= chv_pipe_power_well_disable
,
2375 .is_enabled
= chv_pipe_power_well_enabled
,
2378 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops
= {
2379 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2380 .enable
= chv_dpio_cmn_power_well_enable
,
2381 .disable
= chv_dpio_cmn_power_well_disable
,
2382 .is_enabled
= vlv_power_well_enabled
,
2385 static const struct i915_power_well_desc i9xx_always_on_power_well
[] = {
2387 .name
= "always-on",
2389 .domains
= POWER_DOMAIN_MASK
,
2390 .ops
= &i9xx_always_on_power_well_ops
,
2391 .id
= DISP_PW_ID_NONE
,
2395 static const struct i915_power_well_ops i830_pipes_power_well_ops
= {
2396 .sync_hw
= i830_pipes_power_well_sync_hw
,
2397 .enable
= i830_pipes_power_well_enable
,
2398 .disable
= i830_pipes_power_well_disable
,
2399 .is_enabled
= i830_pipes_power_well_enabled
,
2402 static const struct i915_power_well_desc i830_power_wells
[] = {
2404 .name
= "always-on",
2406 .domains
= POWER_DOMAIN_MASK
,
2407 .ops
= &i9xx_always_on_power_well_ops
,
2408 .id
= DISP_PW_ID_NONE
,
2412 .domains
= I830_PIPES_POWER_DOMAINS
,
2413 .ops
= &i830_pipes_power_well_ops
,
2414 .id
= DISP_PW_ID_NONE
,
2418 static const struct i915_power_well_ops hsw_power_well_ops
= {
2419 .sync_hw
= hsw_power_well_sync_hw
,
2420 .enable
= hsw_power_well_enable
,
2421 .disable
= hsw_power_well_disable
,
2422 .is_enabled
= hsw_power_well_enabled
,
2425 static const struct i915_power_well_ops gen9_dc_off_power_well_ops
= {
2426 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2427 .enable
= gen9_dc_off_power_well_enable
,
2428 .disable
= gen9_dc_off_power_well_disable
,
2429 .is_enabled
= gen9_dc_off_power_well_enabled
,
2432 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops
= {
2433 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2434 .enable
= bxt_dpio_cmn_power_well_enable
,
2435 .disable
= bxt_dpio_cmn_power_well_disable
,
2436 .is_enabled
= bxt_dpio_cmn_power_well_enabled
,
2439 static const struct i915_power_well_regs hsw_power_well_regs
= {
2440 .bios
= HSW_PWR_WELL_CTL1
,
2441 .driver
= HSW_PWR_WELL_CTL2
,
2442 .kvmr
= HSW_PWR_WELL_CTL3
,
2443 .debug
= HSW_PWR_WELL_CTL4
,
2446 static const struct i915_power_well_desc hsw_power_wells
[] = {
2448 .name
= "always-on",
2450 .domains
= POWER_DOMAIN_MASK
,
2451 .ops
= &i9xx_always_on_power_well_ops
,
2452 .id
= DISP_PW_ID_NONE
,
2456 .domains
= HSW_DISPLAY_POWER_DOMAINS
,
2457 .ops
= &hsw_power_well_ops
,
2458 .id
= HSW_DISP_PW_GLOBAL
,
2460 .hsw
.regs
= &hsw_power_well_regs
,
2461 .hsw
.idx
= HSW_PW_CTL_IDX_GLOBAL
,
2462 .hsw
.has_vga
= true,
2467 static const struct i915_power_well_desc bdw_power_wells
[] = {
2469 .name
= "always-on",
2471 .domains
= POWER_DOMAIN_MASK
,
2472 .ops
= &i9xx_always_on_power_well_ops
,
2473 .id
= DISP_PW_ID_NONE
,
2477 .domains
= BDW_DISPLAY_POWER_DOMAINS
,
2478 .ops
= &hsw_power_well_ops
,
2479 .id
= HSW_DISP_PW_GLOBAL
,
2481 .hsw
.regs
= &hsw_power_well_regs
,
2482 .hsw
.idx
= HSW_PW_CTL_IDX_GLOBAL
,
2483 .hsw
.irq_pipe_mask
= BIT(PIPE_B
) | BIT(PIPE_C
),
2484 .hsw
.has_vga
= true,
2489 static const struct i915_power_well_ops vlv_display_power_well_ops
= {
2490 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2491 .enable
= vlv_display_power_well_enable
,
2492 .disable
= vlv_display_power_well_disable
,
2493 .is_enabled
= vlv_power_well_enabled
,
2496 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops
= {
2497 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2498 .enable
= vlv_dpio_cmn_power_well_enable
,
2499 .disable
= vlv_dpio_cmn_power_well_disable
,
2500 .is_enabled
= vlv_power_well_enabled
,
2503 static const struct i915_power_well_ops vlv_dpio_power_well_ops
= {
2504 .sync_hw
= i9xx_power_well_sync_hw_noop
,
2505 .enable
= vlv_power_well_enable
,
2506 .disable
= vlv_power_well_disable
,
2507 .is_enabled
= vlv_power_well_enabled
,
2510 static const struct i915_power_well_desc vlv_power_wells
[] = {
2512 .name
= "always-on",
2514 .domains
= POWER_DOMAIN_MASK
,
2515 .ops
= &i9xx_always_on_power_well_ops
,
2516 .id
= DISP_PW_ID_NONE
,
2520 .domains
= VLV_DISPLAY_POWER_DOMAINS
,
2521 .ops
= &vlv_display_power_well_ops
,
2522 .id
= VLV_DISP_PW_DISP2D
,
2524 .vlv
.idx
= PUNIT_PWGT_IDX_DISP2D
,
2528 .name
= "dpio-tx-b-01",
2529 .domains
= VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS
|
2530 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS
|
2531 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS
|
2532 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS
,
2533 .ops
= &vlv_dpio_power_well_ops
,
2534 .id
= DISP_PW_ID_NONE
,
2536 .vlv
.idx
= PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01
,
2540 .name
= "dpio-tx-b-23",
2541 .domains
= VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS
|
2542 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS
|
2543 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS
|
2544 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS
,
2545 .ops
= &vlv_dpio_power_well_ops
,
2546 .id
= DISP_PW_ID_NONE
,
2548 .vlv
.idx
= PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23
,
2552 .name
= "dpio-tx-c-01",
2553 .domains
= VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS
|
2554 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS
|
2555 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS
|
2556 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS
,
2557 .ops
= &vlv_dpio_power_well_ops
,
2558 .id
= DISP_PW_ID_NONE
,
2560 .vlv
.idx
= PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01
,
2564 .name
= "dpio-tx-c-23",
2565 .domains
= VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS
|
2566 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS
|
2567 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS
|
2568 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS
,
2569 .ops
= &vlv_dpio_power_well_ops
,
2570 .id
= DISP_PW_ID_NONE
,
2572 .vlv
.idx
= PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23
,
2576 .name
= "dpio-common",
2577 .domains
= VLV_DPIO_CMN_BC_POWER_DOMAINS
,
2578 .ops
= &vlv_dpio_cmn_power_well_ops
,
2579 .id
= VLV_DISP_PW_DPIO_CMN_BC
,
2581 .vlv
.idx
= PUNIT_PWGT_IDX_DPIO_CMN_BC
,
2586 static const struct i915_power_well_desc chv_power_wells
[] = {
2588 .name
= "always-on",
2590 .domains
= POWER_DOMAIN_MASK
,
2591 .ops
= &i9xx_always_on_power_well_ops
,
2592 .id
= DISP_PW_ID_NONE
,
2597 * Pipe A power well is the new disp2d well. Pipe B and C
2598 * power wells don't actually exist. Pipe A power well is
2599 * required for any pipe to work.
2601 .domains
= CHV_DISPLAY_POWER_DOMAINS
,
2602 .ops
= &chv_pipe_power_well_ops
,
2603 .id
= DISP_PW_ID_NONE
,
2606 .name
= "dpio-common-bc",
2607 .domains
= CHV_DPIO_CMN_BC_POWER_DOMAINS
,
2608 .ops
= &chv_dpio_cmn_power_well_ops
,
2609 .id
= VLV_DISP_PW_DPIO_CMN_BC
,
2611 .vlv
.idx
= PUNIT_PWGT_IDX_DPIO_CMN_BC
,
2615 .name
= "dpio-common-d",
2616 .domains
= CHV_DPIO_CMN_D_POWER_DOMAINS
,
2617 .ops
= &chv_dpio_cmn_power_well_ops
,
2618 .id
= CHV_DISP_PW_DPIO_CMN_D
,
2620 .vlv
.idx
= PUNIT_PWGT_IDX_DPIO_CMN_D
,
2625 bool intel_display_power_well_is_enabled(struct drm_i915_private
*dev_priv
,
2626 enum i915_power_well_id power_well_id
)
2628 struct i915_power_well
*power_well
;
2631 power_well
= lookup_power_well(dev_priv
, power_well_id
);
2632 ret
= power_well
->desc
->ops
->is_enabled(dev_priv
, power_well
);
2637 static const struct i915_power_well_desc skl_power_wells
[] = {
2639 .name
= "always-on",
2641 .domains
= POWER_DOMAIN_MASK
,
2642 .ops
= &i9xx_always_on_power_well_ops
,
2643 .id
= DISP_PW_ID_NONE
,
2646 .name
= "power well 1",
2647 /* Handled by the DMC firmware */
2650 .ops
= &hsw_power_well_ops
,
2651 .id
= SKL_DISP_PW_1
,
2653 .hsw
.regs
= &hsw_power_well_regs
,
2654 .hsw
.idx
= SKL_PW_CTL_IDX_PW_1
,
2655 .hsw
.has_fuses
= true,
2659 .name
= "MISC IO power well",
2660 /* Handled by the DMC firmware */
2663 .ops
= &hsw_power_well_ops
,
2664 .id
= SKL_DISP_PW_MISC_IO
,
2666 .hsw
.regs
= &hsw_power_well_regs
,
2667 .hsw
.idx
= SKL_PW_CTL_IDX_MISC_IO
,
2672 .domains
= SKL_DISPLAY_DC_OFF_POWER_DOMAINS
,
2673 .ops
= &gen9_dc_off_power_well_ops
,
2674 .id
= DISP_PW_ID_NONE
,
2677 .name
= "power well 2",
2678 .domains
= SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS
,
2679 .ops
= &hsw_power_well_ops
,
2680 .id
= SKL_DISP_PW_2
,
2682 .hsw
.regs
= &hsw_power_well_regs
,
2683 .hsw
.idx
= SKL_PW_CTL_IDX_PW_2
,
2684 .hsw
.irq_pipe_mask
= BIT(PIPE_B
) | BIT(PIPE_C
),
2685 .hsw
.has_vga
= true,
2686 .hsw
.has_fuses
= true,
2690 .name
= "DDI A/E IO power well",
2691 .domains
= SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS
,
2692 .ops
= &hsw_power_well_ops
,
2693 .id
= DISP_PW_ID_NONE
,
2695 .hsw
.regs
= &hsw_power_well_regs
,
2696 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_A_E
,
2700 .name
= "DDI B IO power well",
2701 .domains
= SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS
,
2702 .ops
= &hsw_power_well_ops
,
2703 .id
= DISP_PW_ID_NONE
,
2705 .hsw
.regs
= &hsw_power_well_regs
,
2706 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_B
,
2710 .name
= "DDI C IO power well",
2711 .domains
= SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS
,
2712 .ops
= &hsw_power_well_ops
,
2713 .id
= DISP_PW_ID_NONE
,
2715 .hsw
.regs
= &hsw_power_well_regs
,
2716 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_C
,
2720 .name
= "DDI D IO power well",
2721 .domains
= SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS
,
2722 .ops
= &hsw_power_well_ops
,
2723 .id
= DISP_PW_ID_NONE
,
2725 .hsw
.regs
= &hsw_power_well_regs
,
2726 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_D
,
2731 static const struct i915_power_well_desc bxt_power_wells
[] = {
2733 .name
= "always-on",
2735 .domains
= POWER_DOMAIN_MASK
,
2736 .ops
= &i9xx_always_on_power_well_ops
,
2737 .id
= DISP_PW_ID_NONE
,
2740 .name
= "power well 1",
2741 /* Handled by the DMC firmware */
2744 .ops
= &hsw_power_well_ops
,
2745 .id
= SKL_DISP_PW_1
,
2747 .hsw
.regs
= &hsw_power_well_regs
,
2748 .hsw
.idx
= SKL_PW_CTL_IDX_PW_1
,
2749 .hsw
.has_fuses
= true,
2754 .domains
= BXT_DISPLAY_DC_OFF_POWER_DOMAINS
,
2755 .ops
= &gen9_dc_off_power_well_ops
,
2756 .id
= DISP_PW_ID_NONE
,
2759 .name
= "power well 2",
2760 .domains
= BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS
,
2761 .ops
= &hsw_power_well_ops
,
2762 .id
= SKL_DISP_PW_2
,
2764 .hsw
.regs
= &hsw_power_well_regs
,
2765 .hsw
.idx
= SKL_PW_CTL_IDX_PW_2
,
2766 .hsw
.irq_pipe_mask
= BIT(PIPE_B
) | BIT(PIPE_C
),
2767 .hsw
.has_vga
= true,
2768 .hsw
.has_fuses
= true,
2772 .name
= "dpio-common-a",
2773 .domains
= BXT_DPIO_CMN_A_POWER_DOMAINS
,
2774 .ops
= &bxt_dpio_cmn_power_well_ops
,
2775 .id
= BXT_DISP_PW_DPIO_CMN_A
,
2777 .bxt
.phy
= DPIO_PHY1
,
2781 .name
= "dpio-common-bc",
2782 .domains
= BXT_DPIO_CMN_BC_POWER_DOMAINS
,
2783 .ops
= &bxt_dpio_cmn_power_well_ops
,
2784 .id
= VLV_DISP_PW_DPIO_CMN_BC
,
2786 .bxt
.phy
= DPIO_PHY0
,
2791 static const struct i915_power_well_desc glk_power_wells
[] = {
2793 .name
= "always-on",
2795 .domains
= POWER_DOMAIN_MASK
,
2796 .ops
= &i9xx_always_on_power_well_ops
,
2797 .id
= DISP_PW_ID_NONE
,
2800 .name
= "power well 1",
2801 /* Handled by the DMC firmware */
2804 .ops
= &hsw_power_well_ops
,
2805 .id
= SKL_DISP_PW_1
,
2807 .hsw
.regs
= &hsw_power_well_regs
,
2808 .hsw
.idx
= SKL_PW_CTL_IDX_PW_1
,
2809 .hsw
.has_fuses
= true,
2814 .domains
= GLK_DISPLAY_DC_OFF_POWER_DOMAINS
,
2815 .ops
= &gen9_dc_off_power_well_ops
,
2816 .id
= DISP_PW_ID_NONE
,
2819 .name
= "power well 2",
2820 .domains
= GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS
,
2821 .ops
= &hsw_power_well_ops
,
2822 .id
= SKL_DISP_PW_2
,
2824 .hsw
.regs
= &hsw_power_well_regs
,
2825 .hsw
.idx
= SKL_PW_CTL_IDX_PW_2
,
2826 .hsw
.irq_pipe_mask
= BIT(PIPE_B
) | BIT(PIPE_C
),
2827 .hsw
.has_vga
= true,
2828 .hsw
.has_fuses
= true,
2832 .name
= "dpio-common-a",
2833 .domains
= GLK_DPIO_CMN_A_POWER_DOMAINS
,
2834 .ops
= &bxt_dpio_cmn_power_well_ops
,
2835 .id
= BXT_DISP_PW_DPIO_CMN_A
,
2837 .bxt
.phy
= DPIO_PHY1
,
2841 .name
= "dpio-common-b",
2842 .domains
= GLK_DPIO_CMN_B_POWER_DOMAINS
,
2843 .ops
= &bxt_dpio_cmn_power_well_ops
,
2844 .id
= VLV_DISP_PW_DPIO_CMN_BC
,
2846 .bxt
.phy
= DPIO_PHY0
,
2850 .name
= "dpio-common-c",
2851 .domains
= GLK_DPIO_CMN_C_POWER_DOMAINS
,
2852 .ops
= &bxt_dpio_cmn_power_well_ops
,
2853 .id
= GLK_DISP_PW_DPIO_CMN_C
,
2855 .bxt
.phy
= DPIO_PHY2
,
2860 .domains
= GLK_DISPLAY_AUX_A_POWER_DOMAINS
,
2861 .ops
= &hsw_power_well_ops
,
2862 .id
= DISP_PW_ID_NONE
,
2864 .hsw
.regs
= &hsw_power_well_regs
,
2865 .hsw
.idx
= GLK_PW_CTL_IDX_AUX_A
,
2870 .domains
= GLK_DISPLAY_AUX_B_POWER_DOMAINS
,
2871 .ops
= &hsw_power_well_ops
,
2872 .id
= DISP_PW_ID_NONE
,
2874 .hsw
.regs
= &hsw_power_well_regs
,
2875 .hsw
.idx
= GLK_PW_CTL_IDX_AUX_B
,
2880 .domains
= GLK_DISPLAY_AUX_C_POWER_DOMAINS
,
2881 .ops
= &hsw_power_well_ops
,
2882 .id
= DISP_PW_ID_NONE
,
2884 .hsw
.regs
= &hsw_power_well_regs
,
2885 .hsw
.idx
= GLK_PW_CTL_IDX_AUX_C
,
2889 .name
= "DDI A IO power well",
2890 .domains
= GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS
,
2891 .ops
= &hsw_power_well_ops
,
2892 .id
= DISP_PW_ID_NONE
,
2894 .hsw
.regs
= &hsw_power_well_regs
,
2895 .hsw
.idx
= GLK_PW_CTL_IDX_DDI_A
,
2899 .name
= "DDI B IO power well",
2900 .domains
= GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS
,
2901 .ops
= &hsw_power_well_ops
,
2902 .id
= DISP_PW_ID_NONE
,
2904 .hsw
.regs
= &hsw_power_well_regs
,
2905 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_B
,
2909 .name
= "DDI C IO power well",
2910 .domains
= GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS
,
2911 .ops
= &hsw_power_well_ops
,
2912 .id
= DISP_PW_ID_NONE
,
2914 .hsw
.regs
= &hsw_power_well_regs
,
2915 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_C
,
2920 static const struct i915_power_well_desc cnl_power_wells
[] = {
2922 .name
= "always-on",
2924 .domains
= POWER_DOMAIN_MASK
,
2925 .ops
= &i9xx_always_on_power_well_ops
,
2926 .id
= DISP_PW_ID_NONE
,
2929 .name
= "power well 1",
2930 /* Handled by the DMC firmware */
2933 .ops
= &hsw_power_well_ops
,
2934 .id
= SKL_DISP_PW_1
,
2936 .hsw
.regs
= &hsw_power_well_regs
,
2937 .hsw
.idx
= SKL_PW_CTL_IDX_PW_1
,
2938 .hsw
.has_fuses
= true,
2943 .domains
= CNL_DISPLAY_AUX_A_POWER_DOMAINS
,
2944 .ops
= &hsw_power_well_ops
,
2945 .id
= DISP_PW_ID_NONE
,
2947 .hsw
.regs
= &hsw_power_well_regs
,
2948 .hsw
.idx
= GLK_PW_CTL_IDX_AUX_A
,
2953 .domains
= CNL_DISPLAY_AUX_B_POWER_DOMAINS
,
2954 .ops
= &hsw_power_well_ops
,
2955 .id
= DISP_PW_ID_NONE
,
2957 .hsw
.regs
= &hsw_power_well_regs
,
2958 .hsw
.idx
= GLK_PW_CTL_IDX_AUX_B
,
2963 .domains
= CNL_DISPLAY_AUX_C_POWER_DOMAINS
,
2964 .ops
= &hsw_power_well_ops
,
2965 .id
= DISP_PW_ID_NONE
,
2967 .hsw
.regs
= &hsw_power_well_regs
,
2968 .hsw
.idx
= GLK_PW_CTL_IDX_AUX_C
,
2973 .domains
= CNL_DISPLAY_AUX_D_POWER_DOMAINS
,
2974 .ops
= &hsw_power_well_ops
,
2975 .id
= DISP_PW_ID_NONE
,
2977 .hsw
.regs
= &hsw_power_well_regs
,
2978 .hsw
.idx
= CNL_PW_CTL_IDX_AUX_D
,
2983 .domains
= CNL_DISPLAY_DC_OFF_POWER_DOMAINS
,
2984 .ops
= &gen9_dc_off_power_well_ops
,
2985 .id
= DISP_PW_ID_NONE
,
2988 .name
= "power well 2",
2989 .domains
= CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS
,
2990 .ops
= &hsw_power_well_ops
,
2991 .id
= SKL_DISP_PW_2
,
2993 .hsw
.regs
= &hsw_power_well_regs
,
2994 .hsw
.idx
= SKL_PW_CTL_IDX_PW_2
,
2995 .hsw
.irq_pipe_mask
= BIT(PIPE_B
) | BIT(PIPE_C
),
2996 .hsw
.has_vga
= true,
2997 .hsw
.has_fuses
= true,
3001 .name
= "DDI A IO power well",
3002 .domains
= CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS
,
3003 .ops
= &hsw_power_well_ops
,
3004 .id
= DISP_PW_ID_NONE
,
3006 .hsw
.regs
= &hsw_power_well_regs
,
3007 .hsw
.idx
= GLK_PW_CTL_IDX_DDI_A
,
3011 .name
= "DDI B IO power well",
3012 .domains
= CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS
,
3013 .ops
= &hsw_power_well_ops
,
3014 .id
= DISP_PW_ID_NONE
,
3016 .hsw
.regs
= &hsw_power_well_regs
,
3017 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_B
,
3021 .name
= "DDI C IO power well",
3022 .domains
= CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS
,
3023 .ops
= &hsw_power_well_ops
,
3024 .id
= DISP_PW_ID_NONE
,
3026 .hsw
.regs
= &hsw_power_well_regs
,
3027 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_C
,
3031 .name
= "DDI D IO power well",
3032 .domains
= CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS
,
3033 .ops
= &hsw_power_well_ops
,
3034 .id
= DISP_PW_ID_NONE
,
3036 .hsw
.regs
= &hsw_power_well_regs
,
3037 .hsw
.idx
= SKL_PW_CTL_IDX_DDI_D
,
3041 .name
= "DDI F IO power well",
3042 .domains
= CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS
,
3043 .ops
= &hsw_power_well_ops
,
3044 .id
= DISP_PW_ID_NONE
,
3046 .hsw
.regs
= &hsw_power_well_regs
,
3047 .hsw
.idx
= CNL_PW_CTL_IDX_DDI_F
,
3052 .domains
= CNL_DISPLAY_AUX_F_POWER_DOMAINS
,
3053 .ops
= &hsw_power_well_ops
,
3054 .id
= DISP_PW_ID_NONE
,
3056 .hsw
.regs
= &hsw_power_well_regs
,
3057 .hsw
.idx
= CNL_PW_CTL_IDX_AUX_F
,
3062 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops
= {
3063 .sync_hw
= hsw_power_well_sync_hw
,
3064 .enable
= icl_combo_phy_aux_power_well_enable
,
3065 .disable
= icl_combo_phy_aux_power_well_disable
,
3066 .is_enabled
= hsw_power_well_enabled
,
3069 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops
= {
3070 .sync_hw
= hsw_power_well_sync_hw
,
3071 .enable
= icl_tc_phy_aux_power_well_enable
,
3072 .disable
= hsw_power_well_disable
,
3073 .is_enabled
= hsw_power_well_enabled
,
3076 static const struct i915_power_well_regs icl_aux_power_well_regs
= {
3077 .bios
= ICL_PWR_WELL_CTL_AUX1
,
3078 .driver
= ICL_PWR_WELL_CTL_AUX2
,
3079 .debug
= ICL_PWR_WELL_CTL_AUX4
,
3082 static const struct i915_power_well_regs icl_ddi_power_well_regs
= {
3083 .bios
= ICL_PWR_WELL_CTL_DDI1
,
3084 .driver
= ICL_PWR_WELL_CTL_DDI2
,
3085 .debug
= ICL_PWR_WELL_CTL_DDI4
,
3088 static const struct i915_power_well_desc icl_power_wells
[] = {
3090 .name
= "always-on",
3092 .domains
= POWER_DOMAIN_MASK
,
3093 .ops
= &i9xx_always_on_power_well_ops
,
3094 .id
= DISP_PW_ID_NONE
,
3097 .name
= "power well 1",
3098 /* Handled by the DMC firmware */
3101 .ops
= &hsw_power_well_ops
,
3102 .id
= SKL_DISP_PW_1
,
3104 .hsw
.regs
= &hsw_power_well_regs
,
3105 .hsw
.idx
= ICL_PW_CTL_IDX_PW_1
,
3106 .hsw
.has_fuses
= true,
3111 .domains
= ICL_DISPLAY_DC_OFF_POWER_DOMAINS
,
3112 .ops
= &gen9_dc_off_power_well_ops
,
3113 .id
= DISP_PW_ID_NONE
,
3116 .name
= "power well 2",
3117 .domains
= ICL_PW_2_POWER_DOMAINS
,
3118 .ops
= &hsw_power_well_ops
,
3119 .id
= SKL_DISP_PW_2
,
3121 .hsw
.regs
= &hsw_power_well_regs
,
3122 .hsw
.idx
= ICL_PW_CTL_IDX_PW_2
,
3123 .hsw
.has_fuses
= true,
3127 .name
= "power well 3",
3128 .domains
= ICL_PW_3_POWER_DOMAINS
,
3129 .ops
= &hsw_power_well_ops
,
3130 .id
= DISP_PW_ID_NONE
,
3132 .hsw
.regs
= &hsw_power_well_regs
,
3133 .hsw
.idx
= ICL_PW_CTL_IDX_PW_3
,
3134 .hsw
.irq_pipe_mask
= BIT(PIPE_B
),
3135 .hsw
.has_vga
= true,
3136 .hsw
.has_fuses
= true,
3141 .domains
= ICL_DDI_IO_A_POWER_DOMAINS
,
3142 .ops
= &hsw_power_well_ops
,
3143 .id
= DISP_PW_ID_NONE
,
3145 .hsw
.regs
= &icl_ddi_power_well_regs
,
3146 .hsw
.idx
= ICL_PW_CTL_IDX_DDI_A
,
3151 .domains
= ICL_DDI_IO_B_POWER_DOMAINS
,
3152 .ops
= &hsw_power_well_ops
,
3153 .id
= DISP_PW_ID_NONE
,
3155 .hsw
.regs
= &icl_ddi_power_well_regs
,
3156 .hsw
.idx
= ICL_PW_CTL_IDX_DDI_B
,
3161 .domains
= ICL_DDI_IO_C_POWER_DOMAINS
,
3162 .ops
= &hsw_power_well_ops
,
3163 .id
= DISP_PW_ID_NONE
,
3165 .hsw
.regs
= &icl_ddi_power_well_regs
,
3166 .hsw
.idx
= ICL_PW_CTL_IDX_DDI_C
,
3171 .domains
= ICL_DDI_IO_D_POWER_DOMAINS
,
3172 .ops
= &hsw_power_well_ops
,
3173 .id
= DISP_PW_ID_NONE
,
3175 .hsw
.regs
= &icl_ddi_power_well_regs
,
3176 .hsw
.idx
= ICL_PW_CTL_IDX_DDI_D
,
3181 .domains
= ICL_DDI_IO_E_POWER_DOMAINS
,
3182 .ops
= &hsw_power_well_ops
,
3183 .id
= DISP_PW_ID_NONE
,
3185 .hsw
.regs
= &icl_ddi_power_well_regs
,
3186 .hsw
.idx
= ICL_PW_CTL_IDX_DDI_E
,
3191 .domains
= ICL_DDI_IO_F_POWER_DOMAINS
,
3192 .ops
= &hsw_power_well_ops
,
3193 .id
= DISP_PW_ID_NONE
,
3195 .hsw
.regs
= &icl_ddi_power_well_regs
,
3196 .hsw
.idx
= ICL_PW_CTL_IDX_DDI_F
,
3201 .domains
= ICL_AUX_A_IO_POWER_DOMAINS
,
3202 .ops
= &icl_combo_phy_aux_power_well_ops
,
3203 .id
= DISP_PW_ID_NONE
,
3205 .hsw
.regs
= &icl_aux_power_well_regs
,
3206 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_A
,
3211 .domains
= ICL_AUX_B_IO_POWER_DOMAINS
,
3212 .ops
= &icl_combo_phy_aux_power_well_ops
,
3213 .id
= DISP_PW_ID_NONE
,
3215 .hsw
.regs
= &icl_aux_power_well_regs
,
3216 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_B
,
3221 .domains
= ICL_AUX_C_IO_POWER_DOMAINS
,
3222 .ops
= &icl_tc_phy_aux_power_well_ops
,
3223 .id
= DISP_PW_ID_NONE
,
3225 .hsw
.regs
= &icl_aux_power_well_regs
,
3226 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_C
,
3227 .hsw
.is_tc_tbt
= false,
3232 .domains
= ICL_AUX_D_IO_POWER_DOMAINS
,
3233 .ops
= &icl_tc_phy_aux_power_well_ops
,
3234 .id
= DISP_PW_ID_NONE
,
3236 .hsw
.regs
= &icl_aux_power_well_regs
,
3237 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_D
,
3238 .hsw
.is_tc_tbt
= false,
3243 .domains
= ICL_AUX_E_IO_POWER_DOMAINS
,
3244 .ops
= &icl_tc_phy_aux_power_well_ops
,
3245 .id
= DISP_PW_ID_NONE
,
3247 .hsw
.regs
= &icl_aux_power_well_regs
,
3248 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_E
,
3249 .hsw
.is_tc_tbt
= false,
3254 .domains
= ICL_AUX_F_IO_POWER_DOMAINS
,
3255 .ops
= &icl_tc_phy_aux_power_well_ops
,
3256 .id
= DISP_PW_ID_NONE
,
3258 .hsw
.regs
= &icl_aux_power_well_regs
,
3259 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_F
,
3260 .hsw
.is_tc_tbt
= false,
3265 .domains
= ICL_AUX_TBT1_IO_POWER_DOMAINS
,
3266 .ops
= &icl_tc_phy_aux_power_well_ops
,
3267 .id
= DISP_PW_ID_NONE
,
3269 .hsw
.regs
= &icl_aux_power_well_regs
,
3270 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_TBT1
,
3271 .hsw
.is_tc_tbt
= true,
3276 .domains
= ICL_AUX_TBT2_IO_POWER_DOMAINS
,
3277 .ops
= &icl_tc_phy_aux_power_well_ops
,
3278 .id
= DISP_PW_ID_NONE
,
3280 .hsw
.regs
= &icl_aux_power_well_regs
,
3281 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_TBT2
,
3282 .hsw
.is_tc_tbt
= true,
3287 .domains
= ICL_AUX_TBT3_IO_POWER_DOMAINS
,
3288 .ops
= &icl_tc_phy_aux_power_well_ops
,
3289 .id
= DISP_PW_ID_NONE
,
3291 .hsw
.regs
= &icl_aux_power_well_regs
,
3292 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_TBT3
,
3293 .hsw
.is_tc_tbt
= true,
3298 .domains
= ICL_AUX_TBT4_IO_POWER_DOMAINS
,
3299 .ops
= &icl_tc_phy_aux_power_well_ops
,
3300 .id
= DISP_PW_ID_NONE
,
3302 .hsw
.regs
= &icl_aux_power_well_regs
,
3303 .hsw
.idx
= ICL_PW_CTL_IDX_AUX_TBT4
,
3304 .hsw
.is_tc_tbt
= true,
3308 .name
= "power well 4",
3309 .domains
= ICL_PW_4_POWER_DOMAINS
,
3310 .ops
= &hsw_power_well_ops
,
3311 .id
= DISP_PW_ID_NONE
,
3313 .hsw
.regs
= &hsw_power_well_regs
,
3314 .hsw
.idx
= ICL_PW_CTL_IDX_PW_4
,
3315 .hsw
.has_fuses
= true,
3316 .hsw
.irq_pipe_mask
= BIT(PIPE_C
),
/*
 * Normalize the i915.disable_power_well module option: non-negative values
 * collapse to 0/1; a negative value means "auto", which defaults to enabled.
 * NOTE(review): the "static int" and trailing "return 1;" lines were lost in
 * the source dump and reconstructed from upstream -- verify.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}
3331 static u32
get_allowed_dc_mask(const struct drm_i915_private
*dev_priv
,
3338 if (INTEL_GEN(dev_priv
) >= 11) {
3341 * DC9 has a separate HW flow from the rest of the DC states,
3342 * not depending on the DMC firmware. It's needed by system
3343 * suspend/resume, so allow it unconditionally.
3345 mask
= DC_STATE_EN_DC9
;
3346 } else if (IS_GEN(dev_priv
, 10) || IS_GEN9_BC(dev_priv
)) {
3349 } else if (IS_GEN9_LP(dev_priv
)) {
3351 mask
= DC_STATE_EN_DC9
;
3357 if (!i915_modparams
.disable_power_well
)
3360 if (enable_dc
>= 0 && enable_dc
<= max_dc
) {
3361 requested_dc
= enable_dc
;
3362 } else if (enable_dc
== -1) {
3363 requested_dc
= max_dc
;
3364 } else if (enable_dc
> max_dc
&& enable_dc
<= 2) {
3365 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3367 requested_dc
= max_dc
;
3369 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc
);
3370 requested_dc
= max_dc
;
3373 if (requested_dc
> 1)
3374 mask
|= DC_STATE_EN_UPTO_DC6
;
3375 if (requested_dc
> 0)
3376 mask
|= DC_STATE_EN_UPTO_DC5
;
3378 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask
);
/*
 * Allocate and populate power_domains->power_wells from a platform
 * descriptor table. Also sanity-checks that every non-DISP_PW_ID_NONE id
 * fits in a u64 bitmap and is unique.
 *
 * Returns 0 on success, -ENOMEM if the well array cannot be allocated.
 * Ownership: the array is freed by intel_power_domains_cleanup().
 *
 * NOTE(review): the GFP_KERNEL argument and the two return statements were
 * dropped by extraction and restored from context.
 */
static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_count)
{
	u64 power_well_ids = 0;
	int i;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
				kcalloc(power_well_count,
					sizeof(*power_domains->power_wells),
					GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_count; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		power_domains->power_wells[i].desc = &power_well_descs[i];

		/* Wells without a stable id are not tracked in the bitmap. */
		if (id == DISP_PW_ID_NONE)
			continue;

		WARN_ON(id >= sizeof(power_well_ids) * 8);
		WARN_ON(power_well_ids & BIT_ULL(id));	/* duplicate id */
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}
/*
 * Convenience wrapper: derive the descriptor count with ARRAY_SIZE(), so
 * __power_well_descs must be a real array, not a pointer.
 */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 *
 * Return: 0 on success, negative errno on power-well allocation failure.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	/* Sanitize the modparams before anything reads them. */
	i915_modparams.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   i915_modparams.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

	/* Domain masks are stored in u64 bitmaps. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_ICELAKE(dev_priv)) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and Aux IO are getting enabled for all ports
		 * regardless the presence or use. So, in order to avoid
		 * timeouts, lets remove them from the list
		 * for the SKUs without port F.
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		/* Everything else gets a single always-on dummy well. */
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}
/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 * (currently just the power-well array allocated by __set_power_wells()).
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}
/*
 * Synchronize the software request state of every power well with the
 * hardware and cache each well's current HW-enabled status. Called with
 * the power-domains lock held internally.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}
/*
 * Request a DBUF (display buffer) slice power state via @reg and confirm
 * the hardware acknowledged it through the status bit.
 *
 * Returns true if the slice reached the requested state, false on timeout.
 *
 * NOTE(review): the post-write flush/delay and the return statements were
 * dropped by extraction and restored from context.
 */
bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
			  i915_reg_t reg, bool enable)
{
	u32 val, status;

	val = I915_READ(reg);
	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(10);	/* allow the power request to settle */

	status = I915_READ(reg) & DBUF_POWER_STATE;
	if ((enable && !status) || (!enable && status)) {
		DRM_ERROR("DBus power %s timeout!\n",
			  enable ? "enable" : "disable");
		return false;
	}

	return true;
}
/* Power up the single gen9 DBUF slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
/* Power down the single gen9 DBUF slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
/*
 * Number of DBUF slices the platform has: two from gen11 (ICL) on,
 * one before that.
 *
 * NOTE(review): both return statements restored from context after being
 * dropped by extraction.
 */
static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 11)
		return 1;

	return 2;
}
/*
 * Adjust the number of enabled DBUF slices on ICL. Only the second slice
 * (DBUF_CTL_S2) is toggled; slice 1 stays on. The cached count in
 * dev_priv->wm is updated only when the hardware transition succeeded.
 *
 * NOTE(review): the "if (ret)" guard before the final assignment was
 * dropped by extraction and restored from context.
 */
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
			    u8 req_slices)
{
	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	bool ret;

	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
		DRM_ERROR("Invalid number of dbuf slices requested\n");
		return;
	}

	/* Nothing to do (0 is never a valid target here). */
	if (req_slices == hw_enabled_slices || req_slices == 0)
		return;

	if (req_slices > hw_enabled_slices)
		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
	else
		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);

	if (ret)
		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
}
/*
 * Power up both ICL DBUF slices and verify both report the powered state.
 * On success the cached slice count is set to 2.
 *
 * NOTE(review): the udelay and the "else" before the final assignment were
 * dropped by extraction and restored from context.
 */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);	/* allow the power requests to settle */

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
/*
 * Power down both ICL DBUF slices and verify both report powered off.
 * On success the cached slice count is set to 0.
 *
 * NOTE(review): the udelay and the "else" before the final assignment were
 * dropped by extraction and restored from context.
 */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);	/* allow the power requests to settle */

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
/*
 * Program the ICL MBUS arbiter (ABOX) credit pools. The specific credit
 * values come from the display programming sequence for gen11.
 */
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
	      MBUS_ABOX_B_CREDIT(1) |
	      MBUS_ABOX_BW_CREDIT(1);

	I915_WRITE(MBUS_ABOX_CTL, val);
}
/*
 * Enable or disable the GPU/PCH reset handshake bits. IVB uses a different
 * register and bit layout than HSW and later.
 *
 * NOTE(review): the @enable parameter, the IVB register assignment and the
 * enable/disable branch were dropped by extraction and restored from
 * context — verify against the original file.
 */
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = I915_READ(reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	I915_WRITE(reg, val);
}
/*
 * Gen9 big-core (SKL/KBL/CFL) display core init sequence: disable DC
 * states, enable the PCH reset handshake, power up PG1 and Misc I/O,
 * bring up CDCLK and DBUF, and reload the DMC firmware when resuming.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/*
 * Reverse of skl_display_core_init(): disable DC states, tear down DBUF
 * and CDCLK, then drop the driver's request on power well 1. Misc I/O is
 * intentionally kept enabled.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
/*
 * Broxton/Geminilake display core init: like SKL but the PCH reset
 * handshake must stay disabled (there is no PCH), and there is no Misc
 * I/O well to enable.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/*
 * Reverse of bxt_display_core_init(): disable DC states, tear down DBUF
 * and CDCLK, then drop the driver's request on power well 1.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
/*
 * Cannonlake display initialization sequence, following the numbered
 * steps of the display init sequence: reset handshake, combo PHYs, PG1,
 * CDCLK, DBUF, then DMC firmware reload on resume.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. Initialize the combo PHYs. */
	cnl_combo_phys_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/*
 * Cannonlake display uninitialization sequence, reverse order of
 * cnl_display_core_init().
 */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. Disable the combo PHYs. */
	cnl_combo_phys_uninit(dev_priv);
}
/*
 * Icelake display initialization sequence: reset handshake, combo PHYs,
 * PG1, CDCLK, DBUF, MBUS setup, then DMC firmware reload on resume.
 */
void icl_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. Initialize the combo PHYs. */
	icl_combo_phys_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/*
 * Icelake display uninitialization sequence, reverse order of
 * icl_display_core_init().
 */
void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable the combo PHYs. */
	icl_combo_phys_uninit(dev_priv);
}
/*
 * Reconstruct the initial shadow value of DISPLAY_PHY_CONTROL on CHV from
 * the common-lane power well states and the per-port lane-ready status,
 * then program it. Also primes the chv_phy_assert[] flags used by later
 * PHY state assertions.
 *
 * NOTE(review): the "mask == 0xf" collapse branches and several dropped
 * lines were restored from context — verify against the original file.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
/*
 * VLV workaround: toggle the display PHY side-band reset once at init so
 * ports and lanes come up reliably. Skipped when the display looks
 * already active (both wells on and the PHY out of reset).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
/* Forward declaration; defined (or stubbed out) below depending on
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM. */
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_fini_hw().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (IS_ICELAKE(i915)) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEN9_BC(i915)) {
		skl_display_core_init(i915, resume);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
	} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_get(i915, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
/**
 * intel_power_domains_fini_hw - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_fini_hw(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(i915, wakeref);
}
/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these function is to keep the rest of power wells enabled until the end
 * of display HW readout (which will acquire the power references reflecting
 * the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}
/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	WARN_ON(power_domains->wakeref);	/* must not already hold one */
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		intel_power_domains_verify_state(i915);
		/* NOTE(review): early return restored from context. */
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915_modparams.disable_power_well) {
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
		intel_power_domains_verify_state(i915);
	}

	if (IS_ICELAKE(i915))
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resume the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		/* Full re-init path: suspend tore the display core down. */
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		WARN_ON(power_domains->wakeref);
		power_domains->wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}
4203 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * Debug helper: dump every power well's refcount and the use count of each
 * domain it serves. Only built under CONFIG_DRM_I915_DEBUG_RUNTIME_PM.
 */
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		DRM_DEBUG_DRIVER("%-25s %d\n",
				 power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			DRM_DEBUG_DRIVER("  %-23s %d\n",
					 intel_display_power_domain_str(domain),
					 power_domains->domain_use_count[domain]);
	}
}
/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		/* Always-on wells must read enabled even with refcount 0. */
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
				  power_well->desc->name,
				  power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			DRM_ERROR("power well %s refcount/domain refcount mismatch "
				  "(refcount %d/domains refcount %d)\n",
				  power_well->desc->name, power_well->count,
				  domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		/* Dump the full table only once per boot to limit log spam. */
		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}
#else

/* Non-debug build: verification is a no-op.
 * NOTE(review): the surrounding #else/#endif were dropped by extraction
 * and restored here — verify placement against the original file. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @i915: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;
	int ret;

	ret = pm_runtime_get_sync(kdev);
	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

	return track_intel_runtime_pm_wakeref(i915);
}
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @i915: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
{
	if (IS_ENABLED(CONFIG_PM)) {
		struct pci_dev *pdev = i915->drm.pdev;
		struct device *kdev = &pdev->dev;

		/*
		 * In cases runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if (pm_runtime_get_if_in_use(kdev) <= 0)
			return 0;	/* NOTE(review): restored from context */
	}

	return track_intel_runtime_pm_wakeref(i915);
}
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @i915: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this functions from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(i915);
	pm_runtime_get_noresume(kdev);

	return track_intel_runtime_pm_wakeref(i915);
}
/**
 * intel_runtime_pm_put_unchecked - release an untracked runtime pm reference
 * @i915: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	untrack_intel_runtime_pm_wakeref(i915);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * Tracked variant of intel_runtime_pm_put_unchecked(): cancels the wakeref
 * cookie before dropping the RPM reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
{
	cancel_intel_runtime_pm_wakeref(i915, wref);
	intel_runtime_pm_put_unchecked(i915);
}
/* NOTE(review): the closing #endif was dropped by extraction and restored
 * here — verify placement against the original file. */
#endif
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @i915: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * becaue the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(i915)) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}
/*
 * Disable runtime pm at driver unload: hand the RPM reference back to the
 * core and undo the autosuspend setup done in intel_runtime_pm_enable().
 */
void intel_runtime_pm_disable(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	/* Transfer rpm ownership back to core */
	WARN(pm_runtime_get_sync(kdev) < 0,
	     "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	/* Drop the permanent reference taken for non-RPM platforms. */
	if (!HAS_RUNTIME_PM(i915))
		pm_runtime_put(kdev);
}
/*
 * Final wakeref bookkeeping at driver teardown: warn if any tracked
 * wakerefs are still outstanding, then release the tracking state.
 *
 * NOTE(review): the WARN() open was dropped by extraction and restored
 * from context.
 */
void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	int count;

	count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
	WARN(count,
	     "i915->runtime_pm.wakeref_count=%d on cleanup\n",
	     count);

	untrack_intel_runtime_pm_wakeref(i915);
}
/* Early (pre-HW) init of the runtime-pm wakeref tracking state. */
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
	init_intel_runtime_pm_wakeref(i915);
}