drivers/gpu/drm/i915/intel_runtime_pm.c
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include <drm/drm_print.h>
33
34#include "i915_drv.h"
35#include "intel_drv.h"
36
37/**
38 * DOC: runtime pm
39 *
40 * The i915 driver supports dynamic enabling and disabling of entire hardware
41 * blocks at runtime. This is especially important on the display side where
42 * software is supposed to control many power gates manually on recent hardware,
43 * since on the GT side a lot of the power management is done by the hardware.
44 * But even there some manual control at the device level is required.
45 *
46 * Since i915 supports a diverse set of platforms with a unified codebase and
47 * hardware engineers just love to shuffle functionality around between power
48 * domains there's a sizeable amount of indirection required. This file provides
49 * generic functions to the driver for grabbing and releasing references for
50 * abstract power domains. It then maps those to the actual power wells
51 * present for a given platform.
52 */
53
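As a rough, purely illustrative sketch (not part of this file), a display code path built on these helpers brackets its hardware access with a power-domain reference; the specific domain below is just an example:

	/* Illustrative usage sketch, assuming the classic get/put helpers. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
	/* ... program pipe A registers while the well is guaranteed on ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);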
54#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
55
56#include <linux/sort.h>
57
58#define STACKDEPTH 8
59
60static noinline depot_stack_handle_t __save_depot_stack(void)
61{
62 unsigned long entries[STACKDEPTH];
63 struct stack_trace trace = {
64 .entries = entries,
65 .max_entries = ARRAY_SIZE(entries),
66 .skip = 1,
67 };
68
69 save_stack_trace(&trace);
70 return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
71}
72
73static void __print_depot_stack(depot_stack_handle_t stack,
74 char *buf, int sz, int indent)
75{
76 unsigned long entries[STACKDEPTH];
77 struct stack_trace trace = {
78 .entries = entries,
79 .max_entries = ARRAY_SIZE(entries),
80 };
81
82 depot_fetch_stack(stack, &trace);
83 snprint_stack_trace(buf, sz, &trace, indent);
84}
85
86static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
87{
88 struct i915_runtime_pm *rpm = &i915->runtime_pm;
89
90 spin_lock_init(&rpm->debug.lock);
91}
92
93static noinline depot_stack_handle_t
94track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
95{
96 struct i915_runtime_pm *rpm = &i915->runtime_pm;
97 depot_stack_handle_t stack, *stacks;
98 unsigned long flags;
99
100 atomic_inc(&rpm->wakeref_count);
101 assert_rpm_wakelock_held(i915);
102
103 if (!HAS_RUNTIME_PM(i915))
104 return -1;
105
106 stack = __save_depot_stack();
107 if (!stack)
108 return -1;
109
110 spin_lock_irqsave(&rpm->debug.lock, flags);
111
112 if (!rpm->debug.count)
113 rpm->debug.last_acquire = stack;
114
115 stacks = krealloc(rpm->debug.owners,
116 (rpm->debug.count + 1) * sizeof(*stacks),
117 GFP_NOWAIT | __GFP_NOWARN);
118 if (stacks) {
119 stacks[rpm->debug.count++] = stack;
120 rpm->debug.owners = stacks;
121 } else {
122 stack = -1;
123 }
124
125 spin_unlock_irqrestore(&rpm->debug.lock, flags);
126
127 return stack;
128}
129
130static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
131 depot_stack_handle_t stack)
132{
133 struct i915_runtime_pm *rpm = &i915->runtime_pm;
134 unsigned long flags, n;
135 bool found = false;
136
137 if (unlikely(stack == -1))
138 return;
139
140 spin_lock_irqsave(&rpm->debug.lock, flags);
141 for (n = rpm->debug.count; n--; ) {
142 if (rpm->debug.owners[n] == stack) {
143 memmove(rpm->debug.owners + n,
144 rpm->debug.owners + n + 1,
145 (--rpm->debug.count - n) * sizeof(stack));
146 found = true;
147 break;
148 }
149 }
150 spin_unlock_irqrestore(&rpm->debug.lock, flags);
151
152 if (WARN(!found,
153 "Unmatched wakeref (tracking %lu), count %u\n",
154 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
155 char *buf;
156
157 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
158 if (!buf)
159 return;
160
161 __print_depot_stack(stack, buf, PAGE_SIZE, 2);
162 DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
163
164 stack = READ_ONCE(rpm->debug.last_release);
165 if (stack) {
166 __print_depot_stack(stack, buf, PAGE_SIZE, 2);
167 DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
168 }
169
170 kfree(buf);
171 }
172}
173
174static int cmphandle(const void *_a, const void *_b)
175{
176 const depot_stack_handle_t * const a = _a, * const b = _b;
177
178 if (*a < *b)
179 return -1;
180 else if (*a > *b)
181 return 1;
182 else
183 return 0;
184}
185
186static void
187__print_intel_runtime_pm_wakeref(struct drm_printer *p,
188 const struct intel_runtime_pm_debug *dbg)
189{
190 unsigned long i;
191 char *buf;
192
193 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
194 if (!buf)
195 return;
196
197 if (dbg->last_acquire) {
198 __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
199 drm_printf(p, "Wakeref last acquired:\n%s", buf);
200 }
201
202 if (dbg->last_release) {
203 __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
204 drm_printf(p, "Wakeref last released:\n%s", buf);
205 }
206
207 drm_printf(p, "Wakeref count: %lu\n", dbg->count);
208
209 sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
210
211 for (i = 0; i < dbg->count; i++) {
212 depot_stack_handle_t stack = dbg->owners[i];
213 unsigned long rep;
214
215 rep = 1;
216 while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
217 rep++, i++;
218 __print_depot_stack(stack, buf, PAGE_SIZE, 2);
219 drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
220 }
221
222 kfree(buf);
223}
224
225static noinline void
226untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
227{
228 struct i915_runtime_pm *rpm = &i915->runtime_pm;
229 struct intel_runtime_pm_debug dbg = {};
230 struct drm_printer p;
231 unsigned long flags;
232
233 assert_rpm_wakelock_held(i915);
234 if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
235 &rpm->debug.lock,
236 flags)) {
237 dbg = rpm->debug;
238
239 rpm->debug.owners = NULL;
240 rpm->debug.count = 0;
241 rpm->debug.last_release = __save_depot_stack();
242
243 spin_unlock_irqrestore(&rpm->debug.lock, flags);
244 }
245 if (!dbg.count)
246 return;
247
248 p = drm_debug_printer("i915");
249 __print_intel_runtime_pm_wakeref(&p, &dbg);
250
251 kfree(dbg.owners);
252}
253
254void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
255 struct drm_printer *p)
256{
257 struct intel_runtime_pm_debug dbg = {};
258
259 do {
260 struct i915_runtime_pm *rpm = &i915->runtime_pm;
261 unsigned long alloc = dbg.count;
262 depot_stack_handle_t *s;
263
264 spin_lock_irq(&rpm->debug.lock);
265 dbg.count = rpm->debug.count;
266 if (dbg.count <= alloc) {
267 memcpy(dbg.owners,
268 rpm->debug.owners,
269 dbg.count * sizeof(*s));
270 }
271 dbg.last_acquire = rpm->debug.last_acquire;
272 dbg.last_release = rpm->debug.last_release;
273 spin_unlock_irq(&rpm->debug.lock);
274 if (dbg.count <= alloc)
275 break;
276
277 s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
278 if (!s)
279 goto out;
280
281 dbg.owners = s;
282 } while (1);
283
284 __print_intel_runtime_pm_wakeref(p, &dbg);
285
286out:
287 kfree(dbg.owners);
288}
289
290#else
291
292static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
293{
294}
295
296static depot_stack_handle_t
297track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
298{
299 atomic_inc(&i915->runtime_pm.wakeref_count);
300 assert_rpm_wakelock_held(i915);
301 return -1;
302}
303
304static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
305{
306 assert_rpm_wakelock_held(i915);
307 atomic_dec(&i915->runtime_pm.wakeref_count);
308}
309
310#endif
311
312bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
313 enum i915_power_well_id power_well_id);
314
315const char *
316intel_display_power_domain_str(enum intel_display_power_domain domain)
317{
318 switch (domain) {
319 case POWER_DOMAIN_PIPE_A:
320 return "PIPE_A";
321 case POWER_DOMAIN_PIPE_B:
322 return "PIPE_B";
323 case POWER_DOMAIN_PIPE_C:
324 return "PIPE_C";
325 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
326 return "PIPE_A_PANEL_FITTER";
327 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
328 return "PIPE_B_PANEL_FITTER";
329 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
330 return "PIPE_C_PANEL_FITTER";
331 case POWER_DOMAIN_TRANSCODER_A:
332 return "TRANSCODER_A";
333 case POWER_DOMAIN_TRANSCODER_B:
334 return "TRANSCODER_B";
335 case POWER_DOMAIN_TRANSCODER_C:
336 return "TRANSCODER_C";
337 case POWER_DOMAIN_TRANSCODER_EDP:
338 return "TRANSCODER_EDP";
339 case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
340 return "TRANSCODER_EDP_VDSC";
341 case POWER_DOMAIN_TRANSCODER_DSI_A:
342 return "TRANSCODER_DSI_A";
343 case POWER_DOMAIN_TRANSCODER_DSI_C:
344 return "TRANSCODER_DSI_C";
345 case POWER_DOMAIN_PORT_DDI_A_LANES:
346 return "PORT_DDI_A_LANES";
347 case POWER_DOMAIN_PORT_DDI_B_LANES:
348 return "PORT_DDI_B_LANES";
349 case POWER_DOMAIN_PORT_DDI_C_LANES:
350 return "PORT_DDI_C_LANES";
351 case POWER_DOMAIN_PORT_DDI_D_LANES:
352 return "PORT_DDI_D_LANES";
353 case POWER_DOMAIN_PORT_DDI_E_LANES:
354 return "PORT_DDI_E_LANES";
355 case POWER_DOMAIN_PORT_DDI_F_LANES:
356 return "PORT_DDI_F_LANES";
357 case POWER_DOMAIN_PORT_DDI_A_IO:
358 return "PORT_DDI_A_IO";
359 case POWER_DOMAIN_PORT_DDI_B_IO:
360 return "PORT_DDI_B_IO";
361 case POWER_DOMAIN_PORT_DDI_C_IO:
362 return "PORT_DDI_C_IO";
363 case POWER_DOMAIN_PORT_DDI_D_IO:
364 return "PORT_DDI_D_IO";
365 case POWER_DOMAIN_PORT_DDI_E_IO:
366 return "PORT_DDI_E_IO";
367 case POWER_DOMAIN_PORT_DDI_F_IO:
368 return "PORT_DDI_F_IO";
369 case POWER_DOMAIN_PORT_DSI:
370 return "PORT_DSI";
371 case POWER_DOMAIN_PORT_CRT:
372 return "PORT_CRT";
373 case POWER_DOMAIN_PORT_OTHER:
374 return "PORT_OTHER";
375 case POWER_DOMAIN_VGA:
376 return "VGA";
377 case POWER_DOMAIN_AUDIO:
378 return "AUDIO";
379 case POWER_DOMAIN_PLLS:
380 return "PLLS";
381 case POWER_DOMAIN_AUX_A:
382 return "AUX_A";
383 case POWER_DOMAIN_AUX_B:
384 return "AUX_B";
385 case POWER_DOMAIN_AUX_C:
386 return "AUX_C";
387 case POWER_DOMAIN_AUX_D:
388 return "AUX_D";
389 case POWER_DOMAIN_AUX_E:
390 return "AUX_E";
391 case POWER_DOMAIN_AUX_F:
392 return "AUX_F";
393 case POWER_DOMAIN_AUX_IO_A:
394 return "AUX_IO_A";
395 case POWER_DOMAIN_AUX_TBT1:
396 return "AUX_TBT1";
397 case POWER_DOMAIN_AUX_TBT2:
398 return "AUX_TBT2";
399 case POWER_DOMAIN_AUX_TBT3:
400 return "AUX_TBT3";
401 case POWER_DOMAIN_AUX_TBT4:
402 return "AUX_TBT4";
403 case POWER_DOMAIN_GMBUS:
404 return "GMBUS";
405 case POWER_DOMAIN_INIT:
406 return "INIT";
407 case POWER_DOMAIN_MODESET:
408 return "MODESET";
409 case POWER_DOMAIN_GT_IRQ:
410 return "GT_IRQ";
411 default:
412 MISSING_CASE(domain);
413 return "?";
414 }
415}
416
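For illustration only (an assumed call site, not taken from this file), the string helper above is typically used to log a domain in human-readable form:

	/* Illustrative sketch: logging a power domain by name. */
	DRM_DEBUG_KMS("power domain %s in use\n",
		      intel_display_power_domain_str(domain));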
417static void intel_power_well_enable(struct drm_i915_private *dev_priv,
418 struct i915_power_well *power_well)
419{
420 DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
421 power_well->desc->ops->enable(dev_priv, power_well);
422 power_well->hw_enabled = true;
423}
424
425static void intel_power_well_disable(struct drm_i915_private *dev_priv,
426 struct i915_power_well *power_well)
427{
428 DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
429 power_well->hw_enabled = false;
430 power_well->desc->ops->disable(dev_priv, power_well);
431}
432
433static void intel_power_well_get(struct drm_i915_private *dev_priv,
434 struct i915_power_well *power_well)
435{
436 if (!power_well->count++)
437 intel_power_well_enable(dev_priv, power_well);
438}
439
440static void intel_power_well_put(struct drm_i915_private *dev_priv,
441 struct i915_power_well *power_well)
442{
443 WARN(!power_well->count, "Use count on power well %s is already zero",
444 power_well->desc->name);
445
446 if (!--power_well->count)
447 intel_power_well_disable(dev_priv, power_well);
448}
449
450/**
451 * __intel_display_power_is_enabled - unlocked check for a power domain
452 * @dev_priv: i915 device instance
453 * @domain: power domain to check
454 *
455 * This is the unlocked version of intel_display_power_is_enabled() and should
456 * only be used from error capture and recovery code where deadlocks are
457 * possible.
458 *
459 * Returns:
460 * True when the power domain is enabled, false otherwise.
461 */
462bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
463 enum intel_display_power_domain domain)
464{
465 struct i915_power_well *power_well;
466 bool is_enabled;
467
468 if (dev_priv->runtime_pm.suspended)
469 return false;
470
471 is_enabled = true;
472
473 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
474 if (power_well->desc->always_on)
475 continue;
476
477 if (!power_well->hw_enabled) {
478 is_enabled = false;
479 break;
480 }
481 }
482
483 return is_enabled;
484}
485
486/**
487 * intel_display_power_is_enabled - check for a power domain
488 * @dev_priv: i915 device instance
489 * @domain: power domain to check
490 *
491 * This function can be used to check the hw power domain state. It is mostly
492 * used in hardware state readout functions. Everywhere else code should rely
493 * upon explicit power domain reference counting to ensure that the hardware
494 * block is powered up before accessing it.
495 *
496 * Callers must hold the relevant modesetting locks to ensure that concurrent
497 * threads can't disable the power well while the caller tries to read a few
498 * registers.
499 *
500 * Returns:
501 * True when the power domain is enabled, false otherwise.
502 */
503bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
504 enum intel_display_power_domain domain)
505{
506 struct i915_power_domains *power_domains;
507 bool ret;
508
509 power_domains = &dev_priv->power_domains;
510
511 mutex_lock(&power_domains->lock);
512 ret = __intel_display_power_is_enabled(dev_priv, domain);
513 mutex_unlock(&power_domains->lock);
514
515 return ret;
516}
517
518/*
519 * Starting with Haswell, we have a "Power Down Well" that can be turned off
520 * when not needed anymore. We have 4 registers that can request the power well
521 * to be enabled, and it will only be disabled if none of the registers is
522 * requesting it to be enabled.
523 */
524static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
525 u8 irq_pipe_mask, bool has_vga)
526{
527 struct pci_dev *pdev = dev_priv->drm.pdev;
528
529 /*
530 * After we re-enable the power well, if we touch VGA register 0x3d5
531 * we'll get unclaimed register interrupts. This stops after we write
532 * anything to the VGA MSR register. The vgacon module uses this
533 * register all the time, so if we unbind our driver and, as a
534 * consequence, bind vgacon, we'll get stuck in an infinite loop at
535 * console_unlock(). So here we make sure to touch the VGA MSR register,
536 * so that vgacon can keep working normally without triggering interrupts
537 * and error messages.
538 */
539 if (has_vga) {
540 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
541 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
542 vga_put(pdev, VGA_RSRC_LEGACY_IO);
543 }
544
545 if (irq_pipe_mask)
546 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
547}
548
549static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
550 u8 irq_pipe_mask)
551{
552 if (irq_pipe_mask)
553 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
554}
555
556
557static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
558 struct i915_power_well *power_well)
559{
560 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
561 int pw_idx = power_well->desc->hsw.idx;
562
563 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
564 WARN_ON(intel_wait_for_register(dev_priv,
565 regs->driver,
566 HSW_PWR_WELL_CTL_STATE(pw_idx),
567 HSW_PWR_WELL_CTL_STATE(pw_idx),
568 1));
569}
570
571static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
572 const struct i915_power_well_regs *regs,
573 int pw_idx)
574{
575 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
576 u32 ret;
577
578 ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
579 ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
580 if (regs->kvmr.reg)
581 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
582 ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
583
584 return ret;
585}
586
587static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
588 struct i915_power_well *power_well)
589{
590 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
591 int pw_idx = power_well->desc->hsw.idx;
592 bool disabled;
593 u32 reqs;
594
595 /*
596 * Bspec doesn't require waiting for PWs to get disabled, but still do
597 * this for paranoia. The known cases where a PW will be forced on:
598 * - a KVMR request on any power well via the KVMR request register
599 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
600 * DEBUG request registers
601 * Skip the wait in case any of the request bits are set and print a
602 * diagnostic message.
603 */
604 wait_for((disabled = !(I915_READ(regs->driver) &
605 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
606 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
607 if (disabled)
608 return;
609
610 DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
611 power_well->desc->name,
612 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
613}
614
615static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
616 enum skl_power_gate pg)
617{
618 /* Timeout 5us for PG#0, for other PGs 1us */
619 WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
620 SKL_FUSE_PG_DIST_STATUS(pg),
621 SKL_FUSE_PG_DIST_STATUS(pg), 1));
622}
623
624static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
625 struct i915_power_well *power_well)
626{
627 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
628 int pw_idx = power_well->desc->hsw.idx;
629 bool wait_fuses = power_well->desc->hsw.has_fuses;
630 enum skl_power_gate uninitialized_var(pg);
631 u32 val;
632
633 if (wait_fuses) {
634 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
635 SKL_PW_CTL_IDX_TO_PG(pw_idx);
636 /*
637 * For PW1 we have to wait both for the PW0/PG0 fuse state
638 * before enabling the power well and PW1/PG1's own fuse
639 * state after the enabling. For all other power wells with
640 * fuses we only have to wait for that PW/PG's fuse state
641 * after the enabling.
642 */
643 if (pg == SKL_PG1)
644 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
645 }
646
647 val = I915_READ(regs->driver);
648 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
649 hsw_wait_for_power_well_enable(dev_priv, power_well);
650
651 /* Display WA #1178: cnl */
652 if (IS_CANNONLAKE(dev_priv) &&
653 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
654 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
655 val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
656 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
657 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
658 }
659
660 if (wait_fuses)
661 gen9_wait_for_power_well_fuses(dev_priv, pg);
662
663 hsw_power_well_post_enable(dev_priv,
664 power_well->desc->hsw.irq_pipe_mask,
665 power_well->desc->hsw.has_vga);
666}
667
668static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
669 struct i915_power_well *power_well)
670{
671 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
672 int pw_idx = power_well->desc->hsw.idx;
673 u32 val;
674
675 hsw_power_well_pre_disable(dev_priv,
676 power_well->desc->hsw.irq_pipe_mask);
677
678 val = I915_READ(regs->driver);
679 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
680 hsw_wait_for_power_well_disable(dev_priv, power_well);
681}
682
683#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
684
685static void
686icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
687 struct i915_power_well *power_well)
688{
689 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
690 int pw_idx = power_well->desc->hsw.idx;
691 enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
692 u32 val;
693
694 val = I915_READ(regs->driver);
695 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
696
697 val = I915_READ(ICL_PORT_CL_DW12(port));
698 I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
699
700 hsw_wait_for_power_well_enable(dev_priv, power_well);
701
702 /* Display WA #1178: icl */
703 if (IS_ICELAKE(dev_priv) &&
704 pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
705 !intel_bios_is_port_edp(dev_priv, port)) {
706 val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
707 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
708 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
709 }
710}
711
712static void
713icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
714 struct i915_power_well *power_well)
715{
716 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
717 int pw_idx = power_well->desc->hsw.idx;
718 enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
719 u32 val;
720
721 val = I915_READ(ICL_PORT_CL_DW12(port));
722 I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
723
724 val = I915_READ(regs->driver);
725 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
726
727 hsw_wait_for_power_well_disable(dev_priv, power_well);
728}
729
730#define ICL_AUX_PW_TO_CH(pw_idx) \
731 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
732
733static void
734icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
735 struct i915_power_well *power_well)
736{
737 enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
738 u32 val;
739
740 val = I915_READ(DP_AUX_CH_CTL(aux_ch));
741 val &= ~DP_AUX_CH_CTL_TBT_IO;
742 if (power_well->desc->hsw.is_tc_tbt)
743 val |= DP_AUX_CH_CTL_TBT_IO;
744 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
745
746 hsw_power_well_enable(dev_priv, power_well);
747}
748
749/*
750 * We should only use the power well if we explicitly asked the hardware to
751 * enable it, so check if it's enabled and also check if we've requested it to
752 * be enabled.
753 */
754static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
755 struct i915_power_well *power_well)
756{
757 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
758 enum i915_power_well_id id = power_well->desc->id;
759 int pw_idx = power_well->desc->hsw.idx;
760 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
761 HSW_PWR_WELL_CTL_STATE(pw_idx);
762 u32 val;
763
764 val = I915_READ(regs->driver);
765
766 /*
767 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
768 * and the MISC_IO PW will not be restored, so check instead for the
769 * BIOS's own request bits, which are forced-on for these power wells
770 * when exiting DC5/6.
771 */
772 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
773 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
774 val |= I915_READ(regs->bios);
775
776 return (val & mask) == mask;
777}
778
779static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
780{
781 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
782 "DC9 already programmed to be enabled.\n");
783 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
784 "DC5 still not disabled to enable DC9.\n");
785 WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
786 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
787 "Power well 2 on.\n");
788 WARN_ONCE(intel_irqs_enabled(dev_priv),
789 "Interrupts not disabled yet.\n");
790
791 /*
792 * TODO: check for the following to verify the conditions to enter DC9
793 * state are satisfied:
794 * 1] Check relevant display engine registers to verify if mode set
795 * disable sequence was followed.
796 * 2] Check if display uninitialize sequence is initialized.
797 */
798}
799
800static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
801{
802 WARN_ONCE(intel_irqs_enabled(dev_priv),
803 "Interrupts not disabled yet.\n");
804 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
805 "DC5 still not disabled.\n");
806
807 /*
808 * TODO: check for the following to verify DC9 state was indeed
809 * entered before programming to disable it:
810 * 1] Check relevant display engine registers to verify if mode
811 * set disable sequence was followed.
812 * 2] Check if display uninitialize sequence is initialized.
813 */
814}
815
816static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
817 u32 state)
818{
819 int rewrites = 0;
820 int rereads = 0;
821 u32 v;
822
823 I915_WRITE(DC_STATE_EN, state);
824
825 /* It has been observed that disabling the dc6 state sometimes
826 * doesn't stick and dmc keeps returning old value. Make sure
827 * the write really sticks enough times and also force rewrite until
828 * we are confident that state is exactly what we want.
829 */
830 do {
831 v = I915_READ(DC_STATE_EN);
832
833 if (v != state) {
834 I915_WRITE(DC_STATE_EN, state);
835 rewrites++;
836 rereads = 0;
837 } else if (rereads++ > 5) {
838 break;
839 }
840
841 } while (rewrites < 100);
842
843 if (v != state)
844 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
845 state, v);
846
847 /* Most of the times we need one retry, avoid spam */
848 if (rewrites > 1)
849 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
850 state, rewrites);
851}
852
853static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
854{
855 u32 mask;
856
857 mask = DC_STATE_EN_UPTO_DC5;
858 if (INTEL_GEN(dev_priv) >= 11)
859 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
860 else if (IS_GEN9_LP(dev_priv))
861 mask |= DC_STATE_EN_DC9;
862 else
863 mask |= DC_STATE_EN_UPTO_DC6;
864
865 return mask;
866}
867
868void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
869{
870 u32 val;
871
872 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
873
874 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
875 dev_priv->csr.dc_state, val);
876 dev_priv->csr.dc_state = val;
877}
878
879/**
880 * gen9_set_dc_state - set target display C power state
881 * @dev_priv: i915 device instance
882 * @state: target DC power state
883 * - DC_STATE_DISABLE
884 * - DC_STATE_EN_UPTO_DC5
885 * - DC_STATE_EN_UPTO_DC6
886 * - DC_STATE_EN_DC9
887 *
888 * Signal to DMC firmware/HW the target DC power state passed in @state.
889 * DMC/HW can turn off individual display clocks and power rails when entering
890 * a deeper DC power state (higher in number) and turns these back when exiting
891 * that state to a shallower power state (lower in number). The HW will decide
892 * when to actually enter a given state on an on-demand basis, for instance
893 * depending on the active state of display pipes. The state of display
894 * registers backed by affected power rails are saved/restored as needed.
895 *
896 * Based on the above enabling a deeper DC power state is asynchronous wrt.
897 * enabling it. Disabling a deeper power state is synchronous: for instance
898 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
899 * back on and register state is restored. This is guaranteed by the MMIO write
900 * to DC_STATE_EN blocking until the state is restored.
901 */
902static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
903{
904 u32 val;
905 u32 mask;
906
907 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
908 state &= dev_priv->csr.allowed_dc_mask;
909
910 val = I915_READ(DC_STATE_EN);
911 mask = gen9_dc_mask(dev_priv);
912 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
913 val & mask, state);
914
915 /* Check if DMC is ignoring our DC state requests */
916 if ((val & mask) != dev_priv->csr.dc_state)
917 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
918 dev_priv->csr.dc_state, val & mask);
919
920 val &= ~mask;
921 val |= state;
922
923 gen9_write_dc_state(dev_priv, val);
924
925 dev_priv->csr.dc_state = val & mask;
926}
927
928void bxt_enable_dc9(struct drm_i915_private *dev_priv)
929{
930 assert_can_enable_dc9(dev_priv);
931
932 DRM_DEBUG_KMS("Enabling DC9\n");
933 /*
934 * Power sequencer reset is not needed on
935 * platforms with South Display Engine on PCH,
936 * because PPS registers are always on.
937 */
938 if (!HAS_PCH_SPLIT(dev_priv))
939 intel_power_sequencer_reset(dev_priv);
940 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
941}
942
943void bxt_disable_dc9(struct drm_i915_private *dev_priv)
944{
945 assert_can_disable_dc9(dev_priv);
946
947 DRM_DEBUG_KMS("Disabling DC9\n");
948
949 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
950
951 intel_pps_unlock_regs_wa(dev_priv);
952}
953
954static void assert_csr_loaded(struct drm_i915_private *dev_priv)
955{
956 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
957 "CSR program storage start is NULL\n");
958 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
959 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
960}
961
962static struct i915_power_well *
963lookup_power_well(struct drm_i915_private *dev_priv,
964 enum i915_power_well_id power_well_id)
965{
966 struct i915_power_well *power_well;
967
968 for_each_power_well(dev_priv, power_well)
969 if (power_well->desc->id == power_well_id)
970 return power_well;
971
972 /*
973 * It's not feasible to add error checking code to the callers since
974 * this condition really shouldn't happen and it doesn't even make sense
975 * to abort things like display initialization sequences. Just return
976 * the first power well and hope the WARN gets reported so we can fix
977 * our driver.
978 */
979 WARN(1, "Power well %d not defined for this platform\n", power_well_id);
980 return &dev_priv->power_domains.power_wells[0];
981}
982
983static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
984{
985 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
986 SKL_DISP_PW_2);
987
988 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
989
990 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
991 "DC5 already programmed to be enabled.\n");
992 assert_rpm_wakelock_held(dev_priv);
993
994 assert_csr_loaded(dev_priv);
995}
996
997void gen9_enable_dc5(struct drm_i915_private *dev_priv)
998{
999 assert_can_enable_dc5(dev_priv);
1000
1001 DRM_DEBUG_KMS("Enabling DC5\n");
1002
1003 /* Wa Display #1183: skl,kbl,cfl */
1004 if (IS_GEN9_BC(dev_priv))
1005 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1006 SKL_SELECT_ALTERNATE_DC_EXIT);
1007
1008 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1009}
1010
1011static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1012{
1013 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1014 "Backlight is not disabled.\n");
1015 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
1016 "DC6 already programmed to be enabled.\n");
1017
1018 assert_csr_loaded(dev_priv);
1019}
1020
1021void skl_enable_dc6(struct drm_i915_private *dev_priv)
1022{
1023 assert_can_enable_dc6(dev_priv);
1024
1025 DRM_DEBUG_KMS("Enabling DC6\n");
1026
1027 /* Wa Display #1183: skl,kbl,cfl */
1028 if (IS_GEN9_BC(dev_priv))
1029 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1030 SKL_SELECT_ALTERNATE_DC_EXIT);
1031
1032 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1033}
1034
1035static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1036 struct i915_power_well *power_well)
1037{
1038 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1039 int pw_idx = power_well->desc->hsw.idx;
1040 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1041 u32 bios_req = I915_READ(regs->bios);
1042
1043 /* Take over the request bit if set by BIOS. */
1044 if (bios_req & mask) {
1045 u32 drv_req = I915_READ(regs->driver);
1046
1047 if (!(drv_req & mask))
1048 I915_WRITE(regs->driver, drv_req | mask);
1049 I915_WRITE(regs->bios, bios_req & ~mask);
1050 }
1051}
1052
1053static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1054 struct i915_power_well *power_well)
1055{
1056 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1057}
1058
1059static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1060 struct i915_power_well *power_well)
1061{
1062 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1063}
1064
1065static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1066 struct i915_power_well *power_well)
1067{
1068 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1069}
1070
1071static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1072{
1073 struct i915_power_well *power_well;
1074
1075 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1076 if (power_well->count > 0)
1077 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1078
1079 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1080 if (power_well->count > 0)
1081 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1082
1083 if (IS_GEMINILAKE(dev_priv)) {
1084 power_well = lookup_power_well(dev_priv,
1085 GLK_DISP_PW_DPIO_CMN_C);
1086 if (power_well->count > 0)
1087 bxt_ddi_phy_verify_state(dev_priv,
1088 power_well->desc->bxt.phy);
1089 }
1090}
1091
1092static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1093 struct i915_power_well *power_well)
1094{
1095 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
1096}
1097
1098static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1099{
1100 u32 tmp = I915_READ(DBUF_CTL);
1101
1102 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1103 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1104 "Unexpected DBuf power state (0x%08x)\n", tmp);
1105}
1106
1107static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1108 struct i915_power_well *power_well)
1109{
1110 struct intel_cdclk_state cdclk_state = {};
1111
1112 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1113
1114 dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
1115 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1116 WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
1117
1118 gen9_assert_dbuf_enabled(dev_priv);
1119
1120 if (IS_GEN9_LP(dev_priv))
1121 bxt_verify_ddi_phy_power_wells(dev_priv);
1122
1123 if (INTEL_GEN(dev_priv) >= 11)
1124 /*
1125 * DMC retains HW context only for port A, the other combo
1126 * PHY's HW context for port B is lost after DC transitions,
1127 * so we need to restore it manually.
1128 */
1129 icl_combo_phys_init(dev_priv);
1130}
1131
1132static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1133 struct i915_power_well *power_well)
1134{
1135 if (!dev_priv->csr.dmc_payload)
1136 return;
1137
1138 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
1139 skl_enable_dc6(dev_priv);
1140 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
1141 gen9_enable_dc5(dev_priv);
1142}
1143
1144static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1145 struct i915_power_well *power_well)
1146{
1147}
1148
1149static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1150 struct i915_power_well *power_well)
1151{
1152}
1153
1154static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1155 struct i915_power_well *power_well)
1156{
1157 return true;
1158}
1159
1160static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1161 struct i915_power_well *power_well)
1162{
1163 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1164 i830_enable_pipe(dev_priv, PIPE_A);
1165 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1166 i830_enable_pipe(dev_priv, PIPE_B);
1167}
1168
1169static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1170 struct i915_power_well *power_well)
1171{
1172 i830_disable_pipe(dev_priv, PIPE_B);
1173 i830_disable_pipe(dev_priv, PIPE_A);
1174}
1175
1176static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1177 struct i915_power_well *power_well)
1178{
1179 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1180 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1181}
1182
1183static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1184 struct i915_power_well *power_well)
1185{
1186 if (power_well->count > 0)
1187 i830_pipes_power_well_enable(dev_priv, power_well);
1188 else
1189 i830_pipes_power_well_disable(dev_priv, power_well);
1190}
1191
1192static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1193 struct i915_power_well *power_well, bool enable)
1194{
1195 int pw_idx = power_well->desc->vlv.idx;
1196 u32 mask;
1197 u32 state;
1198 u32 ctrl;
1199
1200 mask = PUNIT_PWRGT_MASK(pw_idx);
1201 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1202 PUNIT_PWRGT_PWR_GATE(pw_idx);
1203
1204 mutex_lock(&dev_priv->pcu_lock);
1205
1206#define COND \
1207 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1208
1209 if (COND)
1210 goto out;
1211
1212 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1213 ctrl &= ~mask;
1214 ctrl |= state;
1215 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1216
1217 if (wait_for(COND, 100))
1218 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1219 state,
1220 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1221
1222#undef COND
1223
1224out:
1225 mutex_unlock(&dev_priv->pcu_lock);
1226}
1227
1228static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1229 struct i915_power_well *power_well)
1230{
1231 vlv_set_power_well(dev_priv, power_well, true);
1232}
1233
1234static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1235 struct i915_power_well *power_well)
1236{
1237 vlv_set_power_well(dev_priv, power_well, false);
1238}
1239
1240static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1241 struct i915_power_well *power_well)
1242{
1243 int pw_idx = power_well->desc->vlv.idx;
1244 bool enabled = false;
1245 u32 mask;
1246 u32 state;
1247 u32 ctrl;
1248
1249 mask = PUNIT_PWRGT_MASK(pw_idx);
1250 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1251
1252 mutex_lock(&dev_priv->pcu_lock);
1253
1254 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1255 /*
1256 * We only ever set the power-on and power-gate states, anything
1257 * else is unexpected.
1258 */
1259 WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1260 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1261 if (state == ctrl)
1262 enabled = true;
1263
1264 /*
1265 * A transient state at this point would mean some unexpected party
1266 * is poking at the power controls too.
1267 */
1268 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1269 WARN_ON(ctrl != state);
1270
1271 mutex_unlock(&dev_priv->pcu_lock);
1272
1273 return enabled;
1274}
1275
1276static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1277{
1278 u32 val;
1279
1280 /*
1281 * On driver load, a pipe may be active and driving a DSI display.
1282 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1283 * (and never recovering) in this case. intel_dsi_post_disable() will
1284 * clear it when we turn off the display.
1285 */
1286 val = I915_READ(DSPCLK_GATE_D);
1287 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1288 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1289 I915_WRITE(DSPCLK_GATE_D, val);
766078df
VS
1290
1291 /*
1292 * Disable trickle feed and enable pnd deadline calculation
1293 */
1294 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1295 I915_WRITE(CBR1_VLV, 0);
1296
1297 WARN_ON(dev_priv->rawclk_freq == 0);
1298
1299 I915_WRITE(RAWCLK_FREQ_VLV,
1300 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1301}
1302
1303static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1304{
1305 struct intel_encoder *encoder;
1306 enum pipe pipe;
1307
1308 /*
1309 * Enable the CRI clock source so we can get at the
1310 * display and the reference clock for VGA
1311 * hotplug / manual detection. Supposedly DSI also
1312 * needs the ref clock up and running.
1313 *
1314 * CHV DPLL B/C have some issues if VGA mode is enabled.
1315 */
1316 for_each_pipe(dev_priv, pipe) {
1317 u32 val = I915_READ(DPLL(pipe));
1318
1319 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1320 if (pipe != PIPE_A)
1321 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1322
1323 I915_WRITE(DPLL(pipe), val);
1324 }
1325
1326 vlv_init_display_clock_gating(dev_priv);
1327
1328 spin_lock_irq(&dev_priv->irq_lock);
1329 valleyview_enable_display_irqs(dev_priv);
1330 spin_unlock_irq(&dev_priv->irq_lock);
1331
1332 /*
1333 * During driver initialization/resume we can avoid restoring the
1334 * part of the HW/SW state that will be inited anyway explicitly.
1335 */
1336 if (dev_priv->power_domains.initializing)
1337 return;
1338
1339 intel_hpd_init(dev_priv);
1340
1341 /* Re-enable the ADPA, if we have one */
1342 for_each_intel_encoder(&dev_priv->drm, encoder) {
1343 if (encoder->type == INTEL_OUTPUT_ANALOG)
1344 intel_crt_reset(&encoder->base);
1345 }
1346
1347 i915_redisable_vga_power_on(dev_priv);
1348
1349 intel_pps_unlock_regs_wa(dev_priv);
1350}
1351
1352static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1353{
1354 spin_lock_irq(&dev_priv->irq_lock);
1355 valleyview_disable_display_irqs(dev_priv);
1356 spin_unlock_irq(&dev_priv->irq_lock);
1357
1358 /* make sure we're done processing display irqs */
1359 synchronize_irq(dev_priv->drm.irq);
1360
1361 intel_power_sequencer_reset(dev_priv);
1362
1363 /* Prevent us from re-enabling polling on accident in late suspend */
1364 if (!dev_priv->drm.dev->power.is_suspended)
1365 intel_hpd_poll_init(dev_priv);
1366}
1367
1368static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1369 struct i915_power_well *power_well)
1370{
1371 vlv_set_power_well(dev_priv, power_well, true);
1372
1373 vlv_display_power_well_init(dev_priv);
1374}
1375
1376static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1377 struct i915_power_well *power_well)
1378{
1379 vlv_display_power_well_deinit(dev_priv);
1380
1381 vlv_set_power_well(dev_priv, power_well, false);
1382}
1383
1384static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1385 struct i915_power_well *power_well)
1386{
1387 /* since ref/cri clock was enabled */
1388 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1389
1390 vlv_set_power_well(dev_priv, power_well, true);
1391
1392 /*
1393 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1394 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1395 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1396 * b. The other bits such as sfr settings / modesel may all
1397 * be set to 0.
1398 *
1399 * This should only be done on init and resume from S3 with
1400 * both PLLs disabled, or we risk losing DPIO and PLL
1401 * synchronization.
1402 */
1403 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1404}
1405
1406static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1407 struct i915_power_well *power_well)
1408{
1409 enum pipe pipe;
1410
1411 for_each_pipe(dev_priv, pipe)
1412 assert_pll_disabled(dev_priv, pipe);
1413
1414 /* Assert common reset */
1415 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1416
1417 vlv_set_power_well(dev_priv, power_well, false);
1418}
1419
1420#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1421
1422#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1423
1424static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1425{
1426 struct i915_power_well *cmn_bc =
1427 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1428 struct i915_power_well *cmn_d =
1429 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1430 u32 phy_control = dev_priv->chv_phy_control;
1431 u32 phy_status = 0;
1432 u32 phy_status_mask = 0xffffffff;
1433
1434 /*
1435 * The BIOS can leave the PHY in some weird state
1436 * where it doesn't fully power down some parts.
1437 * Disable the asserts until the PHY has been fully
1438 * reset (ie. the power well has been disabled at
1439 * least once).
1440 */
1441 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1442 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1443 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1444 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1445 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1446 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1447 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1448
1449 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1450 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1451 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1452 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1453
1454 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1455 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1456
1457 /* this assumes override is only used to enable lanes */
1458 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1459 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1460
1461 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1462 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1463
1464 /* CL1 is on whenever anything is on in either channel */
1465 if (BITS_SET(phy_control,
1466 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1467 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1468 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1469
1470 /*
1471 * The DPLLB check accounts for the pipe B + port A usage
1472 * with CL2 powered up but all the lanes in the second channel
1473 * powered down.
1474 */
1475 if (BITS_SET(phy_control,
1476 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1477 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1478 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1479
1480 if (BITS_SET(phy_control,
1481 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1482 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1483 if (BITS_SET(phy_control,
1484 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1485 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1486
1487 if (BITS_SET(phy_control,
1488 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1489 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1490 if (BITS_SET(phy_control,
1491 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1492 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1493 }
1494
1495 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1496 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1497
1498 /* this assumes override is only used to enable lanes */
1499 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1500 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1501
1502 if (BITS_SET(phy_control,
1503 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1504 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1505
1506 if (BITS_SET(phy_control,
1507 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1508 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1509 if (BITS_SET(phy_control,
1510 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1511 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1512 }
1513
1514 phy_status &= phy_status_mask;
1515
1516 /*
1517 * The PHY may be busy with some initial calibration and whatnot,
1518 * so the power state can take a while to actually change.
1519 */
1520 if (intel_wait_for_register(dev_priv,
1521 DISPLAY_PHY_STATUS,
1522 phy_status_mask,
1523 phy_status,
1524 10))
1525 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1526 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1527 phy_status, dev_priv->chv_phy_control);
1528}
1529
1530#undef BITS_SET
1531
1532static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1533 struct i915_power_well *power_well)
1534{
1535 enum dpio_phy phy;
1536 enum pipe pipe;
1537 u32 tmp;
1538
1539 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1540 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1541
1542 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1543 pipe = PIPE_A;
1544 phy = DPIO_PHY0;
1545 } else {
1546 pipe = PIPE_C;
1547 phy = DPIO_PHY1;
1548 }
1549
1550 /* since ref/cri clock was enabled */
1551 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1552 vlv_set_power_well(dev_priv, power_well, true);
1553
1554 /* Poll for phypwrgood signal */
1555 if (intel_wait_for_register(dev_priv,
1556 DISPLAY_PHY_STATUS,
1557 PHY_POWERGOOD(phy),
1558 PHY_POWERGOOD(phy),
1559 1))
1560 DRM_ERROR("Display PHY %d is not powered up\n", phy);
1561
1562 mutex_lock(&dev_priv->sb_lock);
1563
1564 /* Enable dynamic power down */
1565 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1566 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1567 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1568 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1569
1570 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1571 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1572 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1573 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1574 } else {
1575 /*
1576 * Force the non-existing CL2 off. BXT does this
1577 * too, so maybe it saves some power even though
1578 * CL2 doesn't exist?
1579 */
1580 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1581 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1582 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1583 }
1584
1585 mutex_unlock(&dev_priv->sb_lock);
1586
1587 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1588 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1589
1590 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1591 phy, dev_priv->chv_phy_control);
1592
1593 assert_chv_phy_status(dev_priv);
1594}
1595
1596static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1597 struct i915_power_well *power_well)
1598{
1599 enum dpio_phy phy;
1600
1601 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1602 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1603
1604 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1605 phy = DPIO_PHY0;
1606 assert_pll_disabled(dev_priv, PIPE_A);
1607 assert_pll_disabled(dev_priv, PIPE_B);
1608 } else {
1609 phy = DPIO_PHY1;
1610 assert_pll_disabled(dev_priv, PIPE_C);
1611 }
1612
1613 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1614 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1615
1616 vlv_set_power_well(dev_priv, power_well, false);
1617
1618 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1619 phy, dev_priv->chv_phy_control);
1620
1621 /* PHY is fully reset now, so we can enable the PHY state asserts */
1622 dev_priv->chv_phy_assert[phy] = true;
1623
1624 assert_chv_phy_status(dev_priv);
1625}
1626
1627static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1628 enum dpio_channel ch, bool override, unsigned int mask)
1629{
1630 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1631 u32 reg, val, expected, actual;
1632
1633 /*
1634 * The BIOS can leave the PHY in some weird state
1635 * where it doesn't fully power down some parts.
1636 * Disable the asserts until the PHY has been fully
1637 * reset (ie. the power well has been disabled at
1638 * least once).
1639 */
1640 if (!dev_priv->chv_phy_assert[phy])
1641 return;
1642
6669e39f
VS
1643 if (ch == DPIO_CH0)
1644 reg = _CHV_CMN_DW0_CH0;
1645 else
1646 reg = _CHV_CMN_DW6_CH1;
1647
1648 mutex_lock(&dev_priv->sb_lock);
1649 val = vlv_dpio_read(dev_priv, pipe, reg);
1650 mutex_unlock(&dev_priv->sb_lock);
1651
1652 /*
1653 * This assumes !override is only used when the port is disabled.
1654 * All lanes should power down even without the override when
1655 * the port is disabled.
1656 */
1657 if (!override || mask == 0xf) {
1658 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1659 /*
1660 * If CH1 common lane is not active anymore
1661 * (e.g. for pipe B DPLL) the entire channel will
1662 * shut down, which causes the common lane registers
1663 * to read as 0. That means we can't actually check
1664 * the lane power down status bits, but as the entire
1665 * register reads as 0 it's a good indication that the
1666 * channel is indeed entirely powered down.
1667 */
1668 if (ch == DPIO_CH1 && val == 0)
1669 expected = 0;
1670 } else if (mask != 0x0) {
1671 expected = DPIO_ANYDL_POWERDOWN;
1672 } else {
1673 expected = 0;
1674 }
1675
1676 if (ch == DPIO_CH0)
1677 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1678 else
1679 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1680 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1681
1682 WARN(actual != expected,
1683 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1684 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1685 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1686 reg, val);
1687}
1688
b0b33846
VS
1689bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1690 enum dpio_channel ch, bool override)
1691{
1692 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1693 bool was_override;
1694
1695 mutex_lock(&power_domains->lock);
1696
1697 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1698
1699 if (override == was_override)
1700 goto out;
1701
1702 if (override)
1703 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1704 else
1705 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1706
1707 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1708
1709 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1710 phy, ch, dev_priv->chv_phy_control);
1711
30142273
VS
1712 assert_chv_phy_status(dev_priv);
1713
b0b33846
VS
1714out:
1715 mutex_unlock(&power_domains->lock);
1716
1717 return was_override;
1718}
1719
e0fce78f
VS
1720void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1721 bool override, unsigned int mask)
1722{
1723 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1724 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1725 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1726 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1727
1728 mutex_lock(&power_domains->lock);
1729
1730 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1731 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1732
1733 if (override)
1734 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1735 else
1736 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1737
1738 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1739
1740 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1741 phy, ch, mask, dev_priv->chv_phy_control);
1742
30142273
VS
1743 assert_chv_phy_status(dev_priv);
1744
6669e39f
VS
1745 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1746
e0fce78f 1747 mutex_unlock(&power_domains->lock);
9c065a7d
SV
1748}
1749
1750static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1751 struct i915_power_well *power_well)
1752{
f49193cd 1753 enum pipe pipe = PIPE_A;
9c065a7d
SV
1754 bool enabled;
1755 u32 state, ctrl;
1756
9f817501 1757 mutex_lock(&dev_priv->pcu_lock);
9c065a7d
SV
1758
1759 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1760 /*
1761 * We only ever set the power-on and power-gate states, anything
1762 * else is unexpected.
1763 */
1764 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1765 enabled = state == DP_SSS_PWR_ON(pipe);
1766
1767 /*
1768 * A transient state at this point would mean some unexpected party
1769 * is poking at the power controls too.
1770 */
1771 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1772 WARN_ON(ctrl << 16 != state);
1773
9f817501 1774 mutex_unlock(&dev_priv->pcu_lock);
9c065a7d
SV
1775
1776 return enabled;
1777}
1778
1779static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1780 struct i915_power_well *power_well,
1781 bool enable)
1782{
f49193cd 1783 enum pipe pipe = PIPE_A;
9c065a7d
SV
1784 u32 state;
1785 u32 ctrl;
1786
1787 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1788
9f817501 1789 mutex_lock(&dev_priv->pcu_lock);
9c065a7d
SV
1790
1791#define COND \
1792 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1793
1794 if (COND)
1795 goto out;
1796
1797 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1798 ctrl &= ~DP_SSC_MASK(pipe);
1799 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1800 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1801
1802 if (wait_for(COND, 100))
7e35ab88 1803 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
9c065a7d
SV
1804 state,
1805 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1806
1807#undef COND
1808
1809out:
9f817501 1810 mutex_unlock(&dev_priv->pcu_lock);
9c065a7d
SV
1811}
1812
9c065a7d
SV
1813static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1814 struct i915_power_well *power_well)
1815{
9c065a7d 1816 chv_set_pipe_power_well(dev_priv, power_well, true);
afd6275d 1817
2be7d540 1818 vlv_display_power_well_init(dev_priv);
9c065a7d
SV
1819}
1820
1821static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1822 struct i915_power_well *power_well)
1823{
2be7d540 1824 vlv_display_power_well_deinit(dev_priv);
afd6275d 1825
9c065a7d
SV
1826 chv_set_pipe_power_well(dev_priv, power_well, false);
1827}
1828
09731280
ID
1829static void
1830__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1831 enum intel_display_power_domain domain)
1832{
1833 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1834 struct i915_power_well *power_well;
09731280 1835
75ccb2ec 1836 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
b409ca95 1837 intel_power_well_get(dev_priv, power_well);
09731280
ID
1838
1839 power_domains->domain_use_count[domain]++;
1840}
1841
e4e7684f
SV
1842/**
1843 * intel_display_power_get - grab a power domain reference
1844 * @dev_priv: i915 device instance
1845 * @domain: power domain to reference
1846 *
1847 * This function grabs a power domain reference for @domain and ensures that the
1848 * power domain and all its parents are powered up. Therefore users should only
1849 * grab a reference to the innermost power domain they need.
1850 *
1851 * Any power domain reference obtained by this function must have a symmetric
1852 * call to intel_display_power_put() to release the reference again.
1853 */
0e6e0be4
CW
1854intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1855 enum intel_display_power_domain domain)
9c065a7d 1856{
09731280 1857 struct i915_power_domains *power_domains = &dev_priv->power_domains;
0e6e0be4 1858 intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
9c065a7d 1859
09731280
ID
1860 mutex_lock(&power_domains->lock);
1861
1862 __intel_display_power_get_domain(dev_priv, domain);
1863
1864 mutex_unlock(&power_domains->lock);
0e6e0be4
CW
1865
1866 return wakeref;
09731280
ID
1867}
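/*
 * A minimal usage sketch of the get/put pairing required by the kerneldoc
 * above, illustrative only: the helper name and the choice of
 * POWER_DOMAIN_AUX_B are assumptions made for the example.
 */
static void example_aux_b_access(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/* Powers up AUX_B (and any parent wells) and takes a runtime pm wakeref. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);

	/* ... AUX channel register access would go here ... */

	/* Drops the reference; the well may power down if this was the last user. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B, wakeref);
}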
1868
1869/**
1870 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1871 * @dev_priv: i915 device instance
1872 * @domain: power domain to reference
1873 *
1874 * This function grabs a power domain reference for @domain only if the domain
1875 * is already enabled, keeping it enabled for as long as the reference is held;
1876 * it returns 0 without taking a reference if the domain is powered down.
1877 *
1878 * Any power domain reference obtained by this function must have a symmetric
1879 * call to intel_display_power_put() to release the reference again.
1880 */
0e6e0be4
CW
1881intel_wakeref_t
1882intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1883 enum intel_display_power_domain domain)
09731280
ID
1884{
1885 struct i915_power_domains *power_domains = &dev_priv->power_domains;
0e6e0be4 1886 intel_wakeref_t wakeref;
09731280
ID
1887 bool is_enabled;
1888
0e6e0be4
CW
1889 wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
1890 if (!wakeref)
09731280 1891 return 0;
9c065a7d
SV
1892
1893 mutex_lock(&power_domains->lock);
1894
09731280
ID
1895 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1896 __intel_display_power_get_domain(dev_priv, domain);
1897 is_enabled = true;
1898 } else {
1899 is_enabled = false;
9c065a7d
SV
1900 }
1901
9c065a7d 1902 mutex_unlock(&power_domains->lock);
09731280 1903
0e6e0be4
CW
1904 if (!is_enabled) {
1905 intel_runtime_pm_put(dev_priv, wakeref);
1906 wakeref = 0;
1907 }
09731280 1908
0e6e0be4 1909 return wakeref;
9c065a7d
SV
1910}
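/*
 * A sketch of the conditional pattern intel_display_power_get_if_enabled() is
 * meant for, e.g. readout paths that must not power hardware up themselves.
 * The helper name and the use of POWER_DOMAIN_PIPE_A are assumptions for the
 * sake of the example; a zero wakeref means no reference was taken.
 */
static bool example_readout_if_powered(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_PIPE_A);
	if (!wakeref)
		return false;	/* Domain is off; skip the register readout. */

	/* ... pipe A register reads would go here ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);

	return true;
}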
1911
0e6e0be4
CW
1912static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1913 enum intel_display_power_domain domain)
9c065a7d
SV
1914{
1915 struct i915_power_domains *power_domains;
1916 struct i915_power_well *power_well;
9c065a7d
SV
1917
1918 power_domains = &dev_priv->power_domains;
1919
1920 mutex_lock(&power_domains->lock);
1921
11c86db8
DS
1922 WARN(!power_domains->domain_use_count[domain],
1923 "Use count on domain %s is already zero\n",
1924 intel_display_power_domain_str(domain));
9c065a7d
SV
1925 power_domains->domain_use_count[domain]--;
1926
56d4eac0 1927 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
b409ca95 1928 intel_power_well_put(dev_priv, power_well);
9c065a7d
SV
1929
1930 mutex_unlock(&power_domains->lock);
0e6e0be4 1931}
9c065a7d 1932
0e6e0be4
CW
1933/**
1934 * intel_display_power_put_unchecked - release an unchecked power domain reference
1935 * @dev_priv: i915 device instance
1936 * @domain: power domain to reference
1937 *
1938 * This function drops the power domain reference obtained by
1939 * intel_display_power_get() and might power down the corresponding hardware
1940 * block right away if this is the last reference.
1941 */
1942void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1943 enum intel_display_power_domain domain)
1944{
1945 __intel_display_power_put(dev_priv, domain);
16e4dd03 1946 intel_runtime_pm_put_unchecked(dev_priv);
9c065a7d
SV
1947}
1948
0e6e0be4
CW
1949#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1950void intel_display_power_put(struct drm_i915_private *dev_priv,
1951 enum intel_display_power_domain domain,
1952 intel_wakeref_t wakeref)
1953{
1954 __intel_display_power_put(dev_priv, domain);
1955 intel_runtime_pm_put(dev_priv, wakeref);
1956}
1957#endif
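/*
 * A short sketch contrasting the two put() variants above; the helper name
 * and POWER_DOMAIN_VGA are assumptions for the example. Callers that keep
 * the wakeref cookie use intel_display_power_put(); callers that cannot
 * carry the cookie fall back to the untracked _unchecked variant.
 */
static void example_one_off_vga_access(struct drm_i915_private *dev_priv)
{
	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);

	/* ... single VGA register poke, no wakeref kept around ... */

	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_VGA);
}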
1958
965a79ad
ID
1959#define I830_PIPES_POWER_DOMAINS ( \
1960 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1961 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1962 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1963 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1964 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1965 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
d8fc70b7 1966 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d 1967
465ac0c6 1968#define VLV_DISPLAY_POWER_DOMAINS ( \
d8fc70b7
ACO
1969 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1970 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1971 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1972 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1973 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1974 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1975 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1976 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1977 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1978 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1979 BIT_ULL(POWER_DOMAIN_VGA) | \
1980 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1981 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1982 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1983 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1984 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d
SV
1985
1986#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
d8fc70b7
ACO
1987 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1988 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1989 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1990 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1991 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1992 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d
SV
1993
1994#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
d8fc70b7
ACO
1995 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1996 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1997 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d
SV
1998
1999#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
d8fc70b7
ACO
2000 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2001 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2002 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d
SV
2003
2004#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
d8fc70b7
ACO
2005 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2006 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2007 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d
SV
2008
2009#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
d8fc70b7
ACO
2010 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2011 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2012 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d 2013
465ac0c6 2014#define CHV_DISPLAY_POWER_DOMAINS ( \
d8fc70b7
ACO
2015 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2016 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2017 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2018 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2019 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2020 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2021 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2022 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2023 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2024 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2025 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2026 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2027 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2028 BIT_ULL(POWER_DOMAIN_VGA) | \
2029 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2030 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2031 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2032 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2033 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2034 BIT_ULL(POWER_DOMAIN_INIT))
465ac0c6 2035
9c065a7d 2036#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
d8fc70b7
ACO
2037 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2038 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2039 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2040 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2041 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d
SV
2042
2043#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
d8fc70b7
ACO
2044 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2045 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2046 BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d 2047
965a79ad
ID
2048#define HSW_DISPLAY_POWER_DOMAINS ( \
2049 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2050 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2051 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2052 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2053 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2054 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2055 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2056 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2057 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2058 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2059 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2060 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2061 BIT_ULL(POWER_DOMAIN_VGA) | \
2062 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2063 BIT_ULL(POWER_DOMAIN_INIT))
2064
2065#define BDW_DISPLAY_POWER_DOMAINS ( \
2066 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2067 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2068 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2069 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2070 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2071 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2072 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2073 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2074 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2075 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2076 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2077 BIT_ULL(POWER_DOMAIN_VGA) | \
2078 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2079 BIT_ULL(POWER_DOMAIN_INIT))
2080
2081#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2082 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2083 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2084 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2085 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2086 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2087 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2088 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2089 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2090 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2091 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2092 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2093 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2094 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2095 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2096 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2097 BIT_ULL(POWER_DOMAIN_VGA) | \
2098 BIT_ULL(POWER_DOMAIN_INIT))
2099#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
2100 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2101 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2102 BIT_ULL(POWER_DOMAIN_INIT))
2103#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2104 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2105 BIT_ULL(POWER_DOMAIN_INIT))
2106#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2107 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2108 BIT_ULL(POWER_DOMAIN_INIT))
2109#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
2110 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2111 BIT_ULL(POWER_DOMAIN_INIT))
2112#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2113 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
b6876374 2114 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
965a79ad
ID
2115 BIT_ULL(POWER_DOMAIN_MODESET) | \
2116 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2117 BIT_ULL(POWER_DOMAIN_INIT))
2118
2119#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2120 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2121 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2122 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2123 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2124 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2125 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2126 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2127 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2128 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2129 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2130 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2131 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2132 BIT_ULL(POWER_DOMAIN_VGA) | \
965a79ad
ID
2133 BIT_ULL(POWER_DOMAIN_INIT))
2134#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2135 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
b6876374 2136 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
965a79ad
ID
2137 BIT_ULL(POWER_DOMAIN_MODESET) | \
2138 BIT_ULL(POWER_DOMAIN_AUX_A) | \
54c105d6 2139 BIT_ULL(POWER_DOMAIN_GMBUS) | \
965a79ad
ID
2140 BIT_ULL(POWER_DOMAIN_INIT))
2141#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
2142 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2143 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2144 BIT_ULL(POWER_DOMAIN_INIT))
2145#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
2146 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2147 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2148 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2149 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2150 BIT_ULL(POWER_DOMAIN_INIT))
2151
2152#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2153 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2154 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2155 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2156 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2157 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2158 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2159 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2160 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2161 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2162 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2163 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2164 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2165 BIT_ULL(POWER_DOMAIN_VGA) | \
2166 BIT_ULL(POWER_DOMAIN_INIT))
2167#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
2168 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2169#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2170 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2171#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2172 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2173#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
2174 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2175 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2176 BIT_ULL(POWER_DOMAIN_INIT))
2177#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
2178 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2179 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2180 BIT_ULL(POWER_DOMAIN_INIT))
2181#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
2182 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2183 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2184 BIT_ULL(POWER_DOMAIN_INIT))
2185#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
2186 BIT_ULL(POWER_DOMAIN_AUX_A) | \
52528055 2187 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
965a79ad
ID
2188 BIT_ULL(POWER_DOMAIN_INIT))
2189#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
2190 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2191 BIT_ULL(POWER_DOMAIN_INIT))
2192#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
2193 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2194 BIT_ULL(POWER_DOMAIN_INIT))
2195#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2196 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
b6876374 2197 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
965a79ad
ID
2198 BIT_ULL(POWER_DOMAIN_MODESET) | \
2199 BIT_ULL(POWER_DOMAIN_AUX_A) | \
156961ae 2200 BIT_ULL(POWER_DOMAIN_GMBUS) | \
965a79ad
ID
2201 BIT_ULL(POWER_DOMAIN_INIT))
2202
2203#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2204 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2205 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2206 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2207 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2208 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2209 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2210 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2211 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2212 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2213 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
9787e835 2214 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
965a79ad
ID
2215 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2216 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2217 BIT_ULL(POWER_DOMAIN_AUX_D) | \
a324fcac 2218 BIT_ULL(POWER_DOMAIN_AUX_F) | \
965a79ad
ID
2219 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2220 BIT_ULL(POWER_DOMAIN_VGA) | \
2221 BIT_ULL(POWER_DOMAIN_INIT))
2222#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
2223 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
965a79ad
ID
2224 BIT_ULL(POWER_DOMAIN_INIT))
2225#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
2226 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2227 BIT_ULL(POWER_DOMAIN_INIT))
2228#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
2229 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2230 BIT_ULL(POWER_DOMAIN_INIT))
2231#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
2232 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2233 BIT_ULL(POWER_DOMAIN_INIT))
2234#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
2235 BIT_ULL(POWER_DOMAIN_AUX_A) | \
b891d5e4 2236 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
965a79ad
ID
2237 BIT_ULL(POWER_DOMAIN_INIT))
2238#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
2239 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2240 BIT_ULL(POWER_DOMAIN_INIT))
2241#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
2242 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2243 BIT_ULL(POWER_DOMAIN_INIT))
2244#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
2245 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2246 BIT_ULL(POWER_DOMAIN_INIT))
a324fcac
RV
2247#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
2248 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2249 BIT_ULL(POWER_DOMAIN_INIT))
9787e835
RV
2250#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
2251 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2252 BIT_ULL(POWER_DOMAIN_INIT))
965a79ad
ID
2253#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2254 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
6e7a3f52 2255 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
965a79ad
ID
2256 BIT_ULL(POWER_DOMAIN_MODESET) | \
2257 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2ee0da16
VS
2258 BIT_ULL(POWER_DOMAIN_INIT))
2259
67ca07e7
ID
2260/*
2261 * ICL PW_0/PG_0 domains (HW/DMC control):
2262 * - PCI
2263 * - clocks except port PLL
2264 * - central power except FBC
2265 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2266 * ICL PW_1/PG_1 domains (HW/DMC control):
2267 * - DBUF function
2268 * - PIPE_A and its planes, except VGA
2269 * - transcoder EDP + PSR
2270 * - transcoder DSI
2271 * - DDI_A
2272 * - FBC
2273 */
2274#define ICL_PW_4_POWER_DOMAINS ( \
2275 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2276 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2277 BIT_ULL(POWER_DOMAIN_INIT))
2278 /* VDSC/joining */
2279#define ICL_PW_3_POWER_DOMAINS ( \
2280 ICL_PW_4_POWER_DOMAINS | \
2281 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2282 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2283 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2284 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2285 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2286 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2287 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2288 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2289 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2290 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2291 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2292 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2293 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2294 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2295 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2296 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2297 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2298 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2299 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2300 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2301 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
2302 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
2303 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
2304 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
2305 BIT_ULL(POWER_DOMAIN_VGA) | \
2306 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2307 BIT_ULL(POWER_DOMAIN_INIT))
2308 /*
2309 * - transcoder WD
2310 * - KVMR (HW control)
2311 */
2312#define ICL_PW_2_POWER_DOMAINS ( \
2313 ICL_PW_3_POWER_DOMAINS | \
91ba2c8b 2314 BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
67ca07e7
ID
2315 BIT_ULL(POWER_DOMAIN_INIT))
2316 /*
67ca07e7
ID
2317 * - KVMR (HW control)
2318 */
2319#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2320 ICL_PW_2_POWER_DOMAINS | \
2321 BIT_ULL(POWER_DOMAIN_MODESET) | \
2322 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2323 BIT_ULL(POWER_DOMAIN_INIT))
2324
2325#define ICL_DDI_IO_A_POWER_DOMAINS ( \
2326 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2327#define ICL_DDI_IO_B_POWER_DOMAINS ( \
2328 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2329#define ICL_DDI_IO_C_POWER_DOMAINS ( \
2330 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2331#define ICL_DDI_IO_D_POWER_DOMAINS ( \
2332 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2333#define ICL_DDI_IO_E_POWER_DOMAINS ( \
2334 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2335#define ICL_DDI_IO_F_POWER_DOMAINS ( \
2336 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2337
2338#define ICL_AUX_A_IO_POWER_DOMAINS ( \
9e3b5ce9 2339 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
67ca07e7
ID
2340 BIT_ULL(POWER_DOMAIN_AUX_A))
2341#define ICL_AUX_B_IO_POWER_DOMAINS ( \
2342 BIT_ULL(POWER_DOMAIN_AUX_B))
2343#define ICL_AUX_C_IO_POWER_DOMAINS ( \
2344 BIT_ULL(POWER_DOMAIN_AUX_C))
2345#define ICL_AUX_D_IO_POWER_DOMAINS ( \
2346 BIT_ULL(POWER_DOMAIN_AUX_D))
2347#define ICL_AUX_E_IO_POWER_DOMAINS ( \
2348 BIT_ULL(POWER_DOMAIN_AUX_E))
2349#define ICL_AUX_F_IO_POWER_DOMAINS ( \
2350 BIT_ULL(POWER_DOMAIN_AUX_F))
2351#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
2352 BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2353#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
2354 BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2355#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
2356 BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2357#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
2358 BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2359
9c065a7d 2360static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
3c1b38e6 2361 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2362 .enable = i9xx_always_on_power_well_noop,
2363 .disable = i9xx_always_on_power_well_noop,
2364 .is_enabled = i9xx_always_on_power_well_enabled,
2365};
2366
2367static const struct i915_power_well_ops chv_pipe_power_well_ops = {
3c1b38e6 2368 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2369 .enable = chv_pipe_power_well_enable,
2370 .disable = chv_pipe_power_well_disable,
2371 .is_enabled = chv_pipe_power_well_enabled,
2372};
2373
2374static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
3c1b38e6 2375 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2376 .enable = chv_dpio_cmn_power_well_enable,
2377 .disable = chv_dpio_cmn_power_well_disable,
2378 .is_enabled = vlv_power_well_enabled,
2379};
2380
f28ec6f4 2381static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
9c065a7d
SV
2382 {
2383 .name = "always-on",
285cf66d 2384 .always_on = true,
9c065a7d
SV
2385 .domains = POWER_DOMAIN_MASK,
2386 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2387 .id = DISP_PW_ID_NONE,
9c065a7d
SV
2388 },
2389};
2390
2ee0da16
VS
2391static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2392 .sync_hw = i830_pipes_power_well_sync_hw,
2393 .enable = i830_pipes_power_well_enable,
2394 .disable = i830_pipes_power_well_disable,
2395 .is_enabled = i830_pipes_power_well_enabled,
2396};
2397
f28ec6f4 2398static const struct i915_power_well_desc i830_power_wells[] = {
2ee0da16
VS
2399 {
2400 .name = "always-on",
285cf66d 2401 .always_on = true,
2ee0da16
VS
2402 .domains = POWER_DOMAIN_MASK,
2403 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2404 .id = DISP_PW_ID_NONE,
2ee0da16
VS
2405 },
2406 {
2407 .name = "pipes",
2408 .domains = I830_PIPES_POWER_DOMAINS,
2409 .ops = &i830_pipes_power_well_ops,
4739a9d2 2410 .id = DISP_PW_ID_NONE,
2ee0da16
VS
2411 },
2412};
2413
9c065a7d
SV
2414static const struct i915_power_well_ops hsw_power_well_ops = {
2415 .sync_hw = hsw_power_well_sync_hw,
2416 .enable = hsw_power_well_enable,
2417 .disable = hsw_power_well_disable,
2418 .is_enabled = hsw_power_well_enabled,
2419};
2420
9f836f90 2421static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3c1b38e6 2422 .sync_hw = i9xx_power_well_sync_hw_noop,
9f836f90
PJ
2423 .enable = gen9_dc_off_power_well_enable,
2424 .disable = gen9_dc_off_power_well_disable,
2425 .is_enabled = gen9_dc_off_power_well_enabled,
2426};
2427
9c8d0b8e 2428static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3c1b38e6 2429 .sync_hw = i9xx_power_well_sync_hw_noop,
9c8d0b8e
ID
2430 .enable = bxt_dpio_cmn_power_well_enable,
2431 .disable = bxt_dpio_cmn_power_well_disable,
2432 .is_enabled = bxt_dpio_cmn_power_well_enabled,
2433};
2434
75e39688
ID
2435static const struct i915_power_well_regs hsw_power_well_regs = {
2436 .bios = HSW_PWR_WELL_CTL1,
2437 .driver = HSW_PWR_WELL_CTL2,
2438 .kvmr = HSW_PWR_WELL_CTL3,
2439 .debug = HSW_PWR_WELL_CTL4,
2440};
2441
f28ec6f4 2442static const struct i915_power_well_desc hsw_power_wells[] = {
9c065a7d
SV
2443 {
2444 .name = "always-on",
285cf66d 2445 .always_on = true,
998bd66a 2446 .domains = POWER_DOMAIN_MASK,
9c065a7d 2447 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2448 .id = DISP_PW_ID_NONE,
9c065a7d
SV
2449 },
2450 {
2451 .name = "display",
2452 .domains = HSW_DISPLAY_POWER_DOMAINS,
2453 .ops = &hsw_power_well_ops,
fb9248e2 2454 .id = HSW_DISP_PW_GLOBAL,
0a445945 2455 {
75e39688
ID
2456 .hsw.regs = &hsw_power_well_regs,
2457 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
0a445945
ID
2458 .hsw.has_vga = true,
2459 },
9c065a7d
SV
2460 },
2461};
2462
f28ec6f4 2463static const struct i915_power_well_desc bdw_power_wells[] = {
9c065a7d
SV
2464 {
2465 .name = "always-on",
285cf66d 2466 .always_on = true,
998bd66a 2467 .domains = POWER_DOMAIN_MASK,
9c065a7d 2468 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2469 .id = DISP_PW_ID_NONE,
9c065a7d
SV
2470 },
2471 {
2472 .name = "display",
2473 .domains = BDW_DISPLAY_POWER_DOMAINS,
2474 .ops = &hsw_power_well_ops,
fb9248e2 2475 .id = HSW_DISP_PW_GLOBAL,
0a445945 2476 {
75e39688
ID
2477 .hsw.regs = &hsw_power_well_regs,
2478 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
0a445945
ID
2479 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2480 .hsw.has_vga = true,
2481 },
9c065a7d
SV
2482 },
2483};
2484
2485static const struct i915_power_well_ops vlv_display_power_well_ops = {
3c1b38e6 2486 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2487 .enable = vlv_display_power_well_enable,
2488 .disable = vlv_display_power_well_disable,
2489 .is_enabled = vlv_power_well_enabled,
2490};
2491
2492static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3c1b38e6 2493 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2494 .enable = vlv_dpio_cmn_power_well_enable,
2495 .disable = vlv_dpio_cmn_power_well_disable,
2496 .is_enabled = vlv_power_well_enabled,
2497};
2498
2499static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3c1b38e6 2500 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2501 .enable = vlv_power_well_enable,
2502 .disable = vlv_power_well_disable,
2503 .is_enabled = vlv_power_well_enabled,
2504};
2505
f28ec6f4 2506static const struct i915_power_well_desc vlv_power_wells[] = {
9c065a7d
SV
2507 {
2508 .name = "always-on",
285cf66d 2509 .always_on = true,
998bd66a 2510 .domains = POWER_DOMAIN_MASK,
9c065a7d 2511 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2512 .id = DISP_PW_ID_NONE,
9c065a7d
SV
2513 },
2514 {
2515 .name = "display",
2516 .domains = VLV_DISPLAY_POWER_DOMAINS,
9c065a7d 2517 .ops = &vlv_display_power_well_ops,
2183b499 2518 .id = VLV_DISP_PW_DISP2D,
d13dd05a
ID
2519 {
2520 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2521 },
9c065a7d
SV
2522 },
2523 {
2524 .name = "dpio-tx-b-01",
2525 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2526 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2527 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2528 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2529 .ops = &vlv_dpio_power_well_ops,
4739a9d2 2530 .id = DISP_PW_ID_NONE,
d13dd05a
ID
2531 {
2532 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2533 },
9c065a7d
SV
2534 },
2535 {
2536 .name = "dpio-tx-b-23",
2537 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2538 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2539 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2540 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2541 .ops = &vlv_dpio_power_well_ops,
4739a9d2 2542 .id = DISP_PW_ID_NONE,
d13dd05a
ID
2543 {
2544 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2545 },
9c065a7d
SV
2546 },
2547 {
2548 .name = "dpio-tx-c-01",
2549 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2550 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2551 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2552 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2553 .ops = &vlv_dpio_power_well_ops,
4739a9d2 2554 .id = DISP_PW_ID_NONE,
d13dd05a
ID
2555 {
2556 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2557 },
9c065a7d
SV
2558 },
2559 {
2560 .name = "dpio-tx-c-23",
2561 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2562 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2563 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2564 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2565 .ops = &vlv_dpio_power_well_ops,
4739a9d2 2566 .id = DISP_PW_ID_NONE,
d13dd05a
ID
2567 {
2568 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2569 },
9c065a7d
SV
2570 },
2571 {
2572 .name = "dpio-common",
2573 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
9c065a7d 2574 .ops = &vlv_dpio_cmn_power_well_ops,
2183b499 2575 .id = VLV_DISP_PW_DPIO_CMN_BC,
d13dd05a
ID
2576 {
2577 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2578 },
9c065a7d
SV
2579 },
2580};
2581
f28ec6f4 2582static const struct i915_power_well_desc chv_power_wells[] = {
9c065a7d
SV
2583 {
2584 .name = "always-on",
285cf66d 2585 .always_on = true,
998bd66a 2586 .domains = POWER_DOMAIN_MASK,
9c065a7d 2587 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2588 .id = DISP_PW_ID_NONE,
9c065a7d 2589 },
9c065a7d
SV
2590 {
2591 .name = "display",
baa4e575 2592 /*
fde61e4b
VS
2593 * Pipe A power well is the new disp2d well. Pipe B and C
2594 * power wells don't actually exist. Pipe A power well is
2595 * required for any pipe to work.
baa4e575 2596 */
465ac0c6 2597 .domains = CHV_DISPLAY_POWER_DOMAINS,
9c065a7d 2598 .ops = &chv_pipe_power_well_ops,
4739a9d2 2599 .id = DISP_PW_ID_NONE,
9c065a7d 2600 },
9c065a7d
SV
2601 {
2602 .name = "dpio-common-bc",
71849b67 2603 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
9c065a7d 2604 .ops = &chv_dpio_cmn_power_well_ops,
2183b499 2605 .id = VLV_DISP_PW_DPIO_CMN_BC,
d13dd05a
ID
2606 {
2607 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2608 },
9c065a7d
SV
2609 },
2610 {
2611 .name = "dpio-common-d",
71849b67 2612 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
9c065a7d 2613 .ops = &chv_dpio_cmn_power_well_ops,
2183b499 2614 .id = CHV_DISP_PW_DPIO_CMN_D,
d13dd05a
ID
2615 {
2616 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2617 },
9c065a7d 2618 },
9c065a7d
SV
2619};
2620
5aefb239 2621bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
438b8dc4 2622 enum i915_power_well_id power_well_id)
5aefb239
SS
2623{
2624 struct i915_power_well *power_well;
2625 bool ret;
2626
2627 power_well = lookup_power_well(dev_priv, power_well_id);
f28ec6f4 2628 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
5aefb239
SS
2629
2630 return ret;
2631}
2632
f28ec6f4 2633static const struct i915_power_well_desc skl_power_wells[] = {
94dd5138
S
2634 {
2635 .name = "always-on",
285cf66d 2636 .always_on = true,
998bd66a 2637 .domains = POWER_DOMAIN_MASK,
94dd5138 2638 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2639 .id = DISP_PW_ID_NONE,
94dd5138
S
2640 },
2641 {
2642 .name = "power well 1",
4a76f295 2643 /* Handled by the DMC firmware */
fa96ed1f 2644 .always_on = true,
4a76f295 2645 .domains = 0,
4196b918 2646 .ops = &hsw_power_well_ops,
01c3faa7 2647 .id = SKL_DISP_PW_1,
0a445945 2648 {
75e39688
ID
2649 .hsw.regs = &hsw_power_well_regs,
2650 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2651 .hsw.has_fuses = true,
2652 },
94dd5138
S
2653 },
2654 {
2655 .name = "MISC IO power well",
4a76f295 2656 /* Handled by the DMC firmware */
fa96ed1f 2657 .always_on = true,
4a76f295 2658 .domains = 0,
4196b918 2659 .ops = &hsw_power_well_ops,
01c3faa7 2660 .id = SKL_DISP_PW_MISC_IO,
75e39688
ID
2661 {
2662 .hsw.regs = &hsw_power_well_regs,
2663 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2664 },
94dd5138 2665 },
9f836f90
PJ
2666 {
2667 .name = "DC off",
2668 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2669 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2670 .id = DISP_PW_ID_NONE,
9f836f90 2671 },
94dd5138
S
2672 {
2673 .name = "power well 2",
2674 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2675 .ops = &hsw_power_well_ops,
01c3faa7 2676 .id = SKL_DISP_PW_2,
0a445945 2677 {
75e39688
ID
2678 .hsw.regs = &hsw_power_well_regs,
2679 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2680 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2681 .hsw.has_vga = true,
2682 .hsw.has_fuses = true,
2683 },
94dd5138
S
2684 },
2685 {
62b69566
ACO
2686 .name = "DDI A/E IO power well",
2687 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
4196b918 2688 .ops = &hsw_power_well_ops,
4739a9d2 2689 .id = DISP_PW_ID_NONE,
75e39688
ID
2690 {
2691 .hsw.regs = &hsw_power_well_regs,
2692 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2693 },
94dd5138
S
2694 },
2695 {
62b69566
ACO
2696 .name = "DDI B IO power well",
2697 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
4196b918 2698 .ops = &hsw_power_well_ops,
4739a9d2 2699 .id = DISP_PW_ID_NONE,
75e39688
ID
2700 {
2701 .hsw.regs = &hsw_power_well_regs,
2702 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2703 },
94dd5138
S
2704 },
2705 {
62b69566
ACO
2706 .name = "DDI C IO power well",
2707 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
4196b918 2708 .ops = &hsw_power_well_ops,
4739a9d2 2709 .id = DISP_PW_ID_NONE,
75e39688
ID
2710 {
2711 .hsw.regs = &hsw_power_well_regs,
2712 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2713 },
94dd5138
S
2714 },
2715 {
62b69566
ACO
2716 .name = "DDI D IO power well",
2717 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
4196b918 2718 .ops = &hsw_power_well_ops,
4739a9d2 2719 .id = DISP_PW_ID_NONE,
75e39688
ID
2720 {
2721 .hsw.regs = &hsw_power_well_regs,
2722 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2723 },
94dd5138
S
2724 },
2725};
2726
f28ec6f4 2727static const struct i915_power_well_desc bxt_power_wells[] = {
0b4a2a36
S
2728 {
2729 .name = "always-on",
285cf66d 2730 .always_on = true,
998bd66a 2731 .domains = POWER_DOMAIN_MASK,
0b4a2a36 2732 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2733 .id = DISP_PW_ID_NONE,
0b4a2a36
S
2734 },
2735 {
2736 .name = "power well 1",
fa96ed1f
ID
2737 /* Handled by the DMC firmware */
2738 .always_on = true,
d7d7c9ee 2739 .domains = 0,
4196b918 2740 .ops = &hsw_power_well_ops,
01c3faa7 2741 .id = SKL_DISP_PW_1,
0a445945 2742 {
75e39688
ID
2743 .hsw.regs = &hsw_power_well_regs,
2744 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2745 .hsw.has_fuses = true,
2746 },
0b4a2a36 2747 },
9f836f90
PJ
2748 {
2749 .name = "DC off",
2750 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2751 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2752 .id = DISP_PW_ID_NONE,
9f836f90 2753 },
0b4a2a36
S
2754 {
2755 .name = "power well 2",
2756 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2757 .ops = &hsw_power_well_ops,
01c3faa7 2758 .id = SKL_DISP_PW_2,
0a445945 2759 {
75e39688
ID
2760 .hsw.regs = &hsw_power_well_regs,
2761 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2762 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2763 .hsw.has_vga = true,
2764 .hsw.has_fuses = true,
2765 },
9f836f90 2766 },
9c8d0b8e
ID
2767 {
2768 .name = "dpio-common-a",
2769 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2770 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2771 .id = BXT_DISP_PW_DPIO_CMN_A,
0a445945
ID
2772 {
2773 .bxt.phy = DPIO_PHY1,
2774 },
9c8d0b8e
ID
2775 },
2776 {
2777 .name = "dpio-common-bc",
2778 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2779 .ops = &bxt_dpio_cmn_power_well_ops,
d9fcdc8d 2780 .id = VLV_DISP_PW_DPIO_CMN_BC,
0a445945
ID
2781 {
2782 .bxt.phy = DPIO_PHY0,
2783 },
9c8d0b8e 2784 },
0b4a2a36
S
2785};
2786
f28ec6f4 2787static const struct i915_power_well_desc glk_power_wells[] = {
0d03926d
ACO
2788 {
2789 .name = "always-on",
285cf66d 2790 .always_on = true,
0d03926d
ACO
2791 .domains = POWER_DOMAIN_MASK,
2792 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2793 .id = DISP_PW_ID_NONE,
0d03926d
ACO
2794 },
2795 {
2796 .name = "power well 1",
2797 /* Handled by the DMC firmware */
fa96ed1f 2798 .always_on = true,
0d03926d 2799 .domains = 0,
4196b918 2800 .ops = &hsw_power_well_ops,
0d03926d 2801 .id = SKL_DISP_PW_1,
0a445945 2802 {
75e39688
ID
2803 .hsw.regs = &hsw_power_well_regs,
2804 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2805 .hsw.has_fuses = true,
2806 },
0d03926d
ACO
2807 },
2808 {
2809 .name = "DC off",
2810 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2811 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2812 .id = DISP_PW_ID_NONE,
0d03926d
ACO
2813 },
2814 {
2815 .name = "power well 2",
2816 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2817 .ops = &hsw_power_well_ops,
0d03926d 2818 .id = SKL_DISP_PW_2,
0a445945 2819 {
75e39688
ID
2820 .hsw.regs = &hsw_power_well_regs,
2821 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2822 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2823 .hsw.has_vga = true,
2824 .hsw.has_fuses = true,
2825 },
0d03926d 2826 },
0a116ce8
ACO
2827 {
2828 .name = "dpio-common-a",
2829 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2830 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2831 .id = BXT_DISP_PW_DPIO_CMN_A,
0a445945
ID
2832 {
2833 .bxt.phy = DPIO_PHY1,
2834 },
0a116ce8
ACO
2835 },
2836 {
2837 .name = "dpio-common-b",
2838 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2839 .ops = &bxt_dpio_cmn_power_well_ops,
d9fcdc8d 2840 .id = VLV_DISP_PW_DPIO_CMN_BC,
0a445945
ID
2841 {
2842 .bxt.phy = DPIO_PHY0,
2843 },
0a116ce8
ACO
2844 },
2845 {
2846 .name = "dpio-common-c",
2847 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2848 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2849 .id = GLK_DISP_PW_DPIO_CMN_C,
0a445945
ID
2850 {
2851 .bxt.phy = DPIO_PHY2,
2852 },
0a116ce8 2853 },
0d03926d
ACO
2854 {
2855 .name = "AUX A",
2856 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
4196b918 2857 .ops = &hsw_power_well_ops,
4739a9d2 2858 .id = DISP_PW_ID_NONE,
75e39688
ID
2859 {
2860 .hsw.regs = &hsw_power_well_regs,
2861 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2862 },
0d03926d
ACO
2863 },
2864 {
2865 .name = "AUX B",
2866 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
4196b918 2867 .ops = &hsw_power_well_ops,
4739a9d2 2868 .id = DISP_PW_ID_NONE,
75e39688
ID
2869 {
2870 .hsw.regs = &hsw_power_well_regs,
2871 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2872 },
0d03926d
ACO
2873 },
2874 {
2875 .name = "AUX C",
2876 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
4196b918 2877 .ops = &hsw_power_well_ops,
4739a9d2 2878 .id = DISP_PW_ID_NONE,
75e39688
ID
2879 {
2880 .hsw.regs = &hsw_power_well_regs,
2881 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2882 },
0d03926d
ACO
2883 },
2884 {
62b69566
ACO
2885 .name = "DDI A IO power well",
2886 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
4196b918 2887 .ops = &hsw_power_well_ops,
4739a9d2 2888 .id = DISP_PW_ID_NONE,
75e39688
ID
2889 {
2890 .hsw.regs = &hsw_power_well_regs,
2891 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2892 },
0d03926d
ACO
2893 },
2894 {
62b69566
ACO
2895 .name = "DDI B IO power well",
2896 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
4196b918 2897 .ops = &hsw_power_well_ops,
4739a9d2 2898 .id = DISP_PW_ID_NONE,
75e39688
ID
2899 {
2900 .hsw.regs = &hsw_power_well_regs,
2901 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2902 },
0d03926d
ACO
2903 },
2904 {
62b69566
ACO
2905 .name = "DDI C IO power well",
2906 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
4196b918 2907 .ops = &hsw_power_well_ops,
4739a9d2 2908 .id = DISP_PW_ID_NONE,
75e39688
ID
2909 {
2910 .hsw.regs = &hsw_power_well_regs,
2911 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2912 },
0d03926d
ACO
2913 },
2914};
2915
f28ec6f4 2916static const struct i915_power_well_desc cnl_power_wells[] = {
8bcd3dd4
VS
2917 {
2918 .name = "always-on",
285cf66d 2919 .always_on = true,
8bcd3dd4
VS
2920 .domains = POWER_DOMAIN_MASK,
2921 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2922 .id = DISP_PW_ID_NONE,
8bcd3dd4
VS
2923 },
2924 {
2925 .name = "power well 1",
2926 /* Handled by the DMC firmware */
fa96ed1f 2927 .always_on = true,
8bcd3dd4 2928 .domains = 0,
4196b918 2929 .ops = &hsw_power_well_ops,
8bcd3dd4 2930 .id = SKL_DISP_PW_1,
0a445945 2931 {
75e39688
ID
2932 .hsw.regs = &hsw_power_well_regs,
2933 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2934 .hsw.has_fuses = true,
2935 },
8bcd3dd4
VS
2936 },
2937 {
2938 .name = "AUX A",
2939 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
4196b918 2940 .ops = &hsw_power_well_ops,
4739a9d2 2941 .id = DISP_PW_ID_NONE,
75e39688
ID
2942 {
2943 .hsw.regs = &hsw_power_well_regs,
2944 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2945 },
8bcd3dd4
VS
2946 },
2947 {
2948 .name = "AUX B",
2949 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
4196b918 2950 .ops = &hsw_power_well_ops,
4739a9d2 2951 .id = DISP_PW_ID_NONE,
75e39688
ID
2952 {
2953 .hsw.regs = &hsw_power_well_regs,
2954 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2955 },
8bcd3dd4
VS
2956 },
2957 {
2958 .name = "AUX C",
2959 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
4196b918 2960 .ops = &hsw_power_well_ops,
4739a9d2 2961 .id = DISP_PW_ID_NONE,
75e39688
ID
2962 {
2963 .hsw.regs = &hsw_power_well_regs,
2964 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2965 },
8bcd3dd4
VS
2966 },
2967 {
2968 .name = "AUX D",
2969 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
4196b918 2970 .ops = &hsw_power_well_ops,
4739a9d2 2971 .id = DISP_PW_ID_NONE,
75e39688
ID
2972 {
2973 .hsw.regs = &hsw_power_well_regs,
2974 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
2975 },
8bcd3dd4
VS
2976 },
2977 {
2978 .name = "DC off",
2979 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2980 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2981 .id = DISP_PW_ID_NONE,
8bcd3dd4
VS
2982 },
2983 {
2984 .name = "power well 2",
2985 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2986 .ops = &hsw_power_well_ops,
8bcd3dd4 2987 .id = SKL_DISP_PW_2,
0a445945 2988 {
75e39688
ID
2989 .hsw.regs = &hsw_power_well_regs,
2990 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2991 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2992 .hsw.has_vga = true,
2993 .hsw.has_fuses = true,
2994 },
8bcd3dd4
VS
2995 },
2996 {
2997 .name = "DDI A IO power well",
2998 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
4196b918 2999 .ops = &hsw_power_well_ops,
4739a9d2 3000 .id = DISP_PW_ID_NONE,
75e39688
ID
3001 {
3002 .hsw.regs = &hsw_power_well_regs,
3003 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3004 },
8bcd3dd4
VS
3005 },
3006 {
3007 .name = "DDI B IO power well",
3008 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
4196b918 3009 .ops = &hsw_power_well_ops,
4739a9d2 3010 .id = DISP_PW_ID_NONE,
75e39688
ID
3011 {
3012 .hsw.regs = &hsw_power_well_regs,
3013 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3014 },
8bcd3dd4
VS
3015 },
3016 {
3017 .name = "DDI C IO power well",
3018 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
4196b918 3019 .ops = &hsw_power_well_ops,
4739a9d2 3020 .id = DISP_PW_ID_NONE,
75e39688
ID
3021 {
3022 .hsw.regs = &hsw_power_well_regs,
3023 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3024 },
8bcd3dd4
VS
3025 },
3026 {
3027 .name = "DDI D IO power well",
3028 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
4196b918 3029 .ops = &hsw_power_well_ops,
4739a9d2 3030 .id = DISP_PW_ID_NONE,
75e39688
ID
3031 {
3032 .hsw.regs = &hsw_power_well_regs,
3033 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3034 },
8bcd3dd4 3035 },
9787e835
RV
3036 {
3037 .name = "DDI F IO power well",
3038 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3039 .ops = &hsw_power_well_ops,
4739a9d2 3040 .id = DISP_PW_ID_NONE,
75e39688
ID
3041 {
3042 .hsw.regs = &hsw_power_well_regs,
3043 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3044 },
9787e835 3045 },
a324fcac
RV
3046 {
3047 .name = "AUX F",
3048 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3049 .ops = &hsw_power_well_ops,
4739a9d2 3050 .id = DISP_PW_ID_NONE,
75e39688
ID
3051 {
3052 .hsw.regs = &hsw_power_well_regs,
3053 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3054 },
a324fcac 3055 },
8bcd3dd4
VS
3056};
3057
67ca07e7
ID
3058static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3059 .sync_hw = hsw_power_well_sync_hw,
3060 .enable = icl_combo_phy_aux_power_well_enable,
3061 .disable = icl_combo_phy_aux_power_well_disable,
3062 .is_enabled = hsw_power_well_enabled,
3063};
3064
c7375d95
ID
3065static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3066 .sync_hw = hsw_power_well_sync_hw,
3067 .enable = icl_tc_phy_aux_power_well_enable,
3068 .disable = hsw_power_well_disable,
3069 .is_enabled = hsw_power_well_enabled,
3070};
3071
75e39688
ID
3072static const struct i915_power_well_regs icl_aux_power_well_regs = {
3073 .bios = ICL_PWR_WELL_CTL_AUX1,
3074 .driver = ICL_PWR_WELL_CTL_AUX2,
3075 .debug = ICL_PWR_WELL_CTL_AUX4,
3076};
3077
3078static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3079 .bios = ICL_PWR_WELL_CTL_DDI1,
3080 .driver = ICL_PWR_WELL_CTL_DDI2,
3081 .debug = ICL_PWR_WELL_CTL_DDI4,
3082};
3083
f28ec6f4 3084static const struct i915_power_well_desc icl_power_wells[] = {
67ca07e7
ID
3085 {
3086 .name = "always-on",
285cf66d 3087 .always_on = true,
67ca07e7
ID
3088 .domains = POWER_DOMAIN_MASK,
3089 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 3090 .id = DISP_PW_ID_NONE,
67ca07e7
ID
3091 },
3092 {
3093 .name = "power well 1",
3094 /* Handled by the DMC firmware */
fa96ed1f 3095 .always_on = true,
67ca07e7
ID
3096 .domains = 0,
3097 .ops = &hsw_power_well_ops,
d9fcdc8d 3098 .id = SKL_DISP_PW_1,
ae9b06ca 3099 {
75e39688
ID
3100 .hsw.regs = &hsw_power_well_regs,
3101 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
ae9b06ca
ID
3102 .hsw.has_fuses = true,
3103 },
67ca07e7 3104 },
a33e1ece
ID
3105 {
3106 .name = "DC off",
3107 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3108 .ops = &gen9_dc_off_power_well_ops,
3109 .id = DISP_PW_ID_NONE,
3110 },
67ca07e7
ID
3111 {
3112 .name = "power well 2",
3113 .domains = ICL_PW_2_POWER_DOMAINS,
3114 .ops = &hsw_power_well_ops,
d9fcdc8d 3115 .id = SKL_DISP_PW_2,
ae9b06ca 3116 {
75e39688
ID
3117 .hsw.regs = &hsw_power_well_regs,
3118 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
ae9b06ca
ID
3119 .hsw.has_fuses = true,
3120 },
67ca07e7 3121 },
67ca07e7
ID
3122 {
3123 .name = "power well 3",
3124 .domains = ICL_PW_3_POWER_DOMAINS,
3125 .ops = &hsw_power_well_ops,
4739a9d2 3126 .id = DISP_PW_ID_NONE,
ae9b06ca 3127 {
75e39688
ID
3128 .hsw.regs = &hsw_power_well_regs,
3129 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
ae9b06ca
ID
3130 .hsw.irq_pipe_mask = BIT(PIPE_B),
3131 .hsw.has_vga = true,
3132 .hsw.has_fuses = true,
3133 },
67ca07e7
ID
3134 },
3135 {
3136 .name = "DDI A IO",
3137 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3138 .ops = &hsw_power_well_ops,
4739a9d2 3139 .id = DISP_PW_ID_NONE,
75e39688
ID
3140 {
3141 .hsw.regs = &icl_ddi_power_well_regs,
3142 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3143 },
67ca07e7
ID
3144 },
3145 {
3146 .name = "DDI B IO",
3147 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3148 .ops = &hsw_power_well_ops,
4739a9d2 3149 .id = DISP_PW_ID_NONE,
75e39688
ID
3150 {
3151 .hsw.regs = &icl_ddi_power_well_regs,
3152 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3153 },
67ca07e7
ID
3154 },
3155 {
3156 .name = "DDI C IO",
3157 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3158 .ops = &hsw_power_well_ops,
4739a9d2 3159 .id = DISP_PW_ID_NONE,
75e39688
ID
3160 {
3161 .hsw.regs = &icl_ddi_power_well_regs,
3162 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3163 },
67ca07e7
ID
3164 },
3165 {
3166 .name = "DDI D IO",
3167 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3168 .ops = &hsw_power_well_ops,
4739a9d2 3169 .id = DISP_PW_ID_NONE,
75e39688
ID
3170 {
3171 .hsw.regs = &icl_ddi_power_well_regs,
3172 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3173 },
67ca07e7
ID
3174 },
3175 {
3176 .name = "DDI E IO",
3177 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3178 .ops = &hsw_power_well_ops,
4739a9d2 3179 .id = DISP_PW_ID_NONE,
75e39688
ID
3180 {
3181 .hsw.regs = &icl_ddi_power_well_regs,
3182 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3183 },
67ca07e7
ID
3184 },
3185 {
3186 .name = "DDI F IO",
3187 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3188 .ops = &hsw_power_well_ops,
4739a9d2 3189 .id = DISP_PW_ID_NONE,
75e39688
ID
3190 {
3191 .hsw.regs = &icl_ddi_power_well_regs,
3192 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3193 },
67ca07e7
ID
3194 },
3195 {
3196 .name = "AUX A",
3197 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3198 .ops = &icl_combo_phy_aux_power_well_ops,
4739a9d2 3199 .id = DISP_PW_ID_NONE,
75e39688
ID
3200 {
3201 .hsw.regs = &icl_aux_power_well_regs,
3202 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3203 },
67ca07e7
ID
3204 },
3205 {
3206 .name = "AUX B",
3207 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3208 .ops = &icl_combo_phy_aux_power_well_ops,
4739a9d2 3209 .id = DISP_PW_ID_NONE,
75e39688
ID
3210 {
3211 .hsw.regs = &icl_aux_power_well_regs,
3212 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3213 },
67ca07e7
ID
3214 },
3215 {
3216 .name = "AUX C",
3217 .domains = ICL_AUX_C_IO_POWER_DOMAINS,
c7375d95 3218 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3219 .id = DISP_PW_ID_NONE,
75e39688
ID
3220 {
3221 .hsw.regs = &icl_aux_power_well_regs,
3222 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
c7375d95 3223 .hsw.is_tc_tbt = false,
75e39688 3224 },
67ca07e7
ID
3225 },
3226 {
3227 .name = "AUX D",
3228 .domains = ICL_AUX_D_IO_POWER_DOMAINS,
c7375d95 3229 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3230 .id = DISP_PW_ID_NONE,
75e39688
ID
3231 {
3232 .hsw.regs = &icl_aux_power_well_regs,
3233 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
c7375d95 3234 .hsw.is_tc_tbt = false,
75e39688 3235 },
67ca07e7
ID
3236 },
3237 {
3238 .name = "AUX E",
3239 .domains = ICL_AUX_E_IO_POWER_DOMAINS,
c7375d95 3240 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3241 .id = DISP_PW_ID_NONE,
75e39688
ID
3242 {
3243 .hsw.regs = &icl_aux_power_well_regs,
3244 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
c7375d95 3245 .hsw.is_tc_tbt = false,
75e39688 3246 },
67ca07e7
ID
3247 },
3248 {
3249 .name = "AUX F",
3250 .domains = ICL_AUX_F_IO_POWER_DOMAINS,
c7375d95 3251 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3252 .id = DISP_PW_ID_NONE,
75e39688
ID
3253 {
3254 .hsw.regs = &icl_aux_power_well_regs,
3255 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
c7375d95 3256 .hsw.is_tc_tbt = false,
75e39688 3257 },
67ca07e7
ID
3258 },
3259 {
3260 .name = "AUX TBT1",
3261 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
c7375d95 3262 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3263 .id = DISP_PW_ID_NONE,
75e39688
ID
3264 {
3265 .hsw.regs = &icl_aux_power_well_regs,
3266 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
c7375d95 3267 .hsw.is_tc_tbt = true,
75e39688 3268 },
67ca07e7
ID
3269 },
3270 {
3271 .name = "AUX TBT2",
3272 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
c7375d95 3273 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3274 .id = DISP_PW_ID_NONE,
75e39688
ID
3275 {
3276 .hsw.regs = &icl_aux_power_well_regs,
3277 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
c7375d95 3278 .hsw.is_tc_tbt = true,
75e39688 3279 },
67ca07e7
ID
3280 },
3281 {
3282 .name = "AUX TBT3",
3283 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
c7375d95 3284 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3285 .id = DISP_PW_ID_NONE,
75e39688
ID
3286 {
3287 .hsw.regs = &icl_aux_power_well_regs,
3288 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
c7375d95 3289 .hsw.is_tc_tbt = true,
75e39688 3290 },
67ca07e7
ID
3291 },
3292 {
3293 .name = "AUX TBT4",
3294 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
c7375d95 3295 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3296 .id = DISP_PW_ID_NONE,
75e39688
ID
3297 {
3298 .hsw.regs = &icl_aux_power_well_regs,
3299 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
c7375d95 3300 .hsw.is_tc_tbt = true,
75e39688 3301 },
67ca07e7
ID
3302 },
3303 {
3304 .name = "power well 4",
3305 .domains = ICL_PW_4_POWER_DOMAINS,
3306 .ops = &hsw_power_well_ops,
4739a9d2 3307 .id = DISP_PW_ID_NONE,
ae9b06ca 3308 {
75e39688
ID
3309 .hsw.regs = &hsw_power_well_regs,
3310 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
ae9b06ca
ID
3311 .hsw.has_fuses = true,
3312 .hsw.irq_pipe_mask = BIT(PIPE_C),
3313 },
67ca07e7
ID
3314 },
3315};
3316
1b0e3a04
ID
3317static int
3318sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3319 int disable_power_well)
3320{
3321 if (disable_power_well >= 0)
3322 return !!disable_power_well;
3323
1b0e3a04
ID
3324 return 1;
3325}
3326
739f3abd
JN
3327static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3328 int enable_dc)
a37baf3b 3329{
739f3abd 3330 u32 mask;
a37baf3b
ID
3331 int requested_dc;
3332 int max_dc;
3333
3e68928b 3334 if (INTEL_GEN(dev_priv) >= 11) {
a37baf3b 3335 max_dc = 2;
a37baf3b
ID
3336 /*
3337 * DC9 has a separate HW flow from the rest of the DC states,
3338 * not depending on the DMC firmware. It's needed by system
3339 * suspend/resume, so allow it unconditionally.
3340 */
3341 mask = DC_STATE_EN_DC9;
cf819eff 3342 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3e68928b
AM
3343 max_dc = 2;
3344 mask = 0;
3345 } else if (IS_GEN9_LP(dev_priv)) {
3346 max_dc = 1;
3347 mask = DC_STATE_EN_DC9;
a37baf3b
ID
3348 } else {
3349 max_dc = 0;
3350 mask = 0;
3351 }
3352
4f044a88 3353 if (!i915_modparams.disable_power_well)
66e2c4c3
ID
3354 max_dc = 0;
3355
a37baf3b
ID
3356 if (enable_dc >= 0 && enable_dc <= max_dc) {
3357 requested_dc = enable_dc;
3358 } else if (enable_dc == -1) {
3359 requested_dc = max_dc;
3360 } else if (enable_dc > max_dc && enable_dc <= 2) {
3361 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3362 enable_dc, max_dc);
3363 requested_dc = max_dc;
3364 } else {
3365 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3366 requested_dc = max_dc;
3367 }
3368
3369 if (requested_dc > 1)
3370 mask |= DC_STATE_EN_UPTO_DC6;
3371 if (requested_dc > 0)
3372 mask |= DC_STATE_EN_UPTO_DC5;
3373
3374 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3375
3376 return mask;
3377}
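/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * a worked example of the mask computed above. On an ICL-class device
 * (INTEL_GEN >= 11) with the default i915.enable_dc=-1, max_dc is 2 and DC9
 * is always allowed, so the function effectively builds up
 *
 *	mask = DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6;
 *
 * while i915.enable_dc=1 on the same platform drops DC_STATE_EN_UPTO_DC6,
 * and i915.disable_power_well=0 forces max_dc to 0 so that only
 * DC_STATE_EN_DC9 remains in the returned mask.
 */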
3378
f28ec6f4
ID
3379static int
3380__set_power_wells(struct i915_power_domains *power_domains,
3381 const struct i915_power_well_desc *power_well_descs,
3382 int power_well_count)
21792c60 3383{
f28ec6f4 3384 u64 power_well_ids = 0;
21792c60
ID
3385 int i;
3386
f28ec6f4
ID
3387 power_domains->power_well_count = power_well_count;
3388 power_domains->power_wells =
3389 kcalloc(power_well_count,
3390 sizeof(*power_domains->power_wells),
3391 GFP_KERNEL);
3392 if (!power_domains->power_wells)
3393 return -ENOMEM;
3394
3395 for (i = 0; i < power_well_count; i++) {
3396 enum i915_power_well_id id = power_well_descs[i].id;
3397
3398 power_domains->power_wells[i].desc = &power_well_descs[i];
21792c60 3399
4739a9d2
ID
3400 if (id == DISP_PW_ID_NONE)
3401 continue;
3402
21792c60
ID
3403 WARN_ON(id >= sizeof(power_well_ids) * 8);
3404 WARN_ON(power_well_ids & BIT_ULL(id));
3405 power_well_ids |= BIT_ULL(id);
3406 }
f28ec6f4
ID
3407
3408 return 0;
21792c60
ID
3409}
3410
f28ec6f4
ID
3411#define set_power_wells(power_domains, __power_well_descs) \
3412 __set_power_wells(power_domains, __power_well_descs, \
3413 ARRAY_SIZE(__power_well_descs))
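/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * how a platform hooks its descriptor table up through the helper above.
 * "xxx_power_wells" is a hypothetical table name; the real per-platform
 * tables (icl_power_wells, cnl_power_wells, ...) are selected in
 * intel_power_domains_init() below.
 *
 *	static const struct i915_power_well_desc xxx_power_wells[] = {
 *		{
 *			.name = "always-on",
 *			.always_on = true,
 *			.domains = POWER_DOMAIN_MASK,
 *			.ops = &i9xx_always_on_power_well_ops,
 *			.id = DISP_PW_ID_NONE,
 *		},
 *	};
 *
 *	err = set_power_wells(power_domains, xxx_power_wells);
 *
 * Any well with a real id must be unique; __set_power_wells() WARNs if two
 * descriptors claim the same i915_power_well_id.
 */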
9c065a7d 3414
e4e7684f
SV
3415/**
3416 * intel_power_domains_init - initializes the power domain structures
3417 * @dev_priv: i915 device instance
3418 *
3419 * Initializes the power domain structures for @dev_priv depending upon the
3420 * supported platform.
3421 */
9c065a7d
SV
3422int intel_power_domains_init(struct drm_i915_private *dev_priv)
3423{
3424 struct i915_power_domains *power_domains = &dev_priv->power_domains;
f28ec6f4 3425 int err;
9c065a7d 3426
4f044a88
MW
3427 i915_modparams.disable_power_well =
3428 sanitize_disable_power_well_option(dev_priv,
3429 i915_modparams.disable_power_well);
3430 dev_priv->csr.allowed_dc_mask =
3431 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
1b0e3a04 3432
d8fc70b7 3433 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
f0ab43e6 3434
9c065a7d
SV
3435 mutex_init(&power_domains->lock);
3436
3437 /*
3438 * The enabling order will be from lower to higher indexed wells,
3439 * the disabling order is reversed.
3440 */
67ca07e7 3441 if (IS_ICELAKE(dev_priv)) {
f28ec6f4 3442 err = set_power_wells(power_domains, icl_power_wells);
8bcd3dd4 3443 } else if (IS_CANNONLAKE(dev_priv)) {
f28ec6f4 3444 err = set_power_wells(power_domains, cnl_power_wells);
a324fcac
RV
3445
3446 /*
9787e835 3447 * DDI and Aux IO are getting enabled for all ports
a324fcac 3448 * regardless of their presence or use. So, in order to avoid
9787e835 3449 * timeouts, let's remove them from the list
a324fcac
RV
3450 * for the SKUs without port F.
3451 */
3452 if (!IS_CNL_WITH_PORT_F(dev_priv))
9787e835 3453 power_domains->power_well_count -= 2;
0d03926d 3454 } else if (IS_GEMINILAKE(dev_priv)) {
f28ec6f4 3455 err = set_power_wells(power_domains, glk_power_wells);
fb72deae
RV
3456 } else if (IS_BROXTON(dev_priv)) {
3457 err = set_power_wells(power_domains, bxt_power_wells);
3458 } else if (IS_GEN9_BC(dev_priv)) {
3459 err = set_power_wells(power_domains, skl_power_wells);
2d1fe073 3460 } else if (IS_CHERRYVIEW(dev_priv)) {
f28ec6f4 3461 err = set_power_wells(power_domains, chv_power_wells);
fb72deae
RV
3462 } else if (IS_BROADWELL(dev_priv)) {
3463 err = set_power_wells(power_domains, bdw_power_wells);
3464 } else if (IS_HASWELL(dev_priv)) {
3465 err = set_power_wells(power_domains, hsw_power_wells);
2d1fe073 3466 } else if (IS_VALLEYVIEW(dev_priv)) {
f28ec6f4 3467 err = set_power_wells(power_domains, vlv_power_wells);
2ee0da16 3468 } else if (IS_I830(dev_priv)) {
f28ec6f4 3469 err = set_power_wells(power_domains, i830_power_wells);
9c065a7d 3470 } else {
f28ec6f4 3471 err = set_power_wells(power_domains, i9xx_always_on_power_well);
9c065a7d
SV
3472 }
3473
f28ec6f4
ID
3474 return err;
3475}
21792c60 3476
f28ec6f4
ID
3477/**
3478 * intel_power_domains_cleanup - clean up power domains resources
3479 * @dev_priv: i915 device instance
3480 *
3481 * Release any resources acquired by intel_power_domains_init()
3482 */
3483void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3484{
3485 kfree(dev_priv->power_domains.power_wells);
9c065a7d
SV
3486}
3487
30eade12 3488static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
9c065a7d
SV
3489{
3490 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3491 struct i915_power_well *power_well;
9c065a7d
SV
3492
3493 mutex_lock(&power_domains->lock);
75ccb2ec 3494 for_each_power_well(dev_priv, power_well) {
f28ec6f4
ID
3495 power_well->desc->ops->sync_hw(dev_priv, power_well);
3496 power_well->hw_enabled =
3497 power_well->desc->ops->is_enabled(dev_priv, power_well);
9c065a7d
SV
3498 }
3499 mutex_unlock(&power_domains->lock);
3500}
3501
aa9664ff
MK
3502static inline
3503bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3504 i915_reg_t reg, bool enable)
70c2c184 3505{
aa9664ff 3506 u32 val, status;
70c2c184 3507
aa9664ff
MK
3508 val = I915_READ(reg);
3509 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3510 I915_WRITE(reg, val);
3511 POSTING_READ(reg);
70c2c184
VS
3512 udelay(10);
3513
aa9664ff
MK
3514 status = I915_READ(reg) & DBUF_POWER_STATE;
3515 if ((enable && !status) || (!enable && status)) {
3516 DRM_ERROR("DBus power %s timeout!\n",
3517 enable ? "enable" : "disable");
3518 return false;
3519 }
3520 return true;
3521}
3522
3523static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
3524{
3525 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
70c2c184
VS
3526}
3527
3528static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
3529{
aa9664ff
MK
3530 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
3531}
70c2c184 3532
aa9664ff
MK
3533static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3534{
3535 if (INTEL_GEN(dev_priv) < 11)
3536 return 1;
3537 return 2;
3538}
70c2c184 3539
aa9664ff
MK
3540void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3541 u8 req_slices)
3542{
8577c319 3543 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
aa9664ff
MK
3544 bool ret;
3545
3546 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3547 DRM_ERROR("Invalid number of dbuf slices requested\n");
3548 return;
3549 }
3550
3551 if (req_slices == hw_enabled_slices || req_slices == 0)
3552 return;
3553
aa9664ff
MK
3554 if (req_slices > hw_enabled_slices)
3555 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3556 else
3557 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3558
3559 if (ret)
3560 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
70c2c184
VS
3561}
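/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * only the second slice (DBUF_CTL_S2) is ever toggled here; both slices are
 * brought up and torn down together by icl_dbuf_enable()/icl_dbuf_disable()
 * during display core init/uninit. A caller that has worked out its DDB
 * needs (the real user is the watermark/DDB code) would do something like:
 *
 *	u8 req_slices = need_extra_bandwidth ? 2 : 1;
 *
 *	icl_dbuf_slices_update(dev_priv, req_slices);
 *
 * where "need_extra_bandwidth" stands in for the real plane/bandwidth
 * calculation. Requests above intel_dbuf_max_slices() are rejected with an
 * error, and requests matching the currently enabled count are ignored.
 */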
3562
746edf8f
MK
3563static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3564{
3565 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3566 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3567 POSTING_READ(DBUF_CTL_S2);
3568
3569 udelay(10);
3570
3571 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3572 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3573 DRM_ERROR("DBuf power enable timeout\n");
74bd8004
MK
3574 else
3575 dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
746edf8f
MK
3576}
3577
3578static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3579{
3580 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3581 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3582 POSTING_READ(DBUF_CTL_S2);
3583
3584 udelay(10);
3585
3586 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3587 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3588 DRM_ERROR("DBuf power disable timeout!\n");
74bd8004
MK
3589 else
3590 dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
746edf8f
MK
3591}
3592
4cb4585e
MK
3593static void icl_mbus_init(struct drm_i915_private *dev_priv)
3594{
739f3abd 3595 u32 val;
4cb4585e
MK
3596
3597 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3598 MBUS_ABOX_BT_CREDIT_POOL2(16) |
3599 MBUS_ABOX_B_CREDIT(1) |
3600 MBUS_ABOX_BW_CREDIT(1);
3601
3602 I915_WRITE(MBUS_ABOX_CTL, val);
3603}
3604
7c86828d
JRS
3605static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3606 bool enable)
3607{
6edafc4e
JRS
3608 i915_reg_t reg;
3609 u32 reset_bits, val;
3610
3611 if (IS_IVYBRIDGE(dev_priv)) {
3612 reg = GEN7_MSG_CTL;
3613 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3614 } else {
3615 reg = HSW_NDE_RSTWRN_OPT;
3616 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3617 }
3618
3619 val = I915_READ(reg);
7c86828d
JRS
3620
3621 if (enable)
6edafc4e 3622 val |= reset_bits;
7c86828d 3623 else
6edafc4e 3624 val &= ~reset_bits;
7c86828d 3625
6edafc4e 3626 I915_WRITE(reg, val);
7c86828d
JRS
3627}
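/*
 * Illustrative note (added by the editor, not part of the original file):
 * the callers below only enable the handshake when a PCH is actually
 * present, e.g.
 *
 *	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 *
 * while the BXT/GLK init path passes false explicitly, since there is no
 * PCH to respond to the reset.
 */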
3628
73dfc227 3629static void skl_display_core_init(struct drm_i915_private *dev_priv,
443a93ac 3630 bool resume)
73dfc227
ID
3631{
3632 struct i915_power_domains *power_domains = &dev_priv->power_domains;
443a93ac 3633 struct i915_power_well *well;
73dfc227 3634
d26fa1d5
ID
3635 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3636
73dfc227 3637 /* enable PCH reset handshake */
6edafc4e 3638 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
73dfc227
ID
3639
3640 /* enable PG1 and Misc I/O */
3641 mutex_lock(&power_domains->lock);
443a93ac
ID
3642
3643 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3644 intel_power_well_enable(dev_priv, well);
3645
3646 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3647 intel_power_well_enable(dev_priv, well);
3648
73dfc227
ID
3649 mutex_unlock(&power_domains->lock);
3650
73dfc227
ID
3651 skl_init_cdclk(dev_priv);
3652
70c2c184
VS
3653 gen9_dbuf_enable(dev_priv);
3654
9f7eb31a 3655 if (resume && dev_priv->csr.dmc_payload)
2abc525b 3656 intel_csr_load_program(dev_priv);
73dfc227
ID
3657}
3658
3659static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3660{
3661 struct i915_power_domains *power_domains = &dev_priv->power_domains;
443a93ac 3662 struct i915_power_well *well;
73dfc227 3663
d26fa1d5
ID
3664 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3665
70c2c184
VS
3666 gen9_dbuf_disable(dev_priv);
3667
73dfc227
ID
3668 skl_uninit_cdclk(dev_priv);
3669
3670 /* The spec doesn't call for removing the reset handshake flag */
3671 /* disable PG1 and Misc I/O */
443a93ac 3672
73dfc227 3673 mutex_lock(&power_domains->lock);
443a93ac 3674
edfda8e3
ID
3675 /*
3676 * BSpec says to keep the MISC IO power well enabled here, only
3677 * remove our request for power well 1.
42d9366d
ID
3678 * Note that even though the driver's request is removed, power well 1
3679 * may stay enabled after this due to DMC's own request on it.
edfda8e3 3680 */
443a93ac
ID
3681 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3682 intel_power_well_disable(dev_priv, well);
3683
73dfc227 3684 mutex_unlock(&power_domains->lock);
846c6b26
ID
3685
3686 usleep_range(10, 30); /* 10 us delay per Bspec */
73dfc227
ID
3687}
3688
d7d7c9ee
ID
3689void bxt_display_core_init(struct drm_i915_private *dev_priv,
3690 bool resume)
3691{
3692 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3693 struct i915_power_well *well;
d7d7c9ee
ID
3694
3695 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3696
3697 /*
3698 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
3699 * or else the reset will hang because there is no PCH to respond.
3700 * Move the handshake programming to initialization sequence.
3701 * Previously was left up to BIOS.
3702 */
7c86828d 3703 intel_pch_reset_handshake(dev_priv, false);
d7d7c9ee
ID
3704
3705 /* Enable PG1 */
3706 mutex_lock(&power_domains->lock);
3707
3708 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3709 intel_power_well_enable(dev_priv, well);
3710
3711 mutex_unlock(&power_domains->lock);
3712
324513c0 3713 bxt_init_cdclk(dev_priv);
70c2c184
VS
3714
3715 gen9_dbuf_enable(dev_priv);
3716
d7d7c9ee
ID
3717 if (resume && dev_priv->csr.dmc_payload)
3718 intel_csr_load_program(dev_priv);
3719}
3720
3721void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
3722{
3723 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3724 struct i915_power_well *well;
3725
3726 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3727
70c2c184
VS
3728 gen9_dbuf_disable(dev_priv);
3729
324513c0 3730 bxt_uninit_cdclk(dev_priv);
d7d7c9ee
ID
3731
3732 /* The spec doesn't call for removing the reset handshake flag */
3733
42d9366d
ID
3734 /*
3735 * Disable PW1 (PG1).
3736 * Note that even though the driver's request is removed power well 1
3737 * may stay enabled after this due to DMC's own request on it.
3738 */
d7d7c9ee
ID
3739 mutex_lock(&power_domains->lock);
3740
3741 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3742 intel_power_well_disable(dev_priv, well);
3743
3744 mutex_unlock(&power_domains->lock);
846c6b26
ID
3745
3746 usleep_range(10, 30); /* 10 us delay per Bspec */
d7d7c9ee
ID
3747}
3748
ade5ee7e
PZ
3749static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
3750{
3751 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3752 struct i915_power_well *well;
ade5ee7e
PZ
3753
3754 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3755
3756 /* 1. Enable PCH Reset Handshake */
6edafc4e 3757 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
ade5ee7e 3758
c45198b1
ID
3759 /* 2-3. */
3760 cnl_combo_phys_init(dev_priv);
d8d4a512 3761
b38131fb
ID
3762 /*
3763 * 4. Enable Power Well 1 (PG1).
3764 * The AUX IO power wells will be enabled on demand.
3765 */
d8d4a512
VS
3766 mutex_lock(&power_domains->lock);
3767 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3768 intel_power_well_enable(dev_priv, well);
3769 mutex_unlock(&power_domains->lock);
3770
3771 /* 5. Enable CD clock */
3772 cnl_init_cdclk(dev_priv);
3773
3774 /* 6. Enable DBUF */
3775 gen9_dbuf_enable(dev_priv);
57522c4c
ID
3776
3777 if (resume && dev_priv->csr.dmc_payload)
3778 intel_csr_load_program(dev_priv);
d8d4a512
VS
3779}
3780
d8d4a512
VS
3781static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3782{
3783 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3784 struct i915_power_well *well;
d8d4a512
VS
3785
3786 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3787
3788 /* 1. Disable all display engine functions -> already done */
3789
3790 /* 2. Disable DBUF */
3791 gen9_dbuf_disable(dev_priv);
3792
3793 /* 3. Disable CD clock */
3794 cnl_uninit_cdclk(dev_priv);
3795
b38131fb
ID
3796 /*
3797 * 4. Disable Power Well 1 (PG1).
3798 * The AUX IO power wells are toggled on demand, so they are already
3799 * disabled at this point.
3800 */
d8d4a512
VS
3801 mutex_lock(&power_domains->lock);
3802 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3803 intel_power_well_disable(dev_priv, well);
3804 mutex_unlock(&power_domains->lock);
3805
846c6b26
ID
3806 usleep_range(10, 30); /* 10 us delay per Bspec */
3807
c45198b1
ID
3808 /* 5. */
3809 cnl_combo_phys_uninit(dev_priv);
d8d4a512
VS
3810}
3811
3e68928b
AM
3812void icl_display_core_init(struct drm_i915_private *dev_priv,
3813 bool resume)
ad186f3f 3814{
67ca07e7
ID
3815 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3816 struct i915_power_well *well;
ad186f3f
PZ
3817
3818 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3819
3820 /* 1. Enable PCH reset handshake. */
6edafc4e 3821 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
ad186f3f 3822
c45198b1
ID
3823 /* 2-3. */
3824 icl_combo_phys_init(dev_priv);
ad186f3f 3825
67ca07e7
ID
3826 /*
3827 * 4. Enable Power Well 1 (PG1).
3828 * The AUX IO power wells will be enabled on demand.
3829 */
3830 mutex_lock(&power_domains->lock);
d9fcdc8d 3831 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
67ca07e7
ID
3832 intel_power_well_enable(dev_priv, well);
3833 mutex_unlock(&power_domains->lock);
ad186f3f
PZ
3834
3835 /* 5. Enable CDCLK. */
3836 icl_init_cdclk(dev_priv);
3837
3838 /* 6. Enable DBUF. */
746edf8f 3839 icl_dbuf_enable(dev_priv);
ad186f3f
PZ
3840
3841 /* 7. Setup MBUS. */
4cb4585e 3842 icl_mbus_init(dev_priv);
4445930f
AS
3843
3844 if (resume && dev_priv->csr.dmc_payload)
3845 intel_csr_load_program(dev_priv);
ad186f3f
PZ
3846}
3847
3e68928b 3848void icl_display_core_uninit(struct drm_i915_private *dev_priv)
ad186f3f 3849{
67ca07e7
ID
3850 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3851 struct i915_power_well *well;
ad186f3f
PZ
3852
3853 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3854
3855 /* 1. Disable all display engine functions -> already done */
3856
3857 /* 2. Disable DBUF */
746edf8f 3858 icl_dbuf_disable(dev_priv);
ad186f3f
PZ
3859
3860 /* 3. Disable CD clock */
3861 icl_uninit_cdclk(dev_priv);
3862
67ca07e7
ID
3863 /*
3864 * 4. Disable Power Well 1 (PG1).
3865 * The AUX IO power wells are toggled on demand, so they are already
3866 * disabled at this point.
3867 */
3868 mutex_lock(&power_domains->lock);
d9fcdc8d 3869 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
67ca07e7
ID
3870 intel_power_well_disable(dev_priv, well);
3871 mutex_unlock(&power_domains->lock);
ad186f3f 3872
c45198b1
ID
3873 /* 5. */
3874 icl_combo_phys_uninit(dev_priv);
ad186f3f
PZ
3875}
3876
70722468
VS
3877static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3878{
3879 struct i915_power_well *cmn_bc =
2183b499 3880 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
70722468 3881 struct i915_power_well *cmn_d =
2183b499 3882 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
70722468
VS
3883
3884 /*
3885 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
3886 * workaround never ever read DISPLAY_PHY_CONTROL, and
3887 * instead maintain a shadow copy ourselves. Use the actual
e0fce78f
VS
3888 * power well state and lane status to reconstruct the
3889 * expected initial value.
70722468
VS
3890 */
3891 dev_priv->chv_phy_control =
bc284542
VS
3892 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3893 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
e0fce78f
VS
3894 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3895 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3896 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3897
3898 /*
3899 * If all lanes are disabled we leave the override disabled
3900 * with all power down bits cleared to match the state we
3901 * would use after disabling the port. Otherwise enable the
3902 * override and set the lane powerdown bits according to the
3903 * current lane status.
3904 */
f28ec6f4 3905 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
739f3abd 3906 u32 status = I915_READ(DPLL(PIPE_A));
e0fce78f
VS
3907 unsigned int mask;
3908
3909 mask = status & DPLL_PORTB_READY_MASK;
3910 if (mask == 0xf)
3911 mask = 0x0;
3912 else
3913 dev_priv->chv_phy_control |=
3914 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3915
3916 dev_priv->chv_phy_control |=
3917 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3918
3919 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3920 if (mask == 0xf)
3921 mask = 0x0;
3922 else
3923 dev_priv->chv_phy_control |=
3924 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3925
3926 dev_priv->chv_phy_control |=
3927 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3928
70722468 3929 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3be60de9
VS
3930
3931 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3932 } else {
3933 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
e0fce78f
VS
3934 }
3935
f28ec6f4 3936 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
739f3abd 3937 u32 status = I915_READ(DPIO_PHY_STATUS);
e0fce78f
VS
3938 unsigned int mask;
3939
3940 mask = status & DPLL_PORTD_READY_MASK;
3941
3942 if (mask == 0xf)
3943 mask = 0x0;
3944 else
3945 dev_priv->chv_phy_control |=
3946 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3947
3948 dev_priv->chv_phy_control |=
3949 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3950
70722468 3951 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3be60de9
VS
3952
3953 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3954 } else {
3955 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
e0fce78f
VS
3956 }
3957
3958 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3959
3960 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3961 dev_priv->chv_phy_control);
70722468
VS
3962}
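/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * an example of the lane-status reconstruction above. If DPLL(PIPE_A)
 * reports only two of the four DPLL_PORTB_READY_MASK bits set (mask == 0x3),
 * the mask is neither 0xf nor 0x0, so the override is enabled and the shadow
 * value gains
 *
 *	PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0) |
 *	PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)
 *
 * whereas a fully-ready channel (mask == 0xf) is folded back to 0x0 with the
 * override left disabled, matching the state the driver would use after
 * disabling the port.
 */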
3963
9c065a7d
SV
3964static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3965{
3966 struct i915_power_well *cmn =
2183b499 3967 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
9c065a7d 3968 struct i915_power_well *disp2d =
2183b499 3969 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
9c065a7d 3970
9c065a7d 3971 /* If the display might be already active skip this */
f28ec6f4
ID
3972 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
3973 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
9c065a7d
SV
3974 I915_READ(DPIO_CTL) & DPIO_CMNRST)
3975 return;
3976
3977 DRM_DEBUG_KMS("toggling display PHY side reset\n");
3978
3979 /* cmnlane needs DPLL registers */
f28ec6f4 3980 disp2d->desc->ops->enable(dev_priv, disp2d);
9c065a7d
SV
3981
3982 /*
3983 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3984 * Need to assert and de-assert PHY SB reset by gating the
3985 * common lane power, then un-gating it.
3986 * Simply ungating isn't enough to reset the PHY enough to get
3987 * ports and lanes running.
3988 */
f28ec6f4 3989 cmn->desc->ops->disable(dev_priv, cmn);
9c065a7d
SV
3990}
3991
6dfc4a8f
ID
3992static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
3993
e4e7684f
SV
3994/**
3995 * intel_power_domains_init_hw - initialize hardware power domain state
25c896bd 3996 * @i915: i915 device instance
14bb2c11 3997 * @resume: Called from resume code paths or not
e4e7684f
SV
3998 *
3999 * This function initializes the hardware power domain state and enables all
8d8c386c 4000 * power wells belonging to the INIT power domain. Power wells in other
d8c5d29f
ID
4001 * domains (and not in the INIT domain) are referenced or disabled by
4002 * intel_modeset_readout_hw_state(). After that the reference count of each
4003 * power well must match its HW enabled state, see
4004 * intel_power_domains_verify_state().
2cd9a689
ID
4005 *
4006 * It will return with power domains disabled (to be enabled later by
4007 * intel_power_domains_enable()) and must be paired with
4008 * intel_power_domains_fini_hw().
e4e7684f 4009 */
25c896bd 4010void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
9c065a7d 4011{
25c896bd 4012 struct i915_power_domains *power_domains = &i915->power_domains;
9c065a7d
SV
4013
4014 power_domains->initializing = true;
4015
25c896bd
CW
4016 if (IS_ICELAKE(i915)) {
4017 icl_display_core_init(i915, resume);
4018 } else if (IS_CANNONLAKE(i915)) {
4019 cnl_display_core_init(i915, resume);
4020 } else if (IS_GEN9_BC(i915)) {
4021 skl_display_core_init(i915, resume);
4022 } else if (IS_GEN9_LP(i915)) {
4023 bxt_display_core_init(i915, resume);
4024 } else if (IS_CHERRYVIEW(i915)) {
770effb1 4025 mutex_lock(&power_domains->lock);
25c896bd 4026 chv_phy_control_init(i915);
770effb1 4027 mutex_unlock(&power_domains->lock);
25c896bd 4028 } else if (IS_VALLEYVIEW(i915)) {
9c065a7d 4029 mutex_lock(&power_domains->lock);
25c896bd 4030 vlv_cmnlane_wa(i915);
9c065a7d 4031 mutex_unlock(&power_domains->lock);
25c896bd
CW
4032 } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
4033 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4034 }
9c065a7d 4035
2cd9a689
ID
4036 /*
4037 * Keep all power wells enabled for any dependent HW access during
4038 * initialization and to make sure we keep BIOS enabled display HW
4039 * resources powered until display HW readout is complete. We drop
4040 * this reference in intel_power_domains_enable().
4041 */
25c896bd
CW
4042 power_domains->wakeref =
4043 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4044
d314cd43 4045 /* Disable power support if the user asked so. */
4f044a88 4046 if (!i915_modparams.disable_power_well)
25c896bd
CW
4047 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4048 intel_power_domains_sync_hw(i915);
6dfc4a8f 4049
d8c5d29f 4050 power_domains->initializing = false;
9c065a7d
SV
4051}
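/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * the pairing described in the kerneldoc above, as seen from the driver
 * load/unload paths (simplified; the real call sites live in the driver
 * load/unload code):
 *
 *	intel_power_domains_init(i915);
 *	intel_power_domains_init_hw(i915, false);	<- wells on, INIT ref held
 *	...display HW readout...
 *	intel_power_domains_enable(i915);		<- drop INIT ref, allow toggling
 *
 *	intel_power_domains_disable(i915);		<- re-take INIT ref
 *	intel_power_domains_fini_hw(i915);
 *	intel_power_domains_cleanup(i915);
 *
 * Each step must be paired with its counterpart in the opposite direction.
 */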
4052
48a287ed
ID
4053/**
4054 * intel_power_domains_fini_hw - deinitialize hw power domain state
25c896bd 4055 * @i915: i915 device instance
48a287ed
ID
4056 *
4057 * De-initializes the display power domain HW state. It also ensures that the
4058 * device stays powered up so that the driver can be reloaded.
2cd9a689
ID
4059 *
4060 * It must be called with power domains already disabled (after a call to
4061 * intel_power_domains_disable()) and must be paired with
4062 * intel_power_domains_init_hw().
48a287ed 4063 */
25c896bd 4064void intel_power_domains_fini_hw(struct drm_i915_private *i915)
48a287ed 4065{
25c896bd
CW
4066 intel_wakeref_t wakeref __maybe_unused =
4067 fetch_and_zero(&i915->power_domains.wakeref);
48a287ed
ID
4068
4069 /* Remove the refcount we took to keep power well support disabled. */
4070 if (!i915_modparams.disable_power_well)
25c896bd
CW
4071 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4072
4073 intel_power_domains_verify_state(i915);
6dfc4a8f 4074
25c896bd
CW
4075 /* Keep the power well enabled, but cancel its rpm wakeref. */
4076 intel_runtime_pm_put(i915, wakeref);
48a287ed
ID
4077}
4078
2cd9a689
ID
4079/**
4080 * intel_power_domains_enable - enable toggling of display power wells
25c896bd 4081 * @i915: i915 device instance
2cd9a689
ID
4082 *
4083 * Enable the on-demand enabling/disabling of the display power wells. Note that
4084 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4085 * only at specific points of the display modeset sequence, thus they are not
4086 * affected by the intel_power_domains_enable()/disable() calls. The purpose
4087 * of this function is to keep the rest of the power wells enabled until the end
4088 * of display HW readout (which will acquire the power references reflecting
4089 * the current HW state).
4090 */
25c896bd 4091void intel_power_domains_enable(struct drm_i915_private *i915)
2cd9a689 4092{
25c896bd
CW
4093 intel_wakeref_t wakeref __maybe_unused =
4094 fetch_and_zero(&i915->power_domains.wakeref);
6dfc4a8f 4095
25c896bd
CW
4096 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4097 intel_power_domains_verify_state(i915);
2cd9a689
ID
4098}
4099
4100/**
4101 * intel_power_domains_disable - disable toggling of display power wells
25c896bd 4102 * @i915: i915 device instance
2cd9a689
ID
4103 *
4104 * Disable the on-demand enabling/disabling of the display power wells. See
4105 * intel_power_domains_enable() for which power wells this call controls.
4106 */
25c896bd 4107void intel_power_domains_disable(struct drm_i915_private *i915)
2cd9a689 4108{
25c896bd 4109 struct i915_power_domains *power_domains = &i915->power_domains;
6dfc4a8f 4110
25c896bd
CW
4111 WARN_ON(power_domains->wakeref);
4112 power_domains->wakeref =
4113 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4114
4115 intel_power_domains_verify_state(i915);
2cd9a689
ID
4116}
4117
73dfc227
ID
4118/**
4119 * intel_power_domains_suspend - suspend power domain state
25c896bd 4120 * @i915: i915 device instance
2cd9a689 4121 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
73dfc227
ID
4122 *
4123 * This function prepares the hardware power domain state before entering
2cd9a689
ID
4124 * system suspend.
4125 *
4126 * It must be called with power domains already disabled (after a call to
4127 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
73dfc227 4128 */
25c896bd 4129void intel_power_domains_suspend(struct drm_i915_private *i915,
2cd9a689 4130 enum i915_drm_suspend_mode suspend_mode)
73dfc227 4131{
25c896bd
CW
4132 struct i915_power_domains *power_domains = &i915->power_domains;
4133 intel_wakeref_t wakeref __maybe_unused =
4134 fetch_and_zero(&power_domains->wakeref);
2cd9a689 4135
25c896bd 4136 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2cd9a689
ID
4137
4138 /*
a61d904f
ID
4139 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4140 * support don't manually deinit the power domains. This also means the
4141 * CSR/DMC firmware will stay active, it will power down any HW
4142 * resources as required and also enable deeper system power states
4143 * that would be blocked if the firmware was inactive.
2cd9a689 4144 */
25c896bd 4145 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
a61d904f 4146 suspend_mode == I915_DRM_SUSPEND_IDLE &&
25c896bd
CW
4147 i915->csr.dmc_payload) {
4148 intel_power_domains_verify_state(i915);
2cd9a689 4149 return;
6dfc4a8f 4150 }
2cd9a689 4151
d314cd43
ID
4152 /*
4153 * Even if power well support was disabled we still want to disable
2cd9a689 4154 * power wells if power domains must be deinitialized for suspend.
d314cd43 4155 */
6dfc4a8f 4156 if (!i915_modparams.disable_power_well) {
25c896bd
CW
4157 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4158 intel_power_domains_verify_state(i915);
6dfc4a8f 4159 }
2622d79b 4160
25c896bd
CW
4161 if (IS_ICELAKE(i915))
4162 icl_display_core_uninit(i915);
4163 else if (IS_CANNONLAKE(i915))
4164 cnl_display_core_uninit(i915);
4165 else if (IS_GEN9_BC(i915))
4166 skl_display_core_uninit(i915);
4167 else if (IS_GEN9_LP(i915))
4168 bxt_display_core_uninit(i915);
2cd9a689
ID
4169
4170 power_domains->display_core_suspended = true;
4171}
4172
4173/**
4174 * intel_power_domains_resume - resume power domain state
25c896bd 4175 * @i915: i915 device instance
2cd9a689
ID
4176 *
4177 * This function resumes the hardware power domain state during system resume.
4178 *
4179 * It will return with power domain support disabled (to be enabled later by
4180 * intel_power_domains_enable()) and must be paired with
4181 * intel_power_domains_suspend().
4182 */
25c896bd 4183void intel_power_domains_resume(struct drm_i915_private *i915)
2cd9a689 4184{
25c896bd 4185 struct i915_power_domains *power_domains = &i915->power_domains;
2cd9a689
ID
4186
4187 if (power_domains->display_core_suspended) {
25c896bd 4188 intel_power_domains_init_hw(i915, true);
2cd9a689 4189 power_domains->display_core_suspended = false;
6dfc4a8f 4190 } else {
25c896bd
CW
4191 WARN_ON(power_domains->wakeref);
4192 power_domains->wakeref =
4193 intel_display_power_get(i915, POWER_DOMAIN_INIT);
2cd9a689
ID
4194 }
4195
25c896bd 4196 intel_power_domains_verify_state(i915);
73dfc227
ID
4197}
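/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * how the suspend/resume pair above is intended to bracket a system suspend
 * cycle (simplified; call sites are in the driver's PM hooks):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	...system sleep...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 *
 * For suspend-to-idle on a DMC platform without DC9 support the suspend step
 * returns early and leaves the display core initialized so the firmware can
 * keep managing DC states.
 */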
4198
6dfc4a8f
ID
4199#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4200
25c896bd 4201static void intel_power_domains_dump_info(struct drm_i915_private *i915)
8d8c386c 4202{
25c896bd 4203 struct i915_power_domains *power_domains = &i915->power_domains;
8d8c386c
ID
4204 struct i915_power_well *power_well;
4205
25c896bd 4206 for_each_power_well(i915, power_well) {
8d8c386c
ID
4207 enum intel_display_power_domain domain;
4208
4209 DRM_DEBUG_DRIVER("%-25s %d\n",
f28ec6f4 4210 power_well->desc->name, power_well->count);
8d8c386c 4211
f28ec6f4 4212 for_each_power_domain(domain, power_well->desc->domains)
8d8c386c
ID
4213 DRM_DEBUG_DRIVER(" %-23s %d\n",
4214 intel_display_power_domain_str(domain),
4215 power_domains->domain_use_count[domain]);
4216 }
4217}
4218
4219/**
4220 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
25c896bd 4221 * @i915: i915 device instance
8d8c386c
ID
4222 *
4223 * Verify if the reference count of each power well matches its HW enabled
4224 * state and the total refcount of the domains it belongs to. This must be
4225 * called after modeset HW state sanitization, which is responsible for
4226 * acquiring reference counts for any power wells in use and disabling the
4227 * ones left on by BIOS but not required by any active output.
4228 */
25c896bd 4229static void intel_power_domains_verify_state(struct drm_i915_private *i915)
8d8c386c 4230{
25c896bd 4231 struct i915_power_domains *power_domains = &i915->power_domains;
8d8c386c
ID
4232 struct i915_power_well *power_well;
4233 bool dump_domain_info;
4234
4235 mutex_lock(&power_domains->lock);
4236
4237 dump_domain_info = false;
25c896bd 4238 for_each_power_well(i915, power_well) {
8d8c386c
ID
4239 enum intel_display_power_domain domain;
4240 int domains_count;
4241 bool enabled;
4242
25c896bd 4243 enabled = power_well->desc->ops->is_enabled(i915, power_well);
f28ec6f4
ID
4244 if ((power_well->count || power_well->desc->always_on) !=
4245 enabled)
8d8c386c 4246 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
f28ec6f4
ID
4247 power_well->desc->name,
4248 power_well->count, enabled);
8d8c386c
ID
4249
4250 domains_count = 0;
f28ec6f4 4251 for_each_power_domain(domain, power_well->desc->domains)
8d8c386c
ID
4252 domains_count += power_domains->domain_use_count[domain];
4253
4254 if (power_well->count != domains_count) {
4255 DRM_ERROR("power well %s refcount/domain refcount mismatch "
4256 "(refcount %d/domains refcount %d)\n",
f28ec6f4 4257 power_well->desc->name, power_well->count,
8d8c386c
ID
4258 domains_count);
4259 dump_domain_info = true;
4260 }
4261 }
4262
4263 if (dump_domain_info) {
4264 static bool dumped;
4265
4266 if (!dumped) {
25c896bd 4267 intel_power_domains_dump_info(i915);
8d8c386c
ID
4268 dumped = true;
4269 }
4270 }
4271
4272 mutex_unlock(&power_domains->lock);
4273}
4274
6dfc4a8f
ID
4275#else
4276
25c896bd 4277static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6dfc4a8f
ID
4278{
4279}
4280
4281#endif
4282
e4e7684f
SV
4283/**
4284 * intel_runtime_pm_get - grab a runtime pm reference
bd780f37 4285 * @i915: i915 device instance
e4e7684f
SV
4286 *
4287 * This function grabs a device-level runtime pm reference (mostly used for GEM
4288 * code to ensure the GTT or GT is on) and ensures that it is powered up.
4289 *
4290 * Any runtime pm reference obtained by this function must have a symmetric
4291 * call to intel_runtime_pm_put() to release the reference again.
16e4dd03
CW
4292 *
4293 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
e4e7684f 4294 */
16e4dd03 4295intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
9c065a7d 4296{
bd780f37 4297 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4298 struct device *kdev = &pdev->dev;
f5073824 4299 int ret;
9c065a7d 4300
f5073824
ID
4301 ret = pm_runtime_get_sync(kdev);
4302 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
1f814dac 4303
16e4dd03 4304 return track_intel_runtime_pm_wakeref(i915);
9c065a7d
SV
4305}
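/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * the wakeref cookie returned here must be handed back to
 * intel_runtime_pm_put(), which lets the CONFIG_DRM_I915_DEBUG_RUNTIME_PM
 * tracking match every acquire with its release:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(i915);
 *	...access registers / GTT...
 *	intel_runtime_pm_put(i915, wakeref);
 */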
4306
09731280
ID
4307/**
4308 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
bd780f37 4309 * @i915: i915 device instance
09731280
ID
4310 *
4311 * This function grabs a device-level runtime pm reference if the device is
acb79148
CW
4312 * already in use and ensures that it is powered up. It is illegal to try
4313 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
09731280
ID
4314 *
4315 * Any runtime pm reference obtained by this function must have a symmetric
4316 * call to intel_runtime_pm_put() to release the reference again.
acb79148 4317 *
16e4dd03
CW
4318 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
4319 * as True if the wakeref was acquired, or False otherwise.
09731280 4320 */
16e4dd03 4321intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
09731280 4322{
135dc79e 4323 if (IS_ENABLED(CONFIG_PM)) {
bd780f37 4324 struct pci_dev *pdev = i915->drm.pdev;
acb79148 4325 struct device *kdev = &pdev->dev;
09731280 4326
135dc79e
CW
4327 /*
4328 * In cases where runtime PM is disabled by the RPM core and we get
4329 * an -EINVAL return value, we are not supposed to call this
4330 * function, since the power state is undefined. At the moment this
4331 * applies to the late/early system suspend/resume handlers.
4332 */
acb79148 4333 if (pm_runtime_get_if_in_use(kdev) <= 0)
16e4dd03 4334 return 0;
135dc79e 4335 }
09731280 4336
16e4dd03 4337 return track_intel_runtime_pm_wakeref(i915);
09731280
ID
4338}
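/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * a caller that must not wake the device checks the returned cookie and
 * backs off when the device is suspended:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_if_in_use(i915);
 *	if (!wakeref)
 *		return;		<- device suspended, HW must not be touched
 *	...access HW...
 *	intel_runtime_pm_put(i915, wakeref);
 */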
4339
e4e7684f
SV
4340/**
4341 * intel_runtime_pm_get_noresume - grab a runtime pm reference
bd780f37 4342 * @i915: i915 device instance
e4e7684f
SV
4343 *
4344 * This function grabs a device-level runtime pm reference (mostly used for GEM
4345 * code to ensure the GTT or GT is on).
4346 *
4347 * It will _not_ power up the device but instead only check that it's powered
4349 * on. Therefore it is only valid to call this function from contexts where
4349 * the device is known to be powered up and where trying to power it up would
4350 * result in hilarity and deadlocks. That pretty much means only the system
4351 * suspend/resume code where this is used to grab runtime pm references for
4352 * delayed setup down in work items.
4353 *
4354 * Any runtime pm reference obtained by this function must have a symmetric
4355 * call to intel_runtime_pm_put() to release the reference again.
16e4dd03
CW
4356 *
4357 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
e4e7684f 4358 */
16e4dd03 4359intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
9c065a7d 4360{
bd780f37 4361 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4362 struct device *kdev = &pdev->dev;
9c065a7d 4363
bd780f37 4364 assert_rpm_wakelock_held(i915);
c49d13ee 4365 pm_runtime_get_noresume(kdev);
1f814dac 4366
16e4dd03 4367 return track_intel_runtime_pm_wakeref(i915);
9c065a7d
SV
4368}
4369
e4e7684f
SV
4370/**
4371 * intel_runtime_pm_put_unchecked - release a runtime pm reference
bd780f37 4372 * @i915: i915 device instance
e4e7684f
SV
4373 *
4374 * This function drops the device-level runtime pm reference obtained by
4375 * intel_runtime_pm_get() and might power down the corresponding
4376 * hardware block right away if this is the last reference.
4377 */
16e4dd03 4378void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
9c065a7d 4379{
bd780f37 4380 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4381 struct device *kdev = &pdev->dev;
9c065a7d 4382
bd780f37 4383 untrack_intel_runtime_pm_wakeref(i915);
1f814dac 4384
c49d13ee
DW
4385 pm_runtime_mark_last_busy(kdev);
4386 pm_runtime_put_autosuspend(kdev);
9c065a7d
SV
4387}
4388
16e4dd03
CW
4389#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4390void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
4391{
4392 cancel_intel_runtime_pm_wakeref(i915, wref);
4393 intel_runtime_pm_put_unchecked(i915);
4394}
4395#endif
4396
e4e7684f
SV
4397/**
4398 * intel_runtime_pm_enable - enable runtime pm
bd780f37 4399 * @i915: i915 device instance
e4e7684f
SV
4400 *
4401 * This function enables runtime pm at the end of the driver load sequence.
4402 *
4403 * Note that this function does currently not enable runtime pm for the
2cd9a689
ID
4404 * subordinate display power domains. That is done by
4405 * intel_power_domains_enable().
e4e7684f 4406 */
bd780f37 4407void intel_runtime_pm_enable(struct drm_i915_private *i915)
9c065a7d 4408{
bd780f37 4409 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4410 struct device *kdev = &pdev->dev;
9c065a7d 4411
07d80572
CW
4412 /*
4413 * Disable the system suspend direct complete optimization, which can
4414 * leave the device suspended skipping the driver's suspend handlers
4415 * if the device was already runtime suspended. This is needed due to
4416 * the difference in our runtime and system suspend sequence and
4417 * because the HDA driver may require us to enable the audio power
4418 * domain during system suspend.
4419 */
4420 dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4421
c49d13ee
DW
4422 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4423 pm_runtime_mark_last_busy(kdev);
cbc68dc9 4424
25b181b4
ID
4425 /*
4426 * Take a permanent reference to disable the RPM functionality and drop
4427 * it only when unloading the driver. Use the low level get/put helpers,
4428 * so the driver's own RPM reference tracking asserts also work on
4429 * platforms without RPM support.
4430 */
bd780f37 4431 if (!HAS_RUNTIME_PM(i915)) {
f5073824
ID
4432 int ret;
4433
c49d13ee 4434 pm_runtime_dont_use_autosuspend(kdev);
f5073824
ID
4435 ret = pm_runtime_get_sync(kdev);
4436 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
cbc68dc9 4437 } else {
c49d13ee 4438 pm_runtime_use_autosuspend(kdev);
cbc68dc9 4439 }
9c065a7d 4440
aabee1bb
ID
4441 /*
4442 * The core calls the driver load handler with an RPM reference held.
4443 * We drop that here and will reacquire it during unloading in
4444 * intel_runtime_pm_disable().
4445 */
c49d13ee 4446 pm_runtime_put_autosuspend(kdev);
9c065a7d 4447}
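/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * intel_runtime_pm_enable() is the last step of driver load and is undone in
 * reverse order on removal (simplified):
 *
 *	intel_runtime_pm_enable(i915);		<- driver registered, RPM armed
 *	...device in use...
 *	intel_runtime_pm_disable(i915);		<- hand the RPM reference back to the core
 *	intel_runtime_pm_cleanup(i915);		<- check for leaked wakerefs
 *
 * On platforms without HAS_RUNTIME_PM() the enable step instead takes a
 * permanent RPM reference, effectively disabling runtime pm while keeping
 * the driver's own wakeref tracking asserts functional.
 */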
07d80572 4448
bd780f37 4449void intel_runtime_pm_disable(struct drm_i915_private *i915)
07d80572 4450{
bd780f37 4451 struct pci_dev *pdev = i915->drm.pdev;
07d80572
CW
4452 struct device *kdev = &pdev->dev;
4453
4454 /* Transfer rpm ownership back to core */
bd780f37 4455 WARN(pm_runtime_get_sync(kdev) < 0,
07d80572
CW
4456 "Failed to pass rpm ownership back to core\n");
4457
4458 pm_runtime_dont_use_autosuspend(kdev);
4459
bd780f37 4460 if (!HAS_RUNTIME_PM(i915))
07d80572
CW
4461 pm_runtime_put(kdev);
4462}
bd780f37
CW
4463
4464void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
4465{
4466 struct i915_runtime_pm *rpm = &i915->runtime_pm;
4467 int count;
4468
4469 count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
4470 WARN(count,
4471 "i915->runtime_pm.wakeref_count=%d on cleanup\n",
4472 count);
4473
4474 untrack_intel_runtime_pm_wakeref(i915);
4475}
4476
4477void intel_runtime_pm_init_early(struct drm_i915_private *i915)
4478{
4479 init_intel_runtime_pm_wakeref(i915);
4480}