]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - drivers/gpu/drm/i915/intel_runtime_pm.c
drm/i915: Track all held rpm wakerefs
[thirdparty/kernel/stable.git] / drivers / gpu / drm / i915 / intel_runtime_pm.c
CommitLineData
9c065a7d
SV
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
bd780f37
CW
32#include <drm/drm_print.h>
33
9c065a7d
SV
34#include "i915_drv.h"
35#include "intel_drv.h"
9c065a7d 36
e4e7684f
SV
37/**
38 * DOC: runtime pm
39 *
40 * The i915 driver supports dynamic enabling and disabling of entire hardware
41 * blocks at runtime. This is especially important on the display side where
42 * software is supposed to control many power gates manually on recent hardware,
43 * since on the GT side a lot of the power management is done by the hardware.
44 * But even there some manual control at the device level is required.
45 *
46 * Since i915 supports a diverse set of platforms with a unified codebase and
47 * hardware engineers just love to shuffle functionality around between power
48 * domains there's a sizeable amount of indirection required. This file provides
49 * generic functions to the driver for grabbing and releasing references for
50 * abstract power domains. It then maps those to the actual power wells
51 * present for a given platform.
52 */
53
bd780f37
CW
54#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
55
56#include <linux/sort.h>
57
58#define STACKDEPTH 8
59
/* Capture the current call stack and intern it in the stack depot. */
static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
		.skip = 1, /* skip this helper itself */
	};

	save_stack_trace(&trace);
	/* Strip the trailing ULONG_MAX sentinel some arches append. */
	if (trace.nr_entries &&
	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlocks: no sleeping allocations. */
	return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
}
76
/* Fetch an interned stack from the depot and format it into @buf. */
static void __print_depot_stack(depot_stack_handle_t stack,
				char *buf, int sz, int indent)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
	};

	depot_fetch_stack(stack, &trace);
	snprint_stack_trace(buf, sz, &trace, indent);
}
89
/* One-time init of the wakeref debug tracking (debug build only). */
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;

	spin_lock_init(&rpm->debug.lock);
}
96
/*
 * Account one more runtime-pm wakeref and, on platforms with runtime pm,
 * record the caller's stack so leaks can be attributed later.
 */
static noinline void
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	atomic_inc(&rpm->wakeref_count);
	assert_rpm_wakelock_held(i915);

	if (!HAS_RUNTIME_PM(i915))
		return;

	/* Stack capture is best effort; depot may fail under pressure. */
	stack = __save_depot_stack();
	if (!stack)
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	/*
	 * Grow the owners array by one slot; on allocation failure the
	 * stack is simply not recorded (tracking stays best effort).
	 */
	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);
}
129
130static int cmphandle(const void *_a, const void *_b)
131{
132 const depot_stack_handle_t * const a = _a, * const b = _b;
133
134 if (*a < *b)
135 return -1;
136 else if (*a > *b)
137 return 1;
138 else
139 return 0;
140}
141
/*
 * Dump a snapshot of the wakeref debug state: last acquire/release
 * stacks plus every currently tracked owner, with duplicates coalesced.
 */
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	/* Scratch buffer for formatting one stack trace at a time. */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	/* Sort so identical handles are adjacent for the rep-counting below. */
	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		/* Collapse runs of the same stack into "x<rep>". */
		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}
180
/*
 * Drop one runtime-pm wakeref. When the last reference goes away, steal
 * the accumulated owner stacks under the lock and dump them afterwards.
 */
static noinline void
untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm_debug dbg = {};
	struct drm_printer p;
	unsigned long flags;

	assert_rpm_wakelock_held(i915);
	/* Lock is only taken on the final 1 -> 0 transition. */
	if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					&rpm->debug.lock,
					flags)) {
		dbg = rpm->debug;

		rpm->debug.owners = NULL;
		rpm->debug.count = 0;
		rpm->debug.last_release = __save_depot_stack();

		spin_unlock_irqrestore(&rpm->debug.lock, flags);
	}
	if (!dbg.count)
		return;

	/* Print outside the spinlock; formatting may allocate/sleep. */
	p = drm_debug_printer("i915");
	__print_intel_runtime_pm_wakeref(&p, &dbg);

	kfree(dbg.owners);
}
209
/*
 * Copy the live wakeref debug state and print it via @p. The copy is
 * made under the lock with a retry loop: if the owners array grew
 * between sizing and copying, reallocate and try again.
 */
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		struct i915_runtime_pm *rpm = &i915->runtime_pm;
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		/* Only copy if our buffer (sized to 'alloc') is big enough. */
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		/* Grow outside the lock, then retry the snapshot. */
		s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}
245
246#else
247
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
}
251
/* Non-debug variant: only maintain the reference count. */
static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	atomic_inc(&i915->runtime_pm.wakeref_count);
	assert_rpm_wakelock_held(i915);
}
257
/* Non-debug variant: only maintain the reference count. */
static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	assert_rpm_wakelock_held(i915);
	atomic_dec(&i915->runtime_pm.wakeref_count);
}
263
264#endif
265
5aefb239 266bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
438b8dc4 267 enum i915_power_well_id power_well_id);
5aefb239 268
9895ad03
DS
/* Human-readable name of a display power domain, for debugfs/log output. */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
		return "TRANSCODER_EDP_VDSC";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		/* New domains must be added here as they are introduced. */
		MISSING_CASE(domain);
		return "?";
	}
}
370
e8ca9320
DL
/* Enable a power well via its platform ops and record the HW state. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	/* Mark enabled only after the enable op completed. */
	power_well->hw_enabled = true;
}
378
dcddab3a
DL
/* Disable a power well; hw_enabled is cleared first, mirroring enable. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}
386
b409ca95
ID
387static void intel_power_well_get(struct drm_i915_private *dev_priv,
388 struct i915_power_well *power_well)
389{
390 if (!power_well->count++)
391 intel_power_well_enable(dev_priv, power_well);
392}
393
394static void intel_power_well_put(struct drm_i915_private *dev_priv,
395 struct i915_power_well *power_well)
396{
397 WARN(!power_well->count, "Use count on power well %s is already zero",
f28ec6f4 398 power_well->desc->name);
b409ca95
ID
399
400 if (!--power_well->count)
401 intel_power_well_disable(dev_priv, power_well);
402}
403
e4e7684f
SV
404/**
405 * __intel_display_power_is_enabled - unlocked check for a power domain
406 * @dev_priv: i915 device instance
407 * @domain: power domain to check
408 *
409 * This is the unlocked version of intel_display_power_is_enabled() and should
410 * only be used from error capture and recovery code where deadlocks are
411 * possible.
412 *
413 * Returns:
414 * True when the power domain is enabled, false otherwise.
415 */
f458ebbc
SV
416bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
417 enum intel_display_power_domain domain)
9c065a7d 418{
9c065a7d
SV
419 struct i915_power_well *power_well;
420 bool is_enabled;
9c065a7d 421
ad1443f0 422 if (dev_priv->runtime_pm.suspended)
9c065a7d
SV
423 return false;
424
9c065a7d
SV
425 is_enabled = true;
426
56d4eac0 427 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
f28ec6f4 428 if (power_well->desc->always_on)
9c065a7d
SV
429 continue;
430
431 if (!power_well->hw_enabled) {
432 is_enabled = false;
433 break;
434 }
435 }
436
437 return is_enabled;
438}
439
e4e7684f 440/**
f61ccae3 441 * intel_display_power_is_enabled - check for a power domain
e4e7684f
SV
442 * @dev_priv: i915 device instance
443 * @domain: power domain to check
444 *
445 * This function can be used to check the hw power domain state. It is mostly
446 * used in hardware state readout functions. Everywhere else code should rely
447 * upon explicit power domain reference counting to ensure that the hardware
448 * block is powered up before accessing it.
449 *
450 * Callers must hold the relevant modesetting locks to ensure that concurrent
451 * threads can't disable the power well while the caller tries to read a few
452 * registers.
453 *
454 * Returns:
455 * True when the power domain is enabled, false otherwise.
456 */
f458ebbc
SV
457bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
458 enum intel_display_power_domain domain)
9c065a7d
SV
459{
460 struct i915_power_domains *power_domains;
461 bool ret;
462
463 power_domains = &dev_priv->power_domains;
464
465 mutex_lock(&power_domains->lock);
f458ebbc 466 ret = __intel_display_power_is_enabled(dev_priv, domain);
9c065a7d
SV
467 mutex_unlock(&power_domains->lock);
468
469 return ret;
470}
471
472/*
473 * Starting with Haswell, we have a "Power Down Well" that can be turned off
474 * when not needed anymore. We have 4 registers that can request the power well
475 * to be enabled, and it will only be disabled if none of the registers is
476 * requesting it to be enabled.
477 */
001bd2cb
ID
/* Post-enable fixups: poke the VGA MSR and re-enable pipe interrupts. */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	/* Re-arm interrupts for pipes covered by this power well. */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
502
001bd2cb
ID
/* Quiesce interrupts for the covered pipes before the well is disabled. */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
509
aae8ba84 510
76347c04
ID
/* Poll the driver control register until the well reports enabled. */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(dev_priv,
					regs->driver,
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					1));
}
524
76347c04 525static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
75e39688
ID
526 const struct i915_power_well_regs *regs,
527 int pw_idx)
42d9366d 528{
75e39688 529 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
42d9366d
ID
530 u32 ret;
531
75e39688
ID
532 ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
533 ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
534 if (regs->kvmr.reg)
535 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
536 ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
42d9366d
ID
537
538 return ret;
539}
540
76347c04
ID
/* Wait for the well to power down, diagnosing any external requesters. */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
568
b2891eb2
ID
/* Wait for the fuse distribution status of the given power gate. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
577
ec46d483
ID
/*
 * Enable a HSW+ style power well: wait for fuse preconditions, set the
 * driver request bit, apply workarounds and run post-enable fixups.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	/* Request the well on and wait for HW to acknowledge. */
	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}
00742cab 621
ec46d483
ID
622static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
623 struct i915_power_well *power_well)
624{
75e39688
ID
625 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
626 int pw_idx = power_well->desc->hsw.idx;
1af474fe
ID
627 u32 val;
628
f28ec6f4
ID
629 hsw_power_well_pre_disable(dev_priv,
630 power_well->desc->hsw.irq_pipe_mask);
001bd2cb 631
75e39688
ID
632 val = I915_READ(regs->driver);
633 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
76347c04 634 hsw_wait_for_power_well_disable(dev_priv, power_well);
9c065a7d
SV
635}
636
75e39688 637#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
67ca07e7
ID
638
/* Enable an ICL combo PHY AUX power well, including lane power-up. */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	/* Request the power well on. */
	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	/* Power up the AUX lanes of the corresponding combo PHY port. */
	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (IS_ICELAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, port)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}
665
/* Disable an ICL combo PHY AUX power well; lanes first, then the well. */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	/* Power down the AUX lanes before releasing the well. */
	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
683
c7375d95
ID
684#define ICL_AUX_PW_TO_CH(pw_idx) \
685 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
686
/* Select TBT vs. non-TBT AUX IO for a Type-C port, then enable the well. */
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
	u32 val;

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);
}
702
d42539ba
ID
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	/* Both the request and state bits must be set. */
	return (val & mask) == mask;
}
732
664326f8
SK
/* Sanity-check the preconditions for entering the DC9 power state. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
753
/* Sanity-check the preconditions for exiting the DC9 power state. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
769
779cb5d3
MK
/* Write DC_STATE_EN with verify-and-rewrite, since DMC can lose writes. */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Stable for several consecutive reads: done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
806
da2f41d1 807static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
664326f8 808{
da2f41d1 809 u32 mask;
664326f8 810
13ae3a0d 811 mask = DC_STATE_EN_UPTO_DC5;
3e68928b
AM
812 if (INTEL_GEN(dev_priv) >= 11)
813 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
814 else if (IS_GEN9_LP(dev_priv))
13ae3a0d
ID
815 mask |= DC_STATE_EN_DC9;
816 else
817 mask |= DC_STATE_EN_UPTO_DC6;
664326f8 818
da2f41d1
ID
819 return mask;
820}
821
/* Re-sync the software DC state tracking with the hardware register. */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}
832
13e1592f
ID
833/**
834 * gen9_set_dc_state - set target display C power state
835 * @dev_priv: i915 device instance
836 * @state: target DC power state
837 * - DC_STATE_DISABLE
838 * - DC_STATE_EN_UPTO_DC5
839 * - DC_STATE_EN_UPTO_DC6
840 * - DC_STATE_EN_DC9
841 *
842 * Signal to DMC firmware/HW the target DC power state passed in @state.
843 * DMC/HW can turn off individual display clocks and power rails when entering
844 * a deeper DC power state (higher in number) and turns these back when exiting
845 * that state to a shallower power state (lower in number). The HW will decide
846 * when to actually enter a given state on an on-demand basis, for instance
847 * depending on the active state of display pipes. The state of display
848 * registers backed by affected power rails are saved/restored as needed.
849 *
850 * Based on the above enabling a deeper DC power state is asynchronous wrt.
851 * enabling it. Disabling a deeper power state is synchronous: for instance
852 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
853 * back on and register state is restored. This is guaranteed by the MMIO write
854 * to DC_STATE_EN blocking until the state is restored.
855 */
da2f41d1
ID
856static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
857{
858 uint32_t val;
859 uint32_t mask;
860
a37baf3b
ID
861 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
862 state &= dev_priv->csr.allowed_dc_mask;
443646c7 863
664326f8 864 val = I915_READ(DC_STATE_EN);
da2f41d1 865 mask = gen9_dc_mask(dev_priv);
13ae3a0d
ID
866 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
867 val & mask, state);
832dba88
PJ
868
869 /* Check if DMC is ignoring our DC state requests */
870 if ((val & mask) != dev_priv->csr.dc_state)
871 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
872 dev_priv->csr.dc_state, val & mask);
873
13ae3a0d
ID
874 val &= ~mask;
875 val |= state;
779cb5d3
MK
876
877 gen9_write_dc_state(dev_priv, val);
832dba88
PJ
878
879 dev_priv->csr.dc_state = val & mask;
664326f8
SK
880}
881
13ae3a0d 882void bxt_enable_dc9(struct drm_i915_private *dev_priv)
664326f8 883{
13ae3a0d
ID
884 assert_can_enable_dc9(dev_priv);
885
886 DRM_DEBUG_KMS("Enabling DC9\n");
3e68928b
AM
887 /*
888 * Power sequencer reset is not needed on
889 * platforms with South Display Engine on PCH,
890 * because PPS registers are always on.
891 */
892 if (!HAS_PCH_SPLIT(dev_priv))
893 intel_power_sequencer_reset(dev_priv);
13ae3a0d
ID
894 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
895}
896
/* Leave the DC9 power state and reapply the PPS register unlock WA. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
907
af5fead2
SV
/* Verify the CSR/DMC firmware has been programmed into the hardware. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
915
f7480b2f
PZ
/* Find the power well with the given id; WARNs and falls back if absent. */
static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}
936
5aefb239 937static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
dc174300 938{
5aefb239
SS
939 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
940 SKL_DISP_PW_2);
941
6ff8ab0d 942 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
5aefb239 943
6ff8ab0d
JB
944 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
945 "DC5 already programmed to be enabled.\n");
c9b8846a 946 assert_rpm_wakelock_held(dev_priv);
5aefb239
SS
947
948 assert_csr_loaded(dev_priv);
949}
950
/*
 * Allow the DMC firmware to enter the DC5 power saving state.
 * Applies the display workaround needed on gen9 big-core parts before
 * arming the state.
 */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
964
/*
 * Verify the preconditions for entering DC6: the utility pin (backlight)
 * must be disabled, DC6 must not already be armed and the DMC firmware
 * must be loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
974
/*
 * Allow the DMC firmware to enter the DC6 power saving state.
 * Mirrors gen9_enable_dc5() including the Wa #1183 chicken-bit write.
 */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
988
9c065a7d
SV
/*
 * Synchronize the driver's power well request with whatever the BIOS
 * left behind: if the BIOS request bit is set, copy it into the driver
 * control register and clear the BIOS one so the driver owns the well
 * from now on.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = I915_READ(regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(regs->driver);

		/* Keep the well up before dropping the BIOS request */
		if (!(drv_req & mask))
			I915_WRITE(regs->driver, drv_req | mask);
		I915_WRITE(regs->bios, bios_req & ~mask);
	}
}
1006
9c8d0b8e
ID
/* Power up the DDI PHY backing this BXT DPIO common lane power well. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}
1012
/* Power down the DDI PHY backing this BXT DPIO common lane power well. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
1018
/* Report whether the DDI PHY for this power well is currently powered. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
1024
9c8d0b8e
ID
/*
 * After a DC-state exit, verify that the HW state of every DDI PHY whose
 * power well has active references still matches what the driver expects.
 * GLK has an extra combo PHY (CMN_C) to check.
 */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}
1045
9f836f90
PJ
1046static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1047 struct i915_power_well *power_well)
1048{
1049 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
1050}
1051
18a8067c
VS
1052static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1053{
1054 u32 tmp = I915_READ(DBUF_CTL);
1055
1056 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1057 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1058 "Unexpected DBuf power power state (0x%08x)\n", tmp);
1059}
1060
9f836f90
PJ
/*
 * "Enable" the DC-off well, i.e. forbid DC5/DC6, then verify that the
 * HW state DMC was supposed to preserve (cdclk, DBuf, PHYs) survived
 * and restore what is known to be lost (ICL port B combo PHY).
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		icl_combo_phys_init(dev_priv);
}
1085
1086static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1087 struct i915_power_well *power_well)
1088{
f74ed08d
ID
1089 if (!dev_priv->csr.dmc_payload)
1090 return;
1091
a37baf3b 1092 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
9f836f90 1093 skl_enable_dc6(dev_priv);
a37baf3b 1094 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
9f836f90
PJ
1095 gen9_enable_dc5(dev_priv);
1096}
1097
3c1b38e6
ID
/* No HW state to synchronize for always-on wells on these platforms. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
1102
9c065a7d
SV
/* Enable/disable stub: an always-on well cannot be toggled. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1107
/* An always-on well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
1113
2ee0da16
VS
/*
 * i830 has no real display power wells; both pipes must always run, so
 * "enabling" this virtual well means turning on any pipe that is off.
 */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
1122
/* Turn both i830 pipes off (reverse order of the enable path). */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
1129
1130static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1131 struct i915_power_well *power_well)
1132{
1133 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1134 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1135}
1136
/*
 * Bring the HW in line with the software refcount at init/resume time:
 * pipes on if the well is referenced, off otherwise.
 */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
1145
9c065a7d
SV
/*
 * Set a VLV/CHV power well on or off through the PUnit power gate
 * control register, then poll the status register (up to 100ms) until
 * the PUnit reports the requested state.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	mutex_lock(&dev_priv->pcu_lock);

/* True once the PUnit status matches the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	/* Read-modify-write only the bits of this well */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1181
9c065a7d
SV
/* Power well ops wrapper: turn the well on via the PUnit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
1187
/* Power well ops wrapper: turn the well off via the PUnit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1193
/*
 * Read back a VLV/CHV power well state from the PUnit. Warns both on an
 * unexpected status value and on a status/control mismatch, which would
 * indicate some other agent is toggling the well concurrently.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1229
766078df
VS
/*
 * Program the VLV/CHV display clock gating, arbiter and raw clock
 * frequency registers to their required values after the display power
 * well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk must already be known to program the divider */
	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1256
/*
 * Re-initialize everything that lives in the VLV/CHV display power well
 * after it has been powered up: reference clocks, clock gating, display
 * IRQs and - outside of driver init/resume - hotplug, CRT ADPA, VGA and
 * the PPS unlock workaround.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1305
2be7d540
VS
/*
 * Tear down the software state depending on the VLV/CHV display power
 * well before it is switched off: display IRQs, PPS state and (outside
 * of late suspend) hotplug polling.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1321
/* Power the display well on first, then re-init everything inside it. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1329
9c065a7d
SV
/* Deinit the dependent state first, then power the display well off. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1337
/*
 * Power up the VLV DPIO common lane well and de-assert the PHY common
 * lane reset, per the VLV2A0 DPIO bring-up sequence.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1359
/*
 * Assert the DPIO common lane reset and power the well down. All PLLs
 * must already be disabled - asserted here as a precondition.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1373
d8fc70b7 1374#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
30142273 1375
30142273
VS
/* True when every bit of @bits is set in @val */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Cross-check the expected CHV DPIO PHY power state - derived from the
 * software-tracked chv_phy_control and the common lane power well
 * states - against the live DISPLAY_PHY_STATUS register.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
1485
9c065a7d
SV
/*
 * Power up a CHV DPIO common lane well (PHY0 = CMN_BC, PHY1 = CMN_D),
 * wait for the PHY power-good signal, configure dynamic power down and
 * de-assert the common lane reset via DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	/* pipe selects which sideband port the DPIO accesses go through */
	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1549
/*
 * Assert the common lane reset for a CHV DPIO PHY and power its well
 * down. The PLLs driven from the PHY must already be disabled.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1580
6669e39f
VS
/*
 * Verify the per-lane power down status bits of one CHV PHY channel
 * against what the override settings in @mask imply they should be.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1642
b0b33846
VS
/*
 * Set or clear the power down override for a whole CHV PHY channel.
 *
 * Returns the previous override state so the caller can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1673
e0fce78f
VS
/*
 * Program the per-lane power down override mask for the PHY channel
 * used by @encoder and enable/disable the override, then sanity-check
 * the resulting PHY state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the new one */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1703
/*
 * Read back the CHV pipe A power well state from the PUnit DSPFREQ
 * register, warning on unexpected or transient states.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1732
/*
 * Request the CHV pipe A power well state via the PUnit DSPFREQ control
 * bits and poll (up to 100ms) until the status bits reflect it.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

/* True once the PUnit status matches the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1766
9c065a7d
SV
/* Power the CHV pipe well on, then re-init the display state in it. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1774
/* Deinit the dependent display state, then power the pipe well off. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1782
09731280
ID
/*
 * Grab a reference on every power well backing @domain and bump the
 * domain use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1795
e4e7684f
SV
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Also takes a runtime PM reference, so the device stays awake while the
 * domain reference is held.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
1821
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the
 * domain (and the device's runtime PM) is already enabled - it will not
 * power anything up. On success the reference must be released with a
 * symmetric call to intel_display_power_put().
 *
 * Returns: true if the reference was taken, false if the domain (or the
 * device) was not powered and no reference was taken.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	/* Bail without touching HW if the device is runtime suspended */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	/* Drop the runtime PM reference again if no domain ref was taken */
	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}
1859
e4e7684f
SV
1860/**
1861 * intel_display_power_put - release a power domain reference
1862 * @dev_priv: i915 device instance
1863 * @domain: power domain to reference
1864 *
1865 * This function drops the power domain reference obtained by
1866 * intel_display_power_get() and might power down the corresponding hardware
1867 * block right away if this is the last reference.
1868 */
9c065a7d
SV
1869void intel_display_power_put(struct drm_i915_private *dev_priv,
1870 enum intel_display_power_domain domain)
1871{
1872 struct i915_power_domains *power_domains;
1873 struct i915_power_well *power_well;
9c065a7d
SV
1874
1875 power_domains = &dev_priv->power_domains;
1876
1877 mutex_lock(&power_domains->lock);
1878
11c86db8
DS
1879 WARN(!power_domains->domain_use_count[domain],
1880 "Use count on domain %s is already zero\n",
1881 intel_display_power_domain_str(domain));
9c065a7d
SV
1882 power_domains->domain_use_count[domain]--;
1883
56d4eac0 1884 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
b409ca95 1885 intel_power_well_put(dev_priv, power_well);
9c065a7d
SV
1886
1887 mutex_unlock(&power_domains->lock);
1888
1889 intel_runtime_pm_put(dev_priv);
1890}
1891
965a79ad
ID
/*
 * Per-platform power domain bitmasks.  Each *_POWER_DOMAINS macro below is
 * the set of display power domains backed by one power well; the masks are
 * referenced by the power-well descriptor tables further down in this file.
 * POWER_DOMAIN_INIT is included wherever the well must be on while the
 * display hardware is being (re-)initialized.
 */
#define I830_PIPES_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define HSW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (	\
	ICL_PW_4_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (	\
	ICL_PW_3_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	ICL_PW_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2292
/* The always-on well can never be toggled: all ops are no-ops. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Platforms without controllable display power wells: one always-on well. */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

/* HSW+ power well control register set, shared by all HSW-style wells. */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};

static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};

static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
2553
5aefb239 2554bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
438b8dc4 2555 enum i915_power_well_id power_well_id)
5aefb239
SS
2556{
2557 struct i915_power_well *power_well;
2558 bool ret;
2559
2560 power_well = lookup_power_well(dev_priv, power_well_id);
f28ec6f4 2561 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
5aefb239
SS
2562
2563 return ret;
2564}
2565
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};

static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
2719
f28ec6f4 2720static const struct i915_power_well_desc glk_power_wells[] = {
0d03926d
ACO
2721 {
2722 .name = "always-on",
285cf66d 2723 .always_on = true,
0d03926d
ACO
2724 .domains = POWER_DOMAIN_MASK,
2725 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2726 .id = DISP_PW_ID_NONE,
0d03926d
ACO
2727 },
2728 {
2729 .name = "power well 1",
2730 /* Handled by the DMC firmware */
fa96ed1f 2731 .always_on = true,
0d03926d 2732 .domains = 0,
4196b918 2733 .ops = &hsw_power_well_ops,
0d03926d 2734 .id = SKL_DISP_PW_1,
0a445945 2735 {
75e39688
ID
2736 .hsw.regs = &hsw_power_well_regs,
2737 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2738 .hsw.has_fuses = true,
2739 },
0d03926d
ACO
2740 },
2741 {
2742 .name = "DC off",
2743 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2744 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2745 .id = DISP_PW_ID_NONE,
0d03926d
ACO
2746 },
2747 {
2748 .name = "power well 2",
2749 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2750 .ops = &hsw_power_well_ops,
0d03926d 2751 .id = SKL_DISP_PW_2,
0a445945 2752 {
75e39688
ID
2753 .hsw.regs = &hsw_power_well_regs,
2754 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2755 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2756 .hsw.has_vga = true,
2757 .hsw.has_fuses = true,
2758 },
0d03926d 2759 },
0a116ce8
ACO
2760 {
2761 .name = "dpio-common-a",
2762 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2763 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2764 .id = BXT_DISP_PW_DPIO_CMN_A,
0a445945
ID
2765 {
2766 .bxt.phy = DPIO_PHY1,
2767 },
0a116ce8
ACO
2768 },
2769 {
2770 .name = "dpio-common-b",
2771 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2772 .ops = &bxt_dpio_cmn_power_well_ops,
d9fcdc8d 2773 .id = VLV_DISP_PW_DPIO_CMN_BC,
0a445945
ID
2774 {
2775 .bxt.phy = DPIO_PHY0,
2776 },
0a116ce8
ACO
2777 },
2778 {
2779 .name = "dpio-common-c",
2780 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2781 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2782 .id = GLK_DISP_PW_DPIO_CMN_C,
0a445945
ID
2783 {
2784 .bxt.phy = DPIO_PHY2,
2785 },
0a116ce8 2786 },
0d03926d
ACO
2787 {
2788 .name = "AUX A",
2789 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
4196b918 2790 .ops = &hsw_power_well_ops,
4739a9d2 2791 .id = DISP_PW_ID_NONE,
75e39688
ID
2792 {
2793 .hsw.regs = &hsw_power_well_regs,
2794 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2795 },
0d03926d
ACO
2796 },
2797 {
2798 .name = "AUX B",
2799 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
4196b918 2800 .ops = &hsw_power_well_ops,
4739a9d2 2801 .id = DISP_PW_ID_NONE,
75e39688
ID
2802 {
2803 .hsw.regs = &hsw_power_well_regs,
2804 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2805 },
0d03926d
ACO
2806 },
2807 {
2808 .name = "AUX C",
2809 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
4196b918 2810 .ops = &hsw_power_well_ops,
4739a9d2 2811 .id = DISP_PW_ID_NONE,
75e39688
ID
2812 {
2813 .hsw.regs = &hsw_power_well_regs,
2814 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2815 },
0d03926d
ACO
2816 },
2817 {
62b69566
ACO
2818 .name = "DDI A IO power well",
2819 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
4196b918 2820 .ops = &hsw_power_well_ops,
4739a9d2 2821 .id = DISP_PW_ID_NONE,
75e39688
ID
2822 {
2823 .hsw.regs = &hsw_power_well_regs,
2824 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2825 },
0d03926d
ACO
2826 },
2827 {
62b69566
ACO
2828 .name = "DDI B IO power well",
2829 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
4196b918 2830 .ops = &hsw_power_well_ops,
4739a9d2 2831 .id = DISP_PW_ID_NONE,
75e39688
ID
2832 {
2833 .hsw.regs = &hsw_power_well_regs,
2834 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2835 },
0d03926d
ACO
2836 },
2837 {
62b69566
ACO
2838 .name = "DDI C IO power well",
2839 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
4196b918 2840 .ops = &hsw_power_well_ops,
4739a9d2 2841 .id = DISP_PW_ID_NONE,
75e39688
ID
2842 {
2843 .hsw.regs = &hsw_power_well_regs,
2844 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2845 },
0d03926d
ACO
2846 },
2847};
2848
f28ec6f4 2849static const struct i915_power_well_desc cnl_power_wells[] = {
8bcd3dd4
VS
2850 {
2851 .name = "always-on",
285cf66d 2852 .always_on = true,
8bcd3dd4
VS
2853 .domains = POWER_DOMAIN_MASK,
2854 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2855 .id = DISP_PW_ID_NONE,
8bcd3dd4
VS
2856 },
2857 {
2858 .name = "power well 1",
2859 /* Handled by the DMC firmware */
fa96ed1f 2860 .always_on = true,
8bcd3dd4 2861 .domains = 0,
4196b918 2862 .ops = &hsw_power_well_ops,
8bcd3dd4 2863 .id = SKL_DISP_PW_1,
0a445945 2864 {
75e39688
ID
2865 .hsw.regs = &hsw_power_well_regs,
2866 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2867 .hsw.has_fuses = true,
2868 },
8bcd3dd4
VS
2869 },
2870 {
2871 .name = "AUX A",
2872 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
4196b918 2873 .ops = &hsw_power_well_ops,
4739a9d2 2874 .id = DISP_PW_ID_NONE,
75e39688
ID
2875 {
2876 .hsw.regs = &hsw_power_well_regs,
2877 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2878 },
8bcd3dd4
VS
2879 },
2880 {
2881 .name = "AUX B",
2882 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
4196b918 2883 .ops = &hsw_power_well_ops,
4739a9d2 2884 .id = DISP_PW_ID_NONE,
75e39688
ID
2885 {
2886 .hsw.regs = &hsw_power_well_regs,
2887 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2888 },
8bcd3dd4
VS
2889 },
2890 {
2891 .name = "AUX C",
2892 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
4196b918 2893 .ops = &hsw_power_well_ops,
4739a9d2 2894 .id = DISP_PW_ID_NONE,
75e39688
ID
2895 {
2896 .hsw.regs = &hsw_power_well_regs,
2897 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2898 },
8bcd3dd4
VS
2899 },
2900 {
2901 .name = "AUX D",
2902 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
4196b918 2903 .ops = &hsw_power_well_ops,
4739a9d2 2904 .id = DISP_PW_ID_NONE,
75e39688
ID
2905 {
2906 .hsw.regs = &hsw_power_well_regs,
2907 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
2908 },
8bcd3dd4
VS
2909 },
2910 {
2911 .name = "DC off",
2912 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2913 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2914 .id = DISP_PW_ID_NONE,
8bcd3dd4
VS
2915 },
2916 {
2917 .name = "power well 2",
2918 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2919 .ops = &hsw_power_well_ops,
8bcd3dd4 2920 .id = SKL_DISP_PW_2,
0a445945 2921 {
75e39688
ID
2922 .hsw.regs = &hsw_power_well_regs,
2923 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2924 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2925 .hsw.has_vga = true,
2926 .hsw.has_fuses = true,
2927 },
8bcd3dd4
VS
2928 },
2929 {
2930 .name = "DDI A IO power well",
2931 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
4196b918 2932 .ops = &hsw_power_well_ops,
4739a9d2 2933 .id = DISP_PW_ID_NONE,
75e39688
ID
2934 {
2935 .hsw.regs = &hsw_power_well_regs,
2936 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2937 },
8bcd3dd4
VS
2938 },
2939 {
2940 .name = "DDI B IO power well",
2941 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
4196b918 2942 .ops = &hsw_power_well_ops,
4739a9d2 2943 .id = DISP_PW_ID_NONE,
75e39688
ID
2944 {
2945 .hsw.regs = &hsw_power_well_regs,
2946 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2947 },
8bcd3dd4
VS
2948 },
2949 {
2950 .name = "DDI C IO power well",
2951 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
4196b918 2952 .ops = &hsw_power_well_ops,
4739a9d2 2953 .id = DISP_PW_ID_NONE,
75e39688
ID
2954 {
2955 .hsw.regs = &hsw_power_well_regs,
2956 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2957 },
8bcd3dd4
VS
2958 },
2959 {
2960 .name = "DDI D IO power well",
2961 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
4196b918 2962 .ops = &hsw_power_well_ops,
4739a9d2 2963 .id = DISP_PW_ID_NONE,
75e39688
ID
2964 {
2965 .hsw.regs = &hsw_power_well_regs,
2966 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2967 },
8bcd3dd4 2968 },
9787e835
RV
2969 {
2970 .name = "DDI F IO power well",
2971 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
2972 .ops = &hsw_power_well_ops,
4739a9d2 2973 .id = DISP_PW_ID_NONE,
75e39688
ID
2974 {
2975 .hsw.regs = &hsw_power_well_regs,
2976 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
2977 },
9787e835 2978 },
a324fcac
RV
2979 {
2980 .name = "AUX F",
2981 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
2982 .ops = &hsw_power_well_ops,
4739a9d2 2983 .id = DISP_PW_ID_NONE,
75e39688
ID
2984 {
2985 .hsw.regs = &hsw_power_well_regs,
2986 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
2987 },
a324fcac 2988 },
8bcd3dd4
VS
2989};
2990
67ca07e7
ID
2991static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
2992 .sync_hw = hsw_power_well_sync_hw,
2993 .enable = icl_combo_phy_aux_power_well_enable,
2994 .disable = icl_combo_phy_aux_power_well_disable,
2995 .is_enabled = hsw_power_well_enabled,
2996};
2997
c7375d95
ID
2998static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
2999 .sync_hw = hsw_power_well_sync_hw,
3000 .enable = icl_tc_phy_aux_power_well_enable,
3001 .disable = hsw_power_well_disable,
3002 .is_enabled = hsw_power_well_enabled,
3003};
3004
75e39688
ID
3005static const struct i915_power_well_regs icl_aux_power_well_regs = {
3006 .bios = ICL_PWR_WELL_CTL_AUX1,
3007 .driver = ICL_PWR_WELL_CTL_AUX2,
3008 .debug = ICL_PWR_WELL_CTL_AUX4,
3009};
3010
3011static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3012 .bios = ICL_PWR_WELL_CTL_DDI1,
3013 .driver = ICL_PWR_WELL_CTL_DDI2,
3014 .debug = ICL_PWR_WELL_CTL_DDI4,
3015};
3016
f28ec6f4 3017static const struct i915_power_well_desc icl_power_wells[] = {
67ca07e7
ID
3018 {
3019 .name = "always-on",
285cf66d 3020 .always_on = true,
67ca07e7
ID
3021 .domains = POWER_DOMAIN_MASK,
3022 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 3023 .id = DISP_PW_ID_NONE,
67ca07e7
ID
3024 },
3025 {
3026 .name = "power well 1",
3027 /* Handled by the DMC firmware */
fa96ed1f 3028 .always_on = true,
67ca07e7
ID
3029 .domains = 0,
3030 .ops = &hsw_power_well_ops,
d9fcdc8d 3031 .id = SKL_DISP_PW_1,
ae9b06ca 3032 {
75e39688
ID
3033 .hsw.regs = &hsw_power_well_regs,
3034 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
ae9b06ca
ID
3035 .hsw.has_fuses = true,
3036 },
67ca07e7 3037 },
a33e1ece
ID
3038 {
3039 .name = "DC off",
3040 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3041 .ops = &gen9_dc_off_power_well_ops,
3042 .id = DISP_PW_ID_NONE,
3043 },
67ca07e7
ID
3044 {
3045 .name = "power well 2",
3046 .domains = ICL_PW_2_POWER_DOMAINS,
3047 .ops = &hsw_power_well_ops,
d9fcdc8d 3048 .id = SKL_DISP_PW_2,
ae9b06ca 3049 {
75e39688
ID
3050 .hsw.regs = &hsw_power_well_regs,
3051 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
ae9b06ca
ID
3052 .hsw.has_fuses = true,
3053 },
67ca07e7 3054 },
67ca07e7
ID
3055 {
3056 .name = "power well 3",
3057 .domains = ICL_PW_3_POWER_DOMAINS,
3058 .ops = &hsw_power_well_ops,
4739a9d2 3059 .id = DISP_PW_ID_NONE,
ae9b06ca 3060 {
75e39688
ID
3061 .hsw.regs = &hsw_power_well_regs,
3062 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
ae9b06ca
ID
3063 .hsw.irq_pipe_mask = BIT(PIPE_B),
3064 .hsw.has_vga = true,
3065 .hsw.has_fuses = true,
3066 },
67ca07e7
ID
3067 },
3068 {
3069 .name = "DDI A IO",
3070 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3071 .ops = &hsw_power_well_ops,
4739a9d2 3072 .id = DISP_PW_ID_NONE,
75e39688
ID
3073 {
3074 .hsw.regs = &icl_ddi_power_well_regs,
3075 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3076 },
67ca07e7
ID
3077 },
3078 {
3079 .name = "DDI B IO",
3080 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3081 .ops = &hsw_power_well_ops,
4739a9d2 3082 .id = DISP_PW_ID_NONE,
75e39688
ID
3083 {
3084 .hsw.regs = &icl_ddi_power_well_regs,
3085 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3086 },
67ca07e7
ID
3087 },
3088 {
3089 .name = "DDI C IO",
3090 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3091 .ops = &hsw_power_well_ops,
4739a9d2 3092 .id = DISP_PW_ID_NONE,
75e39688
ID
3093 {
3094 .hsw.regs = &icl_ddi_power_well_regs,
3095 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3096 },
67ca07e7
ID
3097 },
3098 {
3099 .name = "DDI D IO",
3100 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3101 .ops = &hsw_power_well_ops,
4739a9d2 3102 .id = DISP_PW_ID_NONE,
75e39688
ID
3103 {
3104 .hsw.regs = &icl_ddi_power_well_regs,
3105 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3106 },
67ca07e7
ID
3107 },
3108 {
3109 .name = "DDI E IO",
3110 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3111 .ops = &hsw_power_well_ops,
4739a9d2 3112 .id = DISP_PW_ID_NONE,
75e39688
ID
3113 {
3114 .hsw.regs = &icl_ddi_power_well_regs,
3115 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3116 },
67ca07e7
ID
3117 },
3118 {
3119 .name = "DDI F IO",
3120 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3121 .ops = &hsw_power_well_ops,
4739a9d2 3122 .id = DISP_PW_ID_NONE,
75e39688
ID
3123 {
3124 .hsw.regs = &icl_ddi_power_well_regs,
3125 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3126 },
67ca07e7
ID
3127 },
3128 {
3129 .name = "AUX A",
3130 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3131 .ops = &icl_combo_phy_aux_power_well_ops,
4739a9d2 3132 .id = DISP_PW_ID_NONE,
75e39688
ID
3133 {
3134 .hsw.regs = &icl_aux_power_well_regs,
3135 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3136 },
67ca07e7
ID
3137 },
3138 {
3139 .name = "AUX B",
3140 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3141 .ops = &icl_combo_phy_aux_power_well_ops,
4739a9d2 3142 .id = DISP_PW_ID_NONE,
75e39688
ID
3143 {
3144 .hsw.regs = &icl_aux_power_well_regs,
3145 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3146 },
67ca07e7
ID
3147 },
3148 {
3149 .name = "AUX C",
3150 .domains = ICL_AUX_C_IO_POWER_DOMAINS,
c7375d95 3151 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3152 .id = DISP_PW_ID_NONE,
75e39688
ID
3153 {
3154 .hsw.regs = &icl_aux_power_well_regs,
3155 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
c7375d95 3156 .hsw.is_tc_tbt = false,
75e39688 3157 },
67ca07e7
ID
3158 },
3159 {
3160 .name = "AUX D",
3161 .domains = ICL_AUX_D_IO_POWER_DOMAINS,
c7375d95 3162 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3163 .id = DISP_PW_ID_NONE,
75e39688
ID
3164 {
3165 .hsw.regs = &icl_aux_power_well_regs,
3166 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
c7375d95 3167 .hsw.is_tc_tbt = false,
75e39688 3168 },
67ca07e7
ID
3169 },
3170 {
3171 .name = "AUX E",
3172 .domains = ICL_AUX_E_IO_POWER_DOMAINS,
c7375d95 3173 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3174 .id = DISP_PW_ID_NONE,
75e39688
ID
3175 {
3176 .hsw.regs = &icl_aux_power_well_regs,
3177 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
c7375d95 3178 .hsw.is_tc_tbt = false,
75e39688 3179 },
67ca07e7
ID
3180 },
3181 {
3182 .name = "AUX F",
3183 .domains = ICL_AUX_F_IO_POWER_DOMAINS,
c7375d95 3184 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3185 .id = DISP_PW_ID_NONE,
75e39688
ID
3186 {
3187 .hsw.regs = &icl_aux_power_well_regs,
3188 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
c7375d95 3189 .hsw.is_tc_tbt = false,
75e39688 3190 },
67ca07e7
ID
3191 },
3192 {
3193 .name = "AUX TBT1",
3194 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
c7375d95 3195 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3196 .id = DISP_PW_ID_NONE,
75e39688
ID
3197 {
3198 .hsw.regs = &icl_aux_power_well_regs,
3199 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
c7375d95 3200 .hsw.is_tc_tbt = true,
75e39688 3201 },
67ca07e7
ID
3202 },
3203 {
3204 .name = "AUX TBT2",
3205 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
c7375d95 3206 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3207 .id = DISP_PW_ID_NONE,
75e39688
ID
3208 {
3209 .hsw.regs = &icl_aux_power_well_regs,
3210 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
c7375d95 3211 .hsw.is_tc_tbt = true,
75e39688 3212 },
67ca07e7
ID
3213 },
3214 {
3215 .name = "AUX TBT3",
3216 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
c7375d95 3217 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3218 .id = DISP_PW_ID_NONE,
75e39688
ID
3219 {
3220 .hsw.regs = &icl_aux_power_well_regs,
3221 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
c7375d95 3222 .hsw.is_tc_tbt = true,
75e39688 3223 },
67ca07e7
ID
3224 },
3225 {
3226 .name = "AUX TBT4",
3227 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
c7375d95 3228 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3229 .id = DISP_PW_ID_NONE,
75e39688
ID
3230 {
3231 .hsw.regs = &icl_aux_power_well_regs,
3232 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
c7375d95 3233 .hsw.is_tc_tbt = true,
75e39688 3234 },
67ca07e7
ID
3235 },
3236 {
3237 .name = "power well 4",
3238 .domains = ICL_PW_4_POWER_DOMAINS,
3239 .ops = &hsw_power_well_ops,
4739a9d2 3240 .id = DISP_PW_ID_NONE,
ae9b06ca 3241 {
75e39688
ID
3242 .hsw.regs = &hsw_power_well_regs,
3243 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
ae9b06ca
ID
3244 .hsw.has_fuses = true,
3245 .hsw.irq_pipe_mask = BIT(PIPE_C),
3246 },
67ca07e7
ID
3247 },
3248};
3249
/*
 * Normalize the disable_power_well module parameter to a strict 0/1:
 * a non-negative user value is taken as a boolean, while the "auto"
 * value (-1) defaults to enabling power well support.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}
3259
a37baf3b
ID
3260static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3261 int enable_dc)
3262{
3263 uint32_t mask;
3264 int requested_dc;
3265 int max_dc;
3266
3e68928b 3267 if (INTEL_GEN(dev_priv) >= 11) {
a37baf3b 3268 max_dc = 2;
a37baf3b
ID
3269 /*
3270 * DC9 has a separate HW flow from the rest of the DC states,
3271 * not depending on the DMC firmware. It's needed by system
3272 * suspend/resume, so allow it unconditionally.
3273 */
3274 mask = DC_STATE_EN_DC9;
cf819eff 3275 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3e68928b
AM
3276 max_dc = 2;
3277 mask = 0;
3278 } else if (IS_GEN9_LP(dev_priv)) {
3279 max_dc = 1;
3280 mask = DC_STATE_EN_DC9;
a37baf3b
ID
3281 } else {
3282 max_dc = 0;
3283 mask = 0;
3284 }
3285
4f044a88 3286 if (!i915_modparams.disable_power_well)
66e2c4c3
ID
3287 max_dc = 0;
3288
a37baf3b
ID
3289 if (enable_dc >= 0 && enable_dc <= max_dc) {
3290 requested_dc = enable_dc;
3291 } else if (enable_dc == -1) {
3292 requested_dc = max_dc;
3293 } else if (enable_dc > max_dc && enable_dc <= 2) {
3294 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3295 enable_dc, max_dc);
3296 requested_dc = max_dc;
3297 } else {
3298 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3299 requested_dc = max_dc;
3300 }
3301
3302 if (requested_dc > 1)
3303 mask |= DC_STATE_EN_UPTO_DC6;
3304 if (requested_dc > 0)
3305 mask |= DC_STATE_EN_UPTO_DC5;
3306
3307 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3308
3309 return mask;
3310}
3311
f28ec6f4
ID
3312static int
3313__set_power_wells(struct i915_power_domains *power_domains,
3314 const struct i915_power_well_desc *power_well_descs,
3315 int power_well_count)
21792c60 3316{
f28ec6f4 3317 u64 power_well_ids = 0;
21792c60
ID
3318 int i;
3319
f28ec6f4
ID
3320 power_domains->power_well_count = power_well_count;
3321 power_domains->power_wells =
3322 kcalloc(power_well_count,
3323 sizeof(*power_domains->power_wells),
3324 GFP_KERNEL);
3325 if (!power_domains->power_wells)
3326 return -ENOMEM;
3327
3328 for (i = 0; i < power_well_count; i++) {
3329 enum i915_power_well_id id = power_well_descs[i].id;
3330
3331 power_domains->power_wells[i].desc = &power_well_descs[i];
21792c60 3332
4739a9d2
ID
3333 if (id == DISP_PW_ID_NONE)
3334 continue;
3335
21792c60
ID
3336 WARN_ON(id >= sizeof(power_well_ids) * 8);
3337 WARN_ON(power_well_ids & BIT_ULL(id));
3338 power_well_ids |= BIT_ULL(id);
3339 }
f28ec6f4
ID
3340
3341 return 0;
21792c60
ID
3342}
3343
f28ec6f4
ID
3344#define set_power_wells(power_domains, __power_well_descs) \
3345 __set_power_wells(power_domains, __power_well_descs, \
3346 ARRAY_SIZE(__power_well_descs))
9c065a7d 3347
e4e7684f
SV
3348/**
3349 * intel_power_domains_init - initializes the power domain structures
3350 * @dev_priv: i915 device instance
3351 *
3352 * Initializes the power domain structures for @dev_priv depending upon the
3353 * supported platform.
3354 */
9c065a7d
SV
3355int intel_power_domains_init(struct drm_i915_private *dev_priv)
3356{
3357 struct i915_power_domains *power_domains = &dev_priv->power_domains;
f28ec6f4 3358 int err;
9c065a7d 3359
4f044a88
MW
3360 i915_modparams.disable_power_well =
3361 sanitize_disable_power_well_option(dev_priv,
3362 i915_modparams.disable_power_well);
3363 dev_priv->csr.allowed_dc_mask =
3364 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
1b0e3a04 3365
d8fc70b7 3366 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
f0ab43e6 3367
9c065a7d
SV
3368 mutex_init(&power_domains->lock);
3369
3370 /*
3371 * The enabling order will be from lower to higher indexed wells,
3372 * the disabling order is reversed.
3373 */
67ca07e7 3374 if (IS_ICELAKE(dev_priv)) {
f28ec6f4 3375 err = set_power_wells(power_domains, icl_power_wells);
8bcd3dd4 3376 } else if (IS_CANNONLAKE(dev_priv)) {
f28ec6f4 3377 err = set_power_wells(power_domains, cnl_power_wells);
a324fcac
RV
3378
3379 /*
9787e835 3380 * DDI and Aux IO are getting enabled for all ports
a324fcac 3381 * regardless the presence or use. So, in order to avoid
9787e835 3382 * timeouts, lets remove them from the list
a324fcac
RV
3383 * for the SKUs without port F.
3384 */
3385 if (!IS_CNL_WITH_PORT_F(dev_priv))
9787e835 3386 power_domains->power_well_count -= 2;
0d03926d 3387 } else if (IS_GEMINILAKE(dev_priv)) {
f28ec6f4 3388 err = set_power_wells(power_domains, glk_power_wells);
fb72deae
RV
3389 } else if (IS_BROXTON(dev_priv)) {
3390 err = set_power_wells(power_domains, bxt_power_wells);
3391 } else if (IS_GEN9_BC(dev_priv)) {
3392 err = set_power_wells(power_domains, skl_power_wells);
2d1fe073 3393 } else if (IS_CHERRYVIEW(dev_priv)) {
f28ec6f4 3394 err = set_power_wells(power_domains, chv_power_wells);
fb72deae
RV
3395 } else if (IS_BROADWELL(dev_priv)) {
3396 err = set_power_wells(power_domains, bdw_power_wells);
3397 } else if (IS_HASWELL(dev_priv)) {
3398 err = set_power_wells(power_domains, hsw_power_wells);
2d1fe073 3399 } else if (IS_VALLEYVIEW(dev_priv)) {
f28ec6f4 3400 err = set_power_wells(power_domains, vlv_power_wells);
2ee0da16 3401 } else if (IS_I830(dev_priv)) {
f28ec6f4 3402 err = set_power_wells(power_domains, i830_power_wells);
9c065a7d 3403 } else {
f28ec6f4 3404 err = set_power_wells(power_domains, i9xx_always_on_power_well);
9c065a7d
SV
3405 }
3406
f28ec6f4
ID
3407 return err;
3408}
21792c60 3409
f28ec6f4
ID
3410/**
3411 * intel_power_domains_cleanup - clean up power domains resources
3412 * @dev_priv: i915 device instance
3413 *
3414 * Release any resources acquired by intel_power_domains_init()
3415 */
3416void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3417{
3418 kfree(dev_priv->power_domains.power_wells);
9c065a7d
SV
3419}
3420
30eade12 3421static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
9c065a7d
SV
3422{
3423 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3424 struct i915_power_well *power_well;
9c065a7d
SV
3425
3426 mutex_lock(&power_domains->lock);
75ccb2ec 3427 for_each_power_well(dev_priv, power_well) {
f28ec6f4
ID
3428 power_well->desc->ops->sync_hw(dev_priv, power_well);
3429 power_well->hw_enabled =
3430 power_well->desc->ops->is_enabled(dev_priv, power_well);
9c065a7d
SV
3431 }
3432 mutex_unlock(&power_domains->lock);
3433}
3434
aa9664ff
MK
3435static inline
3436bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3437 i915_reg_t reg, bool enable)
70c2c184 3438{
aa9664ff 3439 u32 val, status;
70c2c184 3440
aa9664ff
MK
3441 val = I915_READ(reg);
3442 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3443 I915_WRITE(reg, val);
3444 POSTING_READ(reg);
70c2c184
VS
3445 udelay(10);
3446
aa9664ff
MK
3447 status = I915_READ(reg) & DBUF_POWER_STATE;
3448 if ((enable && !status) || (!enable && status)) {
3449 DRM_ERROR("DBus power %s timeout!\n",
3450 enable ? "enable" : "disable");
3451 return false;
3452 }
3453 return true;
3454}
3455
3456static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
3457{
3458 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
70c2c184
VS
3459}
3460
3461static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
3462{
aa9664ff
MK
3463 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
3464}
70c2c184 3465
aa9664ff
MK
3466static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3467{
3468 if (INTEL_GEN(dev_priv) < 11)
3469 return 1;
3470 return 2;
3471}
70c2c184 3472
aa9664ff
MK
3473void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3474 u8 req_slices)
3475{
8577c319 3476 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
aa9664ff
MK
3477 bool ret;
3478
3479 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3480 DRM_ERROR("Invalid number of dbuf slices requested\n");
3481 return;
3482 }
3483
3484 if (req_slices == hw_enabled_slices || req_slices == 0)
3485 return;
3486
aa9664ff
MK
3487 if (req_slices > hw_enabled_slices)
3488 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3489 else
3490 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3491
3492 if (ret)
3493 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
70c2c184
VS
3494}
3495
746edf8f
MK
3496static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3497{
3498 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3499 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3500 POSTING_READ(DBUF_CTL_S2);
3501
3502 udelay(10);
3503
3504 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3505 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3506 DRM_ERROR("DBuf power enable timeout\n");
74bd8004
MK
3507 else
3508 dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
746edf8f
MK
3509}
3510
3511static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3512{
3513 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3514 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3515 POSTING_READ(DBUF_CTL_S2);
3516
3517 udelay(10);
3518
3519 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3520 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3521 DRM_ERROR("DBuf power disable timeout!\n");
74bd8004
MK
3522 else
3523 dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
746edf8f
MK
3524}
3525
4cb4585e
MK
3526static void icl_mbus_init(struct drm_i915_private *dev_priv)
3527{
3528 uint32_t val;
3529
3530 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3531 MBUS_ABOX_BT_CREDIT_POOL2(16) |
3532 MBUS_ABOX_B_CREDIT(1) |
3533 MBUS_ABOX_BW_CREDIT(1);
3534
3535 I915_WRITE(MBUS_ABOX_CTL, val);
3536}
3537
7c86828d
JRS
3538static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3539 bool enable)
3540{
6edafc4e
JRS
3541 i915_reg_t reg;
3542 u32 reset_bits, val;
3543
3544 if (IS_IVYBRIDGE(dev_priv)) {
3545 reg = GEN7_MSG_CTL;
3546 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3547 } else {
3548 reg = HSW_NDE_RSTWRN_OPT;
3549 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3550 }
3551
3552 val = I915_READ(reg);
7c86828d
JRS
3553
3554 if (enable)
6edafc4e 3555 val |= reset_bits;
7c86828d 3556 else
6edafc4e 3557 val &= ~reset_bits;
7c86828d 3558
6edafc4e 3559 I915_WRITE(reg, val);
7c86828d
JRS
3560}
3561
73dfc227 3562static void skl_display_core_init(struct drm_i915_private *dev_priv,
443a93ac 3563 bool resume)
73dfc227
ID
3564{
3565 struct i915_power_domains *power_domains = &dev_priv->power_domains;
443a93ac 3566 struct i915_power_well *well;
73dfc227 3567
d26fa1d5
ID
3568 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3569
73dfc227 3570 /* enable PCH reset handshake */
6edafc4e 3571 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
73dfc227
ID
3572
3573 /* enable PG1 and Misc I/O */
3574 mutex_lock(&power_domains->lock);
443a93ac
ID
3575
3576 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3577 intel_power_well_enable(dev_priv, well);
3578
3579 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3580 intel_power_well_enable(dev_priv, well);
3581
73dfc227
ID
3582 mutex_unlock(&power_domains->lock);
3583
73dfc227
ID
3584 skl_init_cdclk(dev_priv);
3585
70c2c184
VS
3586 gen9_dbuf_enable(dev_priv);
3587
9f7eb31a 3588 if (resume && dev_priv->csr.dmc_payload)
2abc525b 3589 intel_csr_load_program(dev_priv);
73dfc227
ID
3590}
3591
3592static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3593{
3594 struct i915_power_domains *power_domains = &dev_priv->power_domains;
443a93ac 3595 struct i915_power_well *well;
73dfc227 3596
d26fa1d5
ID
3597 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3598
70c2c184
VS
3599 gen9_dbuf_disable(dev_priv);
3600
73dfc227
ID
3601 skl_uninit_cdclk(dev_priv);
3602
3603 /* The spec doesn't call for removing the reset handshake flag */
3604 /* disable PG1 and Misc I/O */
443a93ac 3605
73dfc227 3606 mutex_lock(&power_domains->lock);
443a93ac 3607
edfda8e3
ID
3608 /*
3609 * BSpec says to keep the MISC IO power well enabled here, only
3610 * remove our request for power well 1.
42d9366d
ID
3611 * Note that even though the driver's request is removed power well 1
3612 * may stay enabled after this due to DMC's own request on it.
edfda8e3 3613 */
443a93ac
ID
3614 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3615 intel_power_well_disable(dev_priv, well);
3616
73dfc227 3617 mutex_unlock(&power_domains->lock);
846c6b26
ID
3618
3619 usleep_range(10, 30); /* 10 us delay per Bspec */
73dfc227
ID
3620}
3621
d7d7c9ee
ID
3622void bxt_display_core_init(struct drm_i915_private *dev_priv,
3623 bool resume)
3624{
3625 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3626 struct i915_power_well *well;
d7d7c9ee
ID
3627
3628 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3629
3630 /*
3631 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
3632 * or else the reset will hang because there is no PCH to respond.
3633 * Move the handshake programming to initialization sequence.
3634 * Previously was left up to BIOS.
3635 */
7c86828d 3636 intel_pch_reset_handshake(dev_priv, false);
d7d7c9ee
ID
3637
3638 /* Enable PG1 */
3639 mutex_lock(&power_domains->lock);
3640
3641 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3642 intel_power_well_enable(dev_priv, well);
3643
3644 mutex_unlock(&power_domains->lock);
3645
324513c0 3646 bxt_init_cdclk(dev_priv);
70c2c184
VS
3647
3648 gen9_dbuf_enable(dev_priv);
3649
d7d7c9ee
ID
3650 if (resume && dev_priv->csr.dmc_payload)
3651 intel_csr_load_program(dev_priv);
3652}
3653
3654void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
3655{
3656 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3657 struct i915_power_well *well;
3658
3659 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3660
70c2c184
VS
3661 gen9_dbuf_disable(dev_priv);
3662
324513c0 3663 bxt_uninit_cdclk(dev_priv);
d7d7c9ee
ID
3664
3665 /* The spec doesn't call for removing the reset handshake flag */
3666
42d9366d
ID
3667 /*
3668 * Disable PW1 (PG1).
3669 * Note that even though the driver's request is removed power well 1
3670 * may stay enabled after this due to DMC's own request on it.
3671 */
d7d7c9ee
ID
3672 mutex_lock(&power_domains->lock);
3673
3674 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3675 intel_power_well_disable(dev_priv, well);
3676
3677 mutex_unlock(&power_domains->lock);
846c6b26
ID
3678
3679 usleep_range(10, 30); /* 10 us delay per Bspec */
d7d7c9ee
ID
3680}
3681
ade5ee7e
PZ
3682static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
3683{
3684 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3685 struct i915_power_well *well;
ade5ee7e
PZ
3686
3687 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3688
3689 /* 1. Enable PCH Reset Handshake */
6edafc4e 3690 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
ade5ee7e 3691
c45198b1
ID
3692 /* 2-3. */
3693 cnl_combo_phys_init(dev_priv);
d8d4a512 3694
b38131fb
ID
3695 /*
3696 * 4. Enable Power Well 1 (PG1).
3697 * The AUX IO power wells will be enabled on demand.
3698 */
d8d4a512
VS
3699 mutex_lock(&power_domains->lock);
3700 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3701 intel_power_well_enable(dev_priv, well);
3702 mutex_unlock(&power_domains->lock);
3703
3704 /* 5. Enable CD clock */
3705 cnl_init_cdclk(dev_priv);
3706
3707 /* 6. Enable DBUF */
3708 gen9_dbuf_enable(dev_priv);
57522c4c
ID
3709
3710 if (resume && dev_priv->csr.dmc_payload)
3711 intel_csr_load_program(dev_priv);
d8d4a512
VS
3712}
3713
d8d4a512
VS
3714static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3715{
3716 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3717 struct i915_power_well *well;
d8d4a512
VS
3718
3719 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3720
3721 /* 1. Disable all display engine functions -> aready done */
3722
3723 /* 2. Disable DBUF */
3724 gen9_dbuf_disable(dev_priv);
3725
3726 /* 3. Disable CD clock */
3727 cnl_uninit_cdclk(dev_priv);
3728
b38131fb
ID
3729 /*
3730 * 4. Disable Power Well 1 (PG1).
3731 * The AUX IO power wells are toggled on demand, so they are already
3732 * disabled at this point.
3733 */
d8d4a512
VS
3734 mutex_lock(&power_domains->lock);
3735 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3736 intel_power_well_disable(dev_priv, well);
3737 mutex_unlock(&power_domains->lock);
3738
846c6b26
ID
3739 usleep_range(10, 30); /* 10 us delay per Bspec */
3740
c45198b1
ID
3741 /* 5. */
3742 cnl_combo_phys_uninit(dev_priv);
d8d4a512
VS
3743}
3744
3e68928b
AM
3745void icl_display_core_init(struct drm_i915_private *dev_priv,
3746 bool resume)
ad186f3f 3747{
67ca07e7
ID
3748 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3749 struct i915_power_well *well;
ad186f3f
PZ
3750
3751 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3752
3753 /* 1. Enable PCH reset handshake. */
6edafc4e 3754 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
ad186f3f 3755
c45198b1
ID
3756 /* 2-3. */
3757 icl_combo_phys_init(dev_priv);
ad186f3f 3758
67ca07e7
ID
3759 /*
3760 * 4. Enable Power Well 1 (PG1).
3761 * The AUX IO power wells will be enabled on demand.
3762 */
3763 mutex_lock(&power_domains->lock);
d9fcdc8d 3764 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
67ca07e7
ID
3765 intel_power_well_enable(dev_priv, well);
3766 mutex_unlock(&power_domains->lock);
ad186f3f
PZ
3767
3768 /* 5. Enable CDCLK. */
3769 icl_init_cdclk(dev_priv);
3770
3771 /* 6. Enable DBUF. */
746edf8f 3772 icl_dbuf_enable(dev_priv);
ad186f3f
PZ
3773
3774 /* 7. Setup MBUS. */
4cb4585e 3775 icl_mbus_init(dev_priv);
4445930f
AS
3776
3777 if (resume && dev_priv->csr.dmc_payload)
3778 intel_csr_load_program(dev_priv);
ad186f3f
PZ
3779}
3780
3e68928b 3781void icl_display_core_uninit(struct drm_i915_private *dev_priv)
ad186f3f 3782{
67ca07e7
ID
3783 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3784 struct i915_power_well *well;
ad186f3f
PZ
3785
3786 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3787
3788 /* 1. Disable all display engine functions -> aready done */
3789
3790 /* 2. Disable DBUF */
746edf8f 3791 icl_dbuf_disable(dev_priv);
ad186f3f
PZ
3792
3793 /* 3. Disable CD clock */
3794 icl_uninit_cdclk(dev_priv);
3795
67ca07e7
ID
3796 /*
3797 * 4. Disable Power Well 1 (PG1).
3798 * The AUX IO power wells are toggled on demand, so they are already
3799 * disabled at this point.
3800 */
3801 mutex_lock(&power_domains->lock);
d9fcdc8d 3802 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
67ca07e7
ID
3803 intel_power_well_disable(dev_priv, well);
3804 mutex_unlock(&power_domains->lock);
ad186f3f 3805
c45198b1
ID
3806 /* 5. */
3807 icl_combo_phys_uninit(dev_priv);
ad186f3f
PZ
3808}
3809
70722468
VS
3810static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3811{
3812 struct i915_power_well *cmn_bc =
2183b499 3813 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
70722468 3814 struct i915_power_well *cmn_d =
2183b499 3815 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
70722468
VS
3816
3817 /*
3818 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
3819 * workaround never ever read DISPLAY_PHY_CONTROL, and
3820 * instead maintain a shadow copy ourselves. Use the actual
e0fce78f
VS
3821 * power well state and lane status to reconstruct the
3822 * expected initial value.
70722468
VS
3823 */
3824 dev_priv->chv_phy_control =
bc284542
VS
3825 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3826 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
e0fce78f
VS
3827 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3828 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3829 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3830
3831 /*
3832 * If all lanes are disabled we leave the override disabled
3833 * with all power down bits cleared to match the state we
3834 * would use after disabling the port. Otherwise enable the
3835 * override and set the lane powerdown bits accding to the
3836 * current lane status.
3837 */
f28ec6f4 3838 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
e0fce78f
VS
3839 uint32_t status = I915_READ(DPLL(PIPE_A));
3840 unsigned int mask;
3841
3842 mask = status & DPLL_PORTB_READY_MASK;
3843 if (mask == 0xf)
3844 mask = 0x0;
3845 else
3846 dev_priv->chv_phy_control |=
3847 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3848
3849 dev_priv->chv_phy_control |=
3850 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3851
3852 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3853 if (mask == 0xf)
3854 mask = 0x0;
3855 else
3856 dev_priv->chv_phy_control |=
3857 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3858
3859 dev_priv->chv_phy_control |=
3860 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3861
70722468 3862 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3be60de9
VS
3863
3864 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3865 } else {
3866 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
e0fce78f
VS
3867 }
3868
f28ec6f4 3869 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
e0fce78f
VS
3870 uint32_t status = I915_READ(DPIO_PHY_STATUS);
3871 unsigned int mask;
3872
3873 mask = status & DPLL_PORTD_READY_MASK;
3874
3875 if (mask == 0xf)
3876 mask = 0x0;
3877 else
3878 dev_priv->chv_phy_control |=
3879 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3880
3881 dev_priv->chv_phy_control |=
3882 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3883
70722468 3884 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3be60de9
VS
3885
3886 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3887 } else {
3888 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
e0fce78f
VS
3889 }
3890
3891 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3892
3893 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3894 dev_priv->chv_phy_control);
70722468
VS
3895}
3896
9c065a7d
SV
3897static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3898{
3899 struct i915_power_well *cmn =
2183b499 3900 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
9c065a7d 3901 struct i915_power_well *disp2d =
2183b499 3902 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
9c065a7d 3903
9c065a7d 3904 /* If the display might be already active skip this */
f28ec6f4
ID
3905 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
3906 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
9c065a7d
SV
3907 I915_READ(DPIO_CTL) & DPIO_CMNRST)
3908 return;
3909
3910 DRM_DEBUG_KMS("toggling display PHY side reset\n");
3911
3912 /* cmnlane needs DPLL registers */
f28ec6f4 3913 disp2d->desc->ops->enable(dev_priv, disp2d);
9c065a7d
SV
3914
3915 /*
3916 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3917 * Need to assert and de-assert PHY SB reset by gating the
3918 * common lane power, then un-gating it.
3919 * Simply ungating isn't enough to reset the PHY enough to get
3920 * ports and lanes running.
3921 */
f28ec6f4 3922 cmn->desc->ops->disable(dev_priv, cmn);
9c065a7d
SV
3923}
3924
6dfc4a8f
ID
3925static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
3926
e4e7684f
SV
3927/**
3928 * intel_power_domains_init_hw - initialize hardware power domain state
3929 * @dev_priv: i915 device instance
14bb2c11 3930 * @resume: Called from resume code paths or not
e4e7684f
SV
3931 *
3932 * This function initializes the hardware power domain state and enables all
8d8c386c 3933 * power wells belonging to the INIT power domain. Power wells in other
d8c5d29f
ID
3934 * domains (and not in the INIT domain) are referenced or disabled by
3935 * intel_modeset_readout_hw_state(). After that the reference count of each
3936 * power well must match its HW enabled state, see
3937 * intel_power_domains_verify_state().
2cd9a689
ID
3938 *
3939 * It will return with power domains disabled (to be enabled later by
3940 * intel_power_domains_enable()) and must be paired with
3941 * intel_power_domains_fini_hw().
e4e7684f 3942 */
73dfc227 3943void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
9c065a7d 3944{
9c065a7d
SV
3945 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3946
3947 power_domains->initializing = true;
3948
ad186f3f
PZ
3949 if (IS_ICELAKE(dev_priv)) {
3950 icl_display_core_init(dev_priv, resume);
3951 } else if (IS_CANNONLAKE(dev_priv)) {
d8d4a512
VS
3952 cnl_display_core_init(dev_priv, resume);
3953 } else if (IS_GEN9_BC(dev_priv)) {
73dfc227 3954 skl_display_core_init(dev_priv, resume);
b817c440 3955 } else if (IS_GEN9_LP(dev_priv)) {
d7d7c9ee 3956 bxt_display_core_init(dev_priv, resume);
920a14b2 3957 } else if (IS_CHERRYVIEW(dev_priv)) {
770effb1 3958 mutex_lock(&power_domains->lock);
70722468 3959 chv_phy_control_init(dev_priv);
770effb1 3960 mutex_unlock(&power_domains->lock);
11a914c2 3961 } else if (IS_VALLEYVIEW(dev_priv)) {
9c065a7d
SV
3962 mutex_lock(&power_domains->lock);
3963 vlv_cmnlane_wa(dev_priv);
3964 mutex_unlock(&power_domains->lock);
6edafc4e
JRS
3965 } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
3966 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
9c065a7d 3967
2cd9a689
ID
3968 /*
3969 * Keep all power wells enabled for any dependent HW access during
3970 * initialization and to make sure we keep BIOS enabled display HW
3971 * resources powered until display HW readout is complete. We drop
3972 * this reference in intel_power_domains_enable().
3973 */
3974 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
d314cd43 3975 /* Disable power support if the user asked so. */
4f044a88 3976 if (!i915_modparams.disable_power_well)
d314cd43 3977 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
30eade12 3978 intel_power_domains_sync_hw(dev_priv);
6dfc4a8f 3979
d8c5d29f 3980 power_domains->initializing = false;
9c065a7d
SV
3981}
3982
48a287ed
ID
3983/**
3984 * intel_power_domains_fini_hw - deinitialize hw power domain state
3985 * @dev_priv: i915 device instance
3986 *
3987 * De-initializes the display power domain HW state. It also ensures that the
3988 * device stays powered up so that the driver can be reloaded.
2cd9a689
ID
3989 *
3990 * It must be called with power domains already disabled (after a call to
3991 * intel_power_domains_disable()) and must be paired with
3992 * intel_power_domains_init_hw().
48a287ed
ID
3993 */
3994void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
3995{
07d80572
CW
3996 /* Keep the power well enabled, but cancel its rpm wakeref. */
3997 intel_runtime_pm_put(dev_priv);
48a287ed
ID
3998
3999 /* Remove the refcount we took to keep power well support disabled. */
4000 if (!i915_modparams.disable_power_well)
4001 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
6dfc4a8f
ID
4002
4003 intel_power_domains_verify_state(dev_priv);
48a287ed
ID
4004}
4005
2cd9a689
ID
4006/**
4007 * intel_power_domains_enable - enable toggling of display power wells
4008 * @dev_priv: i915 device instance
4009 *
4010 * Enable the ondemand enabling/disabling of the display power wells. Note that
4011 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4012 * only at specific points of the display modeset sequence, thus they are not
4013 * affected by the intel_power_domains_enable()/disable() calls. The purpose
4014 * of these function is to keep the rest of power wells enabled until the end
4015 * of display HW readout (which will acquire the power references reflecting
4016 * the current HW state).
4017 */
4018void intel_power_domains_enable(struct drm_i915_private *dev_priv)
4019{
4020 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
6dfc4a8f
ID
4021
4022 intel_power_domains_verify_state(dev_priv);
2cd9a689
ID
4023}
4024
4025/**
4026 * intel_power_domains_disable - disable toggling of display power wells
4027 * @dev_priv: i915 device instance
4028 *
4029 * Disable the ondemand enabling/disabling of the display power wells. See
4030 * intel_power_domains_enable() for which power wells this call controls.
4031 */
4032void intel_power_domains_disable(struct drm_i915_private *dev_priv)
4033{
4034 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
6dfc4a8f
ID
4035
4036 intel_power_domains_verify_state(dev_priv);
2cd9a689
ID
4037}
4038
73dfc227
ID
4039/**
4040 * intel_power_domains_suspend - suspend power domain state
4041 * @dev_priv: i915 device instance
2cd9a689 4042 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
73dfc227
ID
4043 *
4044 * This function prepares the hardware power domain state before entering
2cd9a689
ID
4045 * system suspend.
4046 *
4047 * It must be called with power domains already disabled (after a call to
4048 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
73dfc227 4049 */
2cd9a689
ID
4050void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
4051 enum i915_drm_suspend_mode suspend_mode)
73dfc227 4052{
2cd9a689
ID
4053 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4054
4055 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4056
4057 /*
a61d904f
ID
4058 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4059 * support don't manually deinit the power domains. This also means the
4060 * CSR/DMC firmware will stay active, it will power down any HW
4061 * resources as required and also enable deeper system power states
4062 * that would be blocked if the firmware was inactive.
2cd9a689 4063 */
a61d904f
ID
4064 if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
4065 suspend_mode == I915_DRM_SUSPEND_IDLE &&
6dfc4a8f
ID
4066 dev_priv->csr.dmc_payload != NULL) {
4067 intel_power_domains_verify_state(dev_priv);
2cd9a689 4068 return;
6dfc4a8f 4069 }
2cd9a689 4070
d314cd43
ID
4071 /*
4072 * Even if power well support was disabled we still want to disable
2cd9a689 4073 * power wells if power domains must be deinitialized for suspend.
d314cd43 4074 */
6dfc4a8f 4075 if (!i915_modparams.disable_power_well) {
d314cd43 4076 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
6dfc4a8f
ID
4077 intel_power_domains_verify_state(dev_priv);
4078 }
2622d79b 4079
ad186f3f
PZ
4080 if (IS_ICELAKE(dev_priv))
4081 icl_display_core_uninit(dev_priv);
4082 else if (IS_CANNONLAKE(dev_priv))
d8d4a512
VS
4083 cnl_display_core_uninit(dev_priv);
4084 else if (IS_GEN9_BC(dev_priv))
2622d79b 4085 skl_display_core_uninit(dev_priv);
b817c440 4086 else if (IS_GEN9_LP(dev_priv))
d7d7c9ee 4087 bxt_display_core_uninit(dev_priv);
2cd9a689
ID
4088
4089 power_domains->display_core_suspended = true;
4090}
4091
4092/**
4093 * intel_power_domains_resume - resume power domain state
4094 * @dev_priv: i915 device instance
4095 *
4096 * This function resume the hardware power domain state during system resume.
4097 *
4098 * It will return with power domain support disabled (to be enabled later by
4099 * intel_power_domains_enable()) and must be paired with
4100 * intel_power_domains_suspend().
4101 */
4102void intel_power_domains_resume(struct drm_i915_private *dev_priv)
4103{
4104 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4105
4106 if (power_domains->display_core_suspended) {
4107 intel_power_domains_init_hw(dev_priv, true);
4108 power_domains->display_core_suspended = false;
6dfc4a8f
ID
4109 } else {
4110 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2cd9a689
ID
4111 }
4112
6dfc4a8f 4113 intel_power_domains_verify_state(dev_priv);
73dfc227
ID
4114}
4115
6dfc4a8f
ID
4116#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4117
8d8c386c
ID
4118static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
4119{
4120 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4121 struct i915_power_well *power_well;
4122
4123 for_each_power_well(dev_priv, power_well) {
4124 enum intel_display_power_domain domain;
4125
4126 DRM_DEBUG_DRIVER("%-25s %d\n",
f28ec6f4 4127 power_well->desc->name, power_well->count);
8d8c386c 4128
f28ec6f4 4129 for_each_power_domain(domain, power_well->desc->domains)
8d8c386c
ID
4130 DRM_DEBUG_DRIVER(" %-23s %d\n",
4131 intel_display_power_domain_str(domain),
4132 power_domains->domain_use_count[domain]);
4133 }
4134}
4135
4136/**
4137 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
4138 * @dev_priv: i915 device instance
4139 *
4140 * Verify if the reference count of each power well matches its HW enabled
4141 * state and the total refcount of the domains it belongs to. This must be
4142 * called after modeset HW state sanitization, which is responsible for
4143 * acquiring reference counts for any power wells in use and disabling the
4144 * ones left on by BIOS but not required by any active output.
4145 */
6dfc4a8f 4146static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
8d8c386c
ID
4147{
4148 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4149 struct i915_power_well *power_well;
4150 bool dump_domain_info;
4151
4152 mutex_lock(&power_domains->lock);
4153
4154 dump_domain_info = false;
4155 for_each_power_well(dev_priv, power_well) {
4156 enum intel_display_power_domain domain;
4157 int domains_count;
4158 bool enabled;
4159
f28ec6f4
ID
4160 enabled = power_well->desc->ops->is_enabled(dev_priv,
4161 power_well);
4162 if ((power_well->count || power_well->desc->always_on) !=
4163 enabled)
8d8c386c 4164 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
f28ec6f4
ID
4165 power_well->desc->name,
4166 power_well->count, enabled);
8d8c386c
ID
4167
4168 domains_count = 0;
f28ec6f4 4169 for_each_power_domain(domain, power_well->desc->domains)
8d8c386c
ID
4170 domains_count += power_domains->domain_use_count[domain];
4171
4172 if (power_well->count != domains_count) {
4173 DRM_ERROR("power well %s refcount/domain refcount mismatch "
4174 "(refcount %d/domains refcount %d)\n",
f28ec6f4 4175 power_well->desc->name, power_well->count,
8d8c386c
ID
4176 domains_count);
4177 dump_domain_info = true;
4178 }
4179 }
4180
4181 if (dump_domain_info) {
4182 static bool dumped;
4183
4184 if (!dumped) {
4185 intel_power_domains_dump_info(dev_priv);
4186 dumped = true;
4187 }
4188 }
4189
4190 mutex_unlock(&power_domains->lock);
4191}
4192
6dfc4a8f
ID
4193#else
4194
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
        /* No-op when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
}
4198
4199#endif
4200
e4e7684f
SV
4201/**
4202 * intel_runtime_pm_get - grab a runtime pm reference
bd780f37 4203 * @i915: i915 device instance
e4e7684f
SV
4204 *
4205 * This function grabs a device-level runtime pm reference (mostly used for GEM
4206 * code to ensure the GTT or GT is on) and ensures that it is powered up.
4207 *
4208 * Any runtime pm reference obtained by this function must have a symmetric
4209 * call to intel_runtime_pm_put() to release the reference again.
4210 */
bd780f37 4211void intel_runtime_pm_get(struct drm_i915_private *i915)
9c065a7d 4212{
bd780f37 4213 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4214 struct device *kdev = &pdev->dev;
f5073824 4215 int ret;
9c065a7d 4216
f5073824
ID
4217 ret = pm_runtime_get_sync(kdev);
4218 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
1f814dac 4219
bd780f37 4220 track_intel_runtime_pm_wakeref(i915);
9c065a7d
SV
4221}
4222
09731280
ID
4223/**
4224 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
bd780f37 4225 * @i915: i915 device instance
09731280
ID
4226 *
4227 * This function grabs a device-level runtime pm reference if the device is
acb79148
CW
4228 * already in use and ensures that it is powered up. It is illegal to try
4229 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
09731280
ID
4230 *
4231 * Any runtime pm reference obtained by this function must have a symmetric
4232 * call to intel_runtime_pm_put() to release the reference again.
acb79148
CW
4233 *
4234 * Returns: True if the wakeref was acquired, or False otherwise.
09731280 4235 */
bd780f37 4236bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
09731280 4237{
135dc79e 4238 if (IS_ENABLED(CONFIG_PM)) {
bd780f37 4239 struct pci_dev *pdev = i915->drm.pdev;
acb79148 4240 struct device *kdev = &pdev->dev;
09731280 4241
135dc79e
CW
4242 /*
4243 * In cases runtime PM is disabled by the RPM core and we get
4244 * an -EINVAL return value we are not supposed to call this
4245 * function, since the power state is undefined. This applies
4246 * atm to the late/early system suspend/resume handlers.
4247 */
acb79148 4248 if (pm_runtime_get_if_in_use(kdev) <= 0)
135dc79e
CW
4249 return false;
4250 }
09731280 4251
bd780f37 4252 track_intel_runtime_pm_wakeref(i915);
09731280
ID
4253
4254 return true;
4255}
4256
e4e7684f
SV
4257/**
4258 * intel_runtime_pm_get_noresume - grab a runtime pm reference
bd780f37 4259 * @i915: i915 device instance
e4e7684f
SV
4260 *
4261 * This function grabs a device-level runtime pm reference (mostly used for GEM
4262 * code to ensure the GTT or GT is on).
4263 *
4264 * It will _not_ power up the device but instead only check that it's powered
4265 * on. Therefore it is only valid to call this functions from contexts where
4266 * the device is known to be powered up and where trying to power it up would
4267 * result in hilarity and deadlocks. That pretty much means only the system
4268 * suspend/resume code where this is used to grab runtime pm references for
4269 * delayed setup down in work items.
4270 *
4271 * Any runtime pm reference obtained by this function must have a symmetric
4272 * call to intel_runtime_pm_put() to release the reference again.
4273 */
bd780f37 4274void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
9c065a7d 4275{
bd780f37 4276 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4277 struct device *kdev = &pdev->dev;
9c065a7d 4278
bd780f37 4279 assert_rpm_wakelock_held(i915);
c49d13ee 4280 pm_runtime_get_noresume(kdev);
1f814dac 4281
bd780f37 4282 track_intel_runtime_pm_wakeref(i915);
9c065a7d
SV
4283}
4284
e4e7684f
SV
4285/**
4286 * intel_runtime_pm_put - release a runtime pm reference
bd780f37 4287 * @i915: i915 device instance
e4e7684f
SV
4288 *
4289 * This function drops the device-level runtime pm reference obtained by
4290 * intel_runtime_pm_get() and might power down the corresponding
4291 * hardware block right away if this is the last reference.
4292 */
bd780f37 4293void intel_runtime_pm_put(struct drm_i915_private *i915)
9c065a7d 4294{
bd780f37 4295 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4296 struct device *kdev = &pdev->dev;
9c065a7d 4297
bd780f37 4298 untrack_intel_runtime_pm_wakeref(i915);
1f814dac 4299
c49d13ee
DW
4300 pm_runtime_mark_last_busy(kdev);
4301 pm_runtime_put_autosuspend(kdev);
9c065a7d
SV
4302}
4303
e4e7684f
SV
4304/**
4305 * intel_runtime_pm_enable - enable runtime pm
bd780f37 4306 * @i915: i915 device instance
e4e7684f
SV
4307 *
4308 * This function enables runtime pm at the end of the driver load sequence.
4309 *
4310 * Note that this function does currently not enable runtime pm for the
2cd9a689
ID
4311 * subordinate display power domains. That is done by
4312 * intel_power_domains_enable().
e4e7684f 4313 */
bd780f37 4314void intel_runtime_pm_enable(struct drm_i915_private *i915)
9c065a7d 4315{
bd780f37 4316 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4317 struct device *kdev = &pdev->dev;
9c065a7d 4318
07d80572
CW
4319 /*
4320 * Disable the system suspend direct complete optimization, which can
4321 * leave the device suspended skipping the driver's suspend handlers
4322 * if the device was already runtime suspended. This is needed due to
4323 * the difference in our runtime and system suspend sequence and
4324 * becaue the HDA driver may require us to enable the audio power
4325 * domain during system suspend.
4326 */
4327 dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4328
c49d13ee
DW
4329 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4330 pm_runtime_mark_last_busy(kdev);
cbc68dc9 4331
25b181b4
ID
4332 /*
4333 * Take a permanent reference to disable the RPM functionality and drop
4334 * it only when unloading the driver. Use the low level get/put helpers,
4335 * so the driver's own RPM reference tracking asserts also work on
4336 * platforms without RPM support.
4337 */
bd780f37 4338 if (!HAS_RUNTIME_PM(i915)) {
f5073824
ID
4339 int ret;
4340
c49d13ee 4341 pm_runtime_dont_use_autosuspend(kdev);
f5073824
ID
4342 ret = pm_runtime_get_sync(kdev);
4343 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
cbc68dc9 4344 } else {
c49d13ee 4345 pm_runtime_use_autosuspend(kdev);
cbc68dc9 4346 }
9c065a7d 4347
aabee1bb
ID
4348 /*
4349 * The core calls the driver load handler with an RPM reference held.
4350 * We drop that here and will reacquire it during unloading in
4351 * intel_power_domains_fini().
4352 */
c49d13ee 4353 pm_runtime_put_autosuspend(kdev);
9c065a7d 4354}
07d80572 4355
bd780f37 4356void intel_runtime_pm_disable(struct drm_i915_private *i915)
07d80572 4357{
bd780f37 4358 struct pci_dev *pdev = i915->drm.pdev;
07d80572
CW
4359 struct device *kdev = &pdev->dev;
4360
4361 /* Transfer rpm ownership back to core */
bd780f37 4362 WARN(pm_runtime_get_sync(kdev) < 0,
07d80572
CW
4363 "Failed to pass rpm ownership back to core\n");
4364
4365 pm_runtime_dont_use_autosuspend(kdev);
4366
bd780f37 4367 if (!HAS_RUNTIME_PM(i915))
07d80572
CW
4368 pm_runtime_put(kdev);
4369}
bd780f37
CW
4370
4371void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
4372{
4373 struct i915_runtime_pm *rpm = &i915->runtime_pm;
4374 int count;
4375
4376 count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
4377 WARN(count,
4378 "i915->runtime_pm.wakeref_count=%d on cleanup\n",
4379 count);
4380
4381 untrack_intel_runtime_pm_wakeref(i915);
4382}
4383
/* Early driver-load hook: set up the rpm wakeref tracking state. */
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
        init_intel_runtime_pm_wakeref(i915);
}