]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - drivers/gpu/drm/i915/intel_runtime_pm.c
drm/i915: Add support for tracking wakerefs w/o power-on guarantee
[thirdparty/kernel/stable.git] / drivers / gpu / drm / i915 / intel_runtime_pm.c
CommitLineData
9c065a7d
SV
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
bd780f37
CW
32#include <drm/drm_print.h>
33
9c065a7d 34#include "i915_drv.h"
440e2b3d 35#include "i915_irq.h"
e7674ef6 36#include "intel_cdclk.h"
d5f9db2c 37#include "intel_combo_phy.h"
d2ee2e8a 38#include "intel_crt.h"
174594db 39#include "intel_csr.h"
27fec1f9 40#include "intel_dp.h"
b1ad4c39 41#include "intel_dpio_phy.h"
9c065a7d 42#include "intel_drv.h"
dbeb38d9 43#include "intel_hotplug.h"
56c5098f 44#include "intel_sideband.h"
9c065a7d 45
e4e7684f
SV
46/**
47 * DOC: runtime pm
48 *
49 * The i915 driver supports dynamic enabling and disabling of entire hardware
50 * blocks at runtime. This is especially important on the display side where
51 * software is supposed to control many power gates manually on recent hardware,
52 * since on the GT side a lot of the power management is done by the hardware.
53 * But even there some manual control at the device level is required.
54 *
55 * Since i915 supports a diverse set of platforms with a unified codebase and
56 * hardware engineers just love to shuffle functionality around between power
57 * domains there's a sizeable amount of indirection required. This file provides
58 * generic functions to the driver for grabbing and releasing references for
59 * abstract power domains. It then maps those to the actual power wells
60 * present for a given platform.
61 */
62
bd780f37
CW
63#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
64
65#include <linux/sort.h>
66
67#define STACKDEPTH 8
68
/*
 * Capture the current call stack into the stack depot and return a compact
 * handle for it.  Callers treat a zero handle as failure (see
 * track_intel_runtime_pm_wakeref).  Allocation is GFP_NOWAIT because this
 * may run in atomic context.
 */
static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
		.skip = 1, /* skip __save_depot_stack itself */
	};

	save_stack_trace(&trace);
	/* Drop the trailing ULONG_MAX end-of-trace marker, if present. */
	if (trace.nr_entries &&
	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
}
85
/*
 * Render a depot stack handle as human-readable text into @buf (at most
 * @sz bytes), indenting each frame by @indent spaces.
 */
static void __print_depot_stack(depot_stack_handle_t stack,
				char *buf, int sz, int indent)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
	};

	depot_fetch_stack(stack, &trace);
	snprint_stack_trace(buf, sz, &trace, indent);
}
98
/* One-time init of the wakeref debug tracking state (just its spinlock). */
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;

	spin_lock_init(&rpm->debug.lock);
}
105
/*
 * Record the caller's stack as the owner of a new wakeref and return it as
 * the wakeref cookie.  Returns -1 when tracking is unavailable (no runtime
 * PM on this platform, or allocation failure); untrack ignores -1.
 */
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!HAS_RUNTIME_PM(i915))
		return -1;

	/* Capture the stack outside the lock; a zero handle means failure. */
	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	/* First wakeref since idle: remember it as "last acquired". */
	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	/* Grow the owners array by one slot; GFP_NOWAIT as we hold a lock. */
	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		/* Couldn't record the owner; report "untracked" to caller. */
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}
139
4547c255
ID
/*
 * Remove @stack from the tracked owners array (the cookie previously
 * returned by track_intel_runtime_pm_wakeref).  If the cookie is not found
 * we have an unbalanced put: warn and dump both the offending stack and the
 * stack of the last bulk release to aid debugging.
 */
static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
					     depot_stack_handle_t stack)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	unsigned long flags, n;
	bool found = false;

	/* -1 == tracking was never recorded for this wakeref; nothing to do. */
	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	/* Search newest-first; duplicates are fine, remove one instance. */
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		/* Show where the wakerefs were last mass-released, if known. */
		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}
183
184static int cmphandle(const void *_a, const void *_b)
185{
186 const depot_stack_handle_t * const a = _a, * const b = _b;
187
188 if (*a < *b)
189 return -1;
190 else if (*a > *b)
191 return 1;
192 else
193 return 0;
194}
195
/*
 * Pretty-print a snapshot of the wakeref debug state: the last
 * acquire/release stacks, the total count, and each owner's stack with
 * duplicates coalesced ("Wakeref xN taken at: ...").
 *
 * NOTE: sorts @dbg->owners in place, so the caller must own the snapshot.
 */
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	/* Sort so repeated handles are adjacent and can be counted below. */
	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}
234
/*
 * Drop one wakeref; if that was the final reference, atomically steal the
 * whole debug snapshot under the lock, record this stack as "last release",
 * and (outside the lock) dump any owners that were still outstanding —
 * those are leaked wakerefs.
 */
static noinline void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm_debug dbg = {};
	struct drm_printer p;
	unsigned long flags;

	/* Only take the lock (and the snapshot) when the count hits zero. */
	if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					&rpm->debug.lock,
					flags)) {
		dbg = rpm->debug;

		rpm->debug.owners = NULL;
		rpm->debug.count = 0;
		rpm->debug.last_release = __save_depot_stack();

		spin_unlock_irqrestore(&rpm->debug.lock, flags);
	}
	if (!dbg.count)
		return;

	p = drm_debug_printer("i915");
	__print_intel_runtime_pm_wakeref(&p, &dbg);

	kfree(dbg.owners);
}
262
/*
 * Copy the live wakeref debug state into a private snapshot and print it.
 * We cannot allocate while holding the debug spinlock, so loop: read the
 * count under the lock, and if our buffer is too small, drop the lock,
 * krealloc, and retry until the copy fits.
 */
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		struct i915_runtime_pm *rpm = &i915->runtime_pm;
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		/* Buffer too small: grow it outside the lock and retry. */
		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}
300
301#else
302
/*
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM disabled: tracking collapses to no-ops
 * and the cookie is always -1; only the bare wakeref_count is maintained.
 */
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
	atomic_dec(&i915->runtime_pm.wakeref_count);
}
323
324#endif
325
4547c255
ID
/*
 * Bump the wakeref bookkeeping for a new reference.  A "wakelock" reference
 * additionally adds INTEL_RPM_WAKELOCK_BIAS so that the presence of any
 * wakelock can be asserted separately from raw wakerefs; a raw reference
 * only increments the count.
 */
static void
intel_runtime_pm_acquire(struct drm_i915_private *i915, bool wakelock)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;

	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(i915);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(i915);
	}
}
339
340static void
341intel_runtime_pm_release(struct drm_i915_private *i915, int wakelock)
342{
343 struct i915_runtime_pm *rpm = &i915->runtime_pm;
344
345 if (wakelock) {
346 assert_rpm_wakelock_held(i915);
347 atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
348 } else {
349 assert_rpm_raw_wakeref_held(i915);
350 }
351
352 __intel_wakeref_dec_and_check_tracking(i915);
353}
354
5aefb239 355bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
438b8dc4 356 enum i915_power_well_id power_well_id);
5aefb239 357
9895ad03
DS
/*
 * Map a display power domain enum value to its human-readable name, used
 * for debugfs/log output.  Unknown values trip MISSING_CASE and return "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
		return "TRANSCODER_EDP_VDSC";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
459
e8ca9320
DL
/*
 * Enable a power well via its platform ops and mark it enabled in the
 * software state.  hw_enabled is set only after the ops->enable sequence
 * completes, so readers never see the flag ahead of the hardware.
 */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}
467
dcddab3a
DL
/*
 * Disable a power well.  Note the mirror-image ordering vs enable:
 * hw_enabled is cleared *before* ops->disable runs, so readers never see
 * the flag set while the well is going down.
 */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}
475
b409ca95
ID
476static void intel_power_well_get(struct drm_i915_private *dev_priv,
477 struct i915_power_well *power_well)
478{
479 if (!power_well->count++)
480 intel_power_well_enable(dev_priv, power_well);
481}
482
483static void intel_power_well_put(struct drm_i915_private *dev_priv,
484 struct i915_power_well *power_well)
485{
486 WARN(!power_well->count, "Use count on power well %s is already zero",
f28ec6f4 487 power_well->desc->name);
b409ca95
ID
488
489 if (!--power_well->count)
490 intel_power_well_disable(dev_priv, power_well);
491}
492
e4e7684f
SV
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	/* Device is runtime suspended: every well is effectively off. */
	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	/*
	 * The domain is enabled only if every well backing it is enabled;
	 * always-on wells need no check.
	 */
	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
528
e4e7684f 529/**
f61ccae3 530 * intel_display_power_is_enabled - check for a power domain
e4e7684f
SV
531 * @dev_priv: i915 device instance
532 * @domain: power domain to check
533 *
534 * This function can be used to check the hw power domain state. It is mostly
535 * used in hardware state readout functions. Everywhere else code should rely
536 * upon explicit power domain reference counting to ensure that the hardware
537 * block is powered up before accessing it.
538 *
539 * Callers must hold the relevant modesetting locks to ensure that concurrent
540 * threads can't disable the power well while the caller tries to read a few
541 * registers.
542 *
543 * Returns:
544 * True when the power domain is enabled, false otherwise.
545 */
f458ebbc
SV
546bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
547 enum intel_display_power_domain domain)
9c065a7d
SV
548{
549 struct i915_power_domains *power_domains;
550 bool ret;
551
552 power_domains = &dev_priv->power_domains;
553
554 mutex_lock(&power_domains->lock);
f458ebbc 555 ret = __intel_display_power_is_enabled(dev_priv, domain);
9c065a7d
SV
556 mutex_unlock(&power_domains->lock);
557
558 return ret;
559}
560
561/*
562 * Starting with Haswell, we have a "Power Down Well" that can be turned off
563 * when not needed anymore. We have 4 registers that can request the power well
564 * to be enabled, and it will only be disabled if none of the registers is
565 * requesting it to be enabled.
566 */
001bd2cb
ID
/*
 * Common tail for HSW+ power well enabling: poke the VGA MSR register to
 * quiesce vgacon (see comment below) and re-enable pipe interrupts that
 * were masked while the well was down.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
591
001bd2cb
ID
/*
 * Common prologue for HSW+ power well disabling: mask the pipe interrupts
 * backed by this well before the power is removed.
 */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
598
aae8ba84 599
76347c04
ID
/*
 * Poll the driver control register until the well's STATE bit reports
 * enabled, warning on timeout.
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(&dev_priv->uncore,
					regs->driver,
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					1));
}
613
/*
 * Return a bitmask of which request registers (bit 0 = BIOS, 1 = driver,
 * 2 = KVMR, 3 = DEBUG) are still requesting this power well to stay on.
 * The KVMR register does not exist on all platforms, hence the reg check.
 */
static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;

	return ret;
}
629
76347c04
ID
/*
 * Best-effort wait for a power well to actually power down, skipping the
 * wait (with a diagnostic) if some other agent is still requesting it on.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
657
b2891eb2
ID
/*
 * Wait for the fuse distribution status of power gate @pg to signal done,
 * warning on timeout.
 */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
666
ec46d483
ID
/*
 * Enable a HSW+-style power well: optionally wait for the prerequisite
 * fuse state, set the REQ bit in the driver control register, wait for the
 * STATE bit, apply the CNL AUX workaround, wait for this PW's own fuse
 * state, and finally run the common post-enable (VGA/irq) steps.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}
00742cab 710
ec46d483
ID
/*
 * Disable a HSW+-style power well: run the common pre-disable (irq) steps,
 * clear the REQ bit in the driver control register, and wait for the well
 * to report disabled.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
725
75e39688 726#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
67ca07e7
ID
727
/*
 * Enable an ICL combo-PHY AUX power well: request the well, enable the AUX
 * lanes in the port's PHY lane control, then wait for the well and apply
 * the ICL AUX workaround for non-eDP ports.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (IS_ICELAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, port)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}
754
/*
 * Disable an ICL combo-PHY AUX power well: mirror of the enable path —
 * first drop the AUX lane enable, then withdraw the power well request and
 * wait for it to power down.
 */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
772
c7375d95
ID
773#define ICL_AUX_PW_TO_CH(pw_idx) \
774 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
775
/*
 * Enable an ICL Type-C PHY AUX power well: first select TBT vs non-TBT IO
 * mode on the AUX channel according to the well's is_tc_tbt flag, then run
 * the standard HSW enable sequence.
 */
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
	u32 val;

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);
}
791
d42539ba
ID
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	/* Both the REQ and STATE bits must be set for "enabled". */
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}
821
664326f8
SK
/*
 * Sanity-check the preconditions for entering DC9: DC9 not already on,
 * DC5 disabled, power well 2 off and interrupts disabled.  Warn-only.
 */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
842
/*
 * Sanity-check the preconditions for exiting DC9: interrupts still off and
 * DC5 disabled.  Warn-only.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
858
779cb5d3
MK
/*
 * Write DC_STATE_EN and verify the value sticks, rewriting as needed.
 * Gives up after 100 rewrites (with an error) or once the readback has
 * been stable for more than 5 consecutive reads.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
895
/*
 * Return the mask of DC-state bits valid for this platform: all platforms
 * support up-to-DC5; gen11+ adds DC6 and DC9, gen9 LP adds DC9 only, and
 * the remaining platforms add DC6.
 */
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (INTEL_GEN(dev_priv) >= 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}
910
/*
 * Resynchronize the software DC-state tracking (csr.dc_state) with what
 * the hardware currently reports, e.g. after the DMC may have changed it
 * behind our back.
 */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}
921
13e1592f
ID
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back when exiting
 * that state to a shallower power state (lower in number). The HW will decide
 * when to actually enter a given state on an on-demand basis, for instance
 * depending on the active state of display pipes. The state of display
 * registers backed by affected power rails are saved/restored as needed.
 *
 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	/* Clamp the request to what this platform/firmware actually allows. */
	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	/* Verified write; see gen9_write_dc_state() for the retry logic. */
	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
970
/*
 * Enter the DC9 display power state. Asserts preconditions first, then
 * resets the panel power sequencer state (where required) before handing
 * the target state to the DMC via gen9_set_dc_state().
 */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
985
/*
 * Exit the DC9 display power state and re-apply the PPS register
 * unlock workaround, which may have been lost while in DC9.
 */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
996
af5fead2
SV
/*
 * Sanity-check that the CSR/DMC firmware has been loaded into its
 * program storage; DC5/DC6 transitions rely on the DMC being present.
 * Pure diagnostics: only WARNs, never changes state.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
1004
f7480b2f
PZ
/*
 * Find the power well with the given platform-specific id.
 *
 * Never returns NULL: on a lookup miss it WARNs and falls back to the
 * first power well in the table so callers need no error handling.
 */
static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}
1025
/*
 * Diagnostic preconditions for entering DC5: power well 2 must already be
 * off, DC5 must not already be requested, a runtime PM wakelock must be
 * held, and the DMC firmware must be loaded. WARNs only, no side effects.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
1039
/*
 * Allow the hardware to enter DC5 (DMC decides when to actually enter it).
 * Applies the Display WA #1183 chicken-bit setting on gen9 big-core parts
 * before signalling the new target state.
 */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
1053
/*
 * Diagnostic preconditions for entering DC6: backlight utility pin must be
 * off, DC6 must not already be requested, and the DMC firmware must be
 * loaded. WARNs only, no side effects.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
1063
/*
 * Allow the hardware to enter DC6 (DMC decides when to actually enter it).
 * Mirrors gen9_enable_dc5() including the Display WA #1183 chicken-bit
 * setting on gen9 big-core parts.
 */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
1077
9c065a7d
SV
/*
 * Sync driver state with a power well request left over from BIOS: if the
 * BIOS request bit is set, mirror it into the driver's request register
 * (so the well stays powered) and then clear the BIOS bit, handing
 * ownership of the well to the driver.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = I915_READ(regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(regs->driver);

		if (!(drv_req & mask))
			I915_WRITE(regs->driver, drv_req | mask);
		I915_WRITE(regs->bios, bios_req & ~mask);
	}
}
1095
9c8d0b8e
ID
/*
 * Power-well callbacks for the BXT DPIO common lane wells: thin wrappers
 * that delegate to the DDI PHY init/uninit/status helpers for the PHY
 * recorded in the well's descriptor.
 */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
1113
9c8d0b8e
ID
/*
 * Cross-check the DDI PHY hardware state against the software refcount of
 * each DPIO common lane power well (CMN_A, CMN_BC and, on GLK, CMN_C).
 * Only wells with a non-zero use count are verified.
 */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}
1134
9f836f90
PJ
1135static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1136 struct i915_power_well *power_well)
1137{
1138 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
1139}
1140
18a8067c
VS
1141static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1142{
1143 u32 tmp = I915_READ(DBUF_CTL);
1144
1145 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1146 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1147 "Unexpected DBuf power power state (0x%08x)\n", tmp);
1148}
1149
9f836f90
PJ
/*
 * "Enable" the DC-off power well, i.e. force the display out of DC5/DC6.
 * Afterwards verify that CDCLK, DBuf and (on gen9 LP) the DDI PHYs came
 * back in the expected state, and restore combo PHY context lost over DC
 * transitions on gen11+.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}
1174
/*
 * "Disable" the DC-off power well: re-allow the deepest DC state permitted
 * by allowed_dc_mask (DC6 preferred over DC5). A no-op when no DMC
 * firmware payload is loaded, since DC transitions require the DMC.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
1186
3c1b38e6
ID
1187static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1188 struct i915_power_well *power_well)
9f836f90 1189{
9f836f90
PJ
1190}
1191
9c065a7d
SV
1192static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1193 struct i915_power_well *power_well)
1194{
1195}
1196
1197static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1198 struct i915_power_well *power_well)
1199{
1200 return true;
1201}
1202
2ee0da16
VS
1203static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1204 struct i915_power_well *power_well)
1205{
1206 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1207 i830_enable_pipe(dev_priv, PIPE_A);
1208 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1209 i830_enable_pipe(dev_priv, PIPE_B);
1210}
1211
1212static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1213 struct i915_power_well *power_well)
1214{
1215 i830_disable_pipe(dev_priv, PIPE_B);
1216 i830_disable_pipe(dev_priv, PIPE_A);
1217}
1218
1219static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1220 struct i915_power_well *power_well)
1221{
1222 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1223 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1224}
1225
1226static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1227 struct i915_power_well *power_well)
1228{
1229 if (power_well->count > 0)
1230 i830_pipes_power_well_enable(dev_priv, power_well);
1231 else
1232 i830_pipes_power_well_disable(dev_priv, power_well);
1233}
1234
9c065a7d
SV
/*
 * Set a VLV power well on or off through the Punit PWRGT interface:
 * read-modify-write the control register and poll the status register
 * (up to 100 ms) until it reflects the requested state. Skips the write
 * entirely if the status already matches.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

/* True when the Punit status reports the requested power state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1270
9c065a7d
SV
/* Power-well op wrappers around vlv_set_power_well(). */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1282
/*
 * Query a VLV power well's state from the Punit. WARNs if the status is
 * neither fully on nor fully gated, or if the control register disagrees
 * with the status (a transient would indicate someone else is poking the
 * power controls).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1318
766078df
VS
/*
 * Program VLV/CHV display clock gating, arbiter and raw clock frequency
 * defaults. Called when the display power well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	/* Intentionally keeps only this one bit from the old value. */
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	/* rawclk_freq is in kHz; the register wants MHz. */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1345
/*
 * Bring up everything that depends on the VLV/CHV display power well:
 * CRI/ref clocks on all DPLLs, clock gating, display IRQs, and — outside
 * of driver init/resume — hotplug detection, CRT ADPA, VGA state and the
 * PPS unlock workaround.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1394
2be7d540
VS
/*
 * Tear down everything that depends on the VLV/CHV display power well
 * before it is gated: display IRQs (waiting for in-flight handlers),
 * panel power sequencer state, and HPD polling (re-enabled only when not
 * in late suspend).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1410
/*
 * Display power well ops: the well must be powered before init can touch
 * display registers, and deinit must run before the power is removed —
 * hence the strict ordering in each callback.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1426
/*
 * Enable the VLV DPIO common lane power well and then de-assert the DPIO
 * common reset per the VLV2A0 DP/eDP DPIO driver notes. The udelay gives
 * the previously-enabled ref/CRI clock its minimum settle time before the
 * well is powered.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1448
/*
 * Disable the VLV DPIO common lane power well: verify every pipe's PLL is
 * already off, assert the DPIO common reset, then gate the well.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1462
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True when every bit in @bits is set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Compute the DISPLAY_PHY_STATUS value expected from the current
 * chv_phy_control programming and the enabled state of the two CHV DPIO
 * common lane wells, then poll (up to 10 ms) until the hardware matches.
 * Complains with DRM_ERROR on a mismatch; diagnostics only.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
1574
9c065a7d
SV
/*
 * Enable a CHV DPIO common lane power well (PHY0 for CMN_BC via pipe A's
 * sideband port, PHY1 for CMN_D via pipe C's): power the well, wait for
 * the PHY power-good signal, program dynamic power-down through DPIO
 * sideband, then de-assert the common lane reset in DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1638
/*
 * Disable a CHV DPIO common lane power well: verify the PLLs served by
 * the PHY are off, assert the common lane reset, then gate the well.
 * A full reset also re-arms the PHY state asserts for this PHY.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1669
6669e39f
VS
/*
 * Verify via DPIO sideband that the lane power-down status of a CHV PHY
 * channel matches what the override programming implies. WARNs on
 * mismatch; diagnostics only, skipped until the PHY has been fully reset
 * once (see chv_phy_assert).
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* The status bits live at a different shift per channel. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1731
b0b33846
VS
/*
 * Set or clear the power-down override enable for one CHV PHY channel
 * under the power domains lock.
 *
 * Returns the previous override state so the caller can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Avoid a redundant register write when nothing changes. */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1762
e0fce78f
VS
/*
 * Program the per-lane power-down override mask (and its enable bit) for
 * the CHV PHY channel driving @encoder, then verify the resulting PHY
 * status and lane power-down state. Runs under the power domains lock.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask for this channel with the new one. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1792
/*
 * Query the CHV pipe-A power well state from the Punit DSPSSPM register.
 * WARNs if the status is neither fully on nor fully gated, or if the
 * control field disagrees with the status (someone else poking at the
 * power controls).
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	/* The control field sits 16 bits below the status field. */
	WARN_ON(ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1821
/*
 * Set the CHV pipe-A power well on or off through the Punit DSPSSPM
 * interface: read-modify-write the control field, then poll the status
 * field (up to 100 ms) until it reflects the requested state. Skips the
 * write if the status already matches.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

/* True when the Punit status field reports the requested power state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1855
9c065a7d
SV
/* Power up the CHV pipe well, then (re)initialize the display side. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1863
/* Tear down the display side first, then gate the CHV pipe well. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1871
09731280
ID
/*
 * Grab a reference on every power well backing @domain and bump the
 * domain's use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1884
e4e7684f
SV
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	/* hold a runtime PM wakeref for as long as the domain ref is held */
	intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);

	return wakeref;
}
1911
1912/**
1913 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1914 * @dev_priv: i915 device instance
1915 * @domain: power domain to reference
1916 *
1917 * This function grabs a power domain reference for @domain and ensures that the
1918 * power domain and all its parents are powered up. Therefore users should only
1919 * grab a reference to the innermost power domain they need.
1920 *
1921 * Any power domain reference obtained by this function must have a symmetric
1922 * call to intel_display_power_put() to release the reference again.
1923 */
0e6e0be4
CW
1924intel_wakeref_t
1925intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1926 enum intel_display_power_domain domain)
09731280
ID
1927{
1928 struct i915_power_domains *power_domains = &dev_priv->power_domains;
0e6e0be4 1929 intel_wakeref_t wakeref;
09731280
ID
1930 bool is_enabled;
1931
0e6e0be4
CW
1932 wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
1933 if (!wakeref)
09731280 1934 return false;
9c065a7d
SV
1935
1936 mutex_lock(&power_domains->lock);
1937
09731280
ID
1938 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1939 __intel_display_power_get_domain(dev_priv, domain);
1940 is_enabled = true;
1941 } else {
1942 is_enabled = false;
9c065a7d
SV
1943 }
1944
9c065a7d 1945 mutex_unlock(&power_domains->lock);
09731280 1946
0e6e0be4
CW
1947 if (!is_enabled) {
1948 intel_runtime_pm_put(dev_priv, wakeref);
1949 wakeref = 0;
1950 }
09731280 1951
0e6e0be4 1952 return wakeref;
9c065a7d
SV
1953}
1954
0e6e0be4
CW
/*
 * Drop one reference on @domain: decrement the use count (warning if it
 * was already zero) and release the backing power wells in reverse order.
 */
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	/* release in reverse order so wells power down innermost-first */
	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);
}
9c065a7d 1975
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_display_power_put() instead.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	/* no wakeref to match against - hence "unchecked" */
	intel_runtime_pm_put_unchecked(dev_priv);
}
1995
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * Only built when runtime PM debugging is enabled; otherwise a non-tracking
 * variant is provided elsewhere.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(dev_priv, wakeref);
}
#endif
2015
965a79ad
ID
/* Power-domain masks for i830 and Valleyview power wells. */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* The four TX lane wells share the same domain lists per port pair. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d 2070
/* Power-domain masks for Cherryview power wells. */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
9c065a7d 2104
/* Power-domain masks for Haswell/Broadwell "global" display wells. */
#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BDW drops pipe A's panel fitter from the well (always-on there). */
#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2137
/* Power-domain masks for Skylake power wells. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2175
/* Power-domain masks for Broxton power wells. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2208
/* Power-domain masks for Geminilake power wells. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2259
/* Power-domain masks for Cannonlake power wells. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2316
/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (		\
	ICL_PW_4_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_E) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (		\
	ICL_PW_3_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	ICL_PW_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_TBT1_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2416
9c065a7d 2417static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
3c1b38e6 2418 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2419 .enable = i9xx_always_on_power_well_noop,
2420 .disable = i9xx_always_on_power_well_noop,
2421 .is_enabled = i9xx_always_on_power_well_enabled,
2422};
2423
2424static const struct i915_power_well_ops chv_pipe_power_well_ops = {
3c1b38e6 2425 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2426 .enable = chv_pipe_power_well_enable,
2427 .disable = chv_pipe_power_well_disable,
2428 .is_enabled = chv_pipe_power_well_enabled,
2429};
2430
2431static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
3c1b38e6 2432 .sync_hw = i9xx_power_well_sync_hw_noop,
9c065a7d
SV
2433 .enable = chv_dpio_cmn_power_well_enable,
2434 .disable = chv_dpio_cmn_power_well_disable,
2435 .is_enabled = vlv_power_well_enabled,
2436};
2437
/* Platforms without controllable wells get a single always-on entry. */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2447
2ee0da16
VS
/* Ops for the i830 combined pipes well. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
2454
/* i830: an always-on well plus one well covering both pipes. */
static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2470
9c065a7d
SV
/* Ops for HSW-style wells controlled via the HSW_PWR_WELL_CTL registers. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
2477
/* Ops for the gen9+ virtual "DC off" well (blocks DC states while held). */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for the BXT DPIO common PHY wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2491
75e39688
ID
/* Register set used by HSW-style power-well control (BIOS/driver/KVMR/debug). */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
2498
/* Haswell: always-on well plus one global display well. */
static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};
2519
/* Broadwell: like HSW but the global well also gates pipe B/C interrupts. */
static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2541
/* Ops for the VLV display (DISP2D) well, with display init/deinit hooks. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic ops for plain punit-controlled VLV wells (TX lanes). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2562
/* Valleyview power-well list: display well, four TX lane wells, DPIO common. */
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	/*
	 * All four TX lane wells list the full set of lane domains since
	 * any lane well being down affects all ports.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
2638
/* Cherryview power-well list: pipe-A display well plus two DPIO commons. */
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
2677
/*
 * Query the hw enabled state of the well identified by @power_well_id.
 * Reads the hardware directly via the well's is_enabled() op; does not
 * take the power_domains lock, so the result is only a snapshot.
 */
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}
2689
f28ec6f4 2690static const struct i915_power_well_desc skl_power_wells[] = {
94dd5138
S
2691 {
2692 .name = "always-on",
285cf66d 2693 .always_on = true,
998bd66a 2694 .domains = POWER_DOMAIN_MASK,
94dd5138 2695 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2696 .id = DISP_PW_ID_NONE,
94dd5138
S
2697 },
2698 {
2699 .name = "power well 1",
4a76f295 2700 /* Handled by the DMC firmware */
fa96ed1f 2701 .always_on = true,
4a76f295 2702 .domains = 0,
4196b918 2703 .ops = &hsw_power_well_ops,
01c3faa7 2704 .id = SKL_DISP_PW_1,
0a445945 2705 {
75e39688
ID
2706 .hsw.regs = &hsw_power_well_regs,
2707 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2708 .hsw.has_fuses = true,
2709 },
94dd5138
S
2710 },
2711 {
2712 .name = "MISC IO power well",
4a76f295 2713 /* Handled by the DMC firmware */
fa96ed1f 2714 .always_on = true,
4a76f295 2715 .domains = 0,
4196b918 2716 .ops = &hsw_power_well_ops,
01c3faa7 2717 .id = SKL_DISP_PW_MISC_IO,
75e39688
ID
2718 {
2719 .hsw.regs = &hsw_power_well_regs,
2720 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2721 },
94dd5138 2722 },
9f836f90
PJ
2723 {
2724 .name = "DC off",
2725 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2726 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2727 .id = DISP_PW_ID_NONE,
9f836f90 2728 },
94dd5138
S
2729 {
2730 .name = "power well 2",
2731 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2732 .ops = &hsw_power_well_ops,
01c3faa7 2733 .id = SKL_DISP_PW_2,
0a445945 2734 {
75e39688
ID
2735 .hsw.regs = &hsw_power_well_regs,
2736 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2737 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2738 .hsw.has_vga = true,
2739 .hsw.has_fuses = true,
2740 },
94dd5138
S
2741 },
2742 {
62b69566
ACO
2743 .name = "DDI A/E IO power well",
2744 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
4196b918 2745 .ops = &hsw_power_well_ops,
4739a9d2 2746 .id = DISP_PW_ID_NONE,
75e39688
ID
2747 {
2748 .hsw.regs = &hsw_power_well_regs,
2749 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2750 },
94dd5138
S
2751 },
2752 {
62b69566
ACO
2753 .name = "DDI B IO power well",
2754 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
4196b918 2755 .ops = &hsw_power_well_ops,
4739a9d2 2756 .id = DISP_PW_ID_NONE,
75e39688
ID
2757 {
2758 .hsw.regs = &hsw_power_well_regs,
2759 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2760 },
94dd5138
S
2761 },
2762 {
62b69566
ACO
2763 .name = "DDI C IO power well",
2764 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
4196b918 2765 .ops = &hsw_power_well_ops,
4739a9d2 2766 .id = DISP_PW_ID_NONE,
75e39688
ID
2767 {
2768 .hsw.regs = &hsw_power_well_regs,
2769 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2770 },
94dd5138
S
2771 },
2772 {
62b69566
ACO
2773 .name = "DDI D IO power well",
2774 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
4196b918 2775 .ops = &hsw_power_well_ops,
4739a9d2 2776 .id = DISP_PW_ID_NONE,
75e39688
ID
2777 {
2778 .hsw.regs = &hsw_power_well_regs,
2779 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2780 },
94dd5138
S
2781 },
2782};
2783
f28ec6f4 2784static const struct i915_power_well_desc bxt_power_wells[] = {
0b4a2a36
S
2785 {
2786 .name = "always-on",
285cf66d 2787 .always_on = true,
998bd66a 2788 .domains = POWER_DOMAIN_MASK,
0b4a2a36 2789 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2790 .id = DISP_PW_ID_NONE,
0b4a2a36
S
2791 },
2792 {
2793 .name = "power well 1",
fa96ed1f
ID
2794 /* Handled by the DMC firmware */
2795 .always_on = true,
d7d7c9ee 2796 .domains = 0,
4196b918 2797 .ops = &hsw_power_well_ops,
01c3faa7 2798 .id = SKL_DISP_PW_1,
0a445945 2799 {
75e39688
ID
2800 .hsw.regs = &hsw_power_well_regs,
2801 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2802 .hsw.has_fuses = true,
2803 },
0b4a2a36 2804 },
9f836f90
PJ
2805 {
2806 .name = "DC off",
2807 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2808 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2809 .id = DISP_PW_ID_NONE,
9f836f90 2810 },
0b4a2a36
S
2811 {
2812 .name = "power well 2",
2813 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2814 .ops = &hsw_power_well_ops,
01c3faa7 2815 .id = SKL_DISP_PW_2,
0a445945 2816 {
75e39688
ID
2817 .hsw.regs = &hsw_power_well_regs,
2818 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2819 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2820 .hsw.has_vga = true,
2821 .hsw.has_fuses = true,
2822 },
9f836f90 2823 },
9c8d0b8e
ID
2824 {
2825 .name = "dpio-common-a",
2826 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2827 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2828 .id = BXT_DISP_PW_DPIO_CMN_A,
0a445945
ID
2829 {
2830 .bxt.phy = DPIO_PHY1,
2831 },
9c8d0b8e
ID
2832 },
2833 {
2834 .name = "dpio-common-bc",
2835 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2836 .ops = &bxt_dpio_cmn_power_well_ops,
d9fcdc8d 2837 .id = VLV_DISP_PW_DPIO_CMN_BC,
0a445945
ID
2838 {
2839 .bxt.phy = DPIO_PHY0,
2840 },
9c8d0b8e 2841 },
0b4a2a36
S
2842};
2843
f28ec6f4 2844static const struct i915_power_well_desc glk_power_wells[] = {
0d03926d
ACO
2845 {
2846 .name = "always-on",
285cf66d 2847 .always_on = true,
0d03926d
ACO
2848 .domains = POWER_DOMAIN_MASK,
2849 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2850 .id = DISP_PW_ID_NONE,
0d03926d
ACO
2851 },
2852 {
2853 .name = "power well 1",
2854 /* Handled by the DMC firmware */
fa96ed1f 2855 .always_on = true,
0d03926d 2856 .domains = 0,
4196b918 2857 .ops = &hsw_power_well_ops,
0d03926d 2858 .id = SKL_DISP_PW_1,
0a445945 2859 {
75e39688
ID
2860 .hsw.regs = &hsw_power_well_regs,
2861 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2862 .hsw.has_fuses = true,
2863 },
0d03926d
ACO
2864 },
2865 {
2866 .name = "DC off",
2867 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2868 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 2869 .id = DISP_PW_ID_NONE,
0d03926d
ACO
2870 },
2871 {
2872 .name = "power well 2",
2873 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 2874 .ops = &hsw_power_well_ops,
0d03926d 2875 .id = SKL_DISP_PW_2,
0a445945 2876 {
75e39688
ID
2877 .hsw.regs = &hsw_power_well_regs,
2878 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
2879 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2880 .hsw.has_vga = true,
2881 .hsw.has_fuses = true,
2882 },
0d03926d 2883 },
0a116ce8
ACO
2884 {
2885 .name = "dpio-common-a",
2886 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2887 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2888 .id = BXT_DISP_PW_DPIO_CMN_A,
0a445945
ID
2889 {
2890 .bxt.phy = DPIO_PHY1,
2891 },
0a116ce8
ACO
2892 },
2893 {
2894 .name = "dpio-common-b",
2895 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2896 .ops = &bxt_dpio_cmn_power_well_ops,
d9fcdc8d 2897 .id = VLV_DISP_PW_DPIO_CMN_BC,
0a445945
ID
2898 {
2899 .bxt.phy = DPIO_PHY0,
2900 },
0a116ce8
ACO
2901 },
2902 {
2903 .name = "dpio-common-c",
2904 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2905 .ops = &bxt_dpio_cmn_power_well_ops,
2183b499 2906 .id = GLK_DISP_PW_DPIO_CMN_C,
0a445945
ID
2907 {
2908 .bxt.phy = DPIO_PHY2,
2909 },
0a116ce8 2910 },
0d03926d
ACO
2911 {
2912 .name = "AUX A",
2913 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
4196b918 2914 .ops = &hsw_power_well_ops,
4739a9d2 2915 .id = DISP_PW_ID_NONE,
75e39688
ID
2916 {
2917 .hsw.regs = &hsw_power_well_regs,
2918 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2919 },
0d03926d
ACO
2920 },
2921 {
2922 .name = "AUX B",
2923 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
4196b918 2924 .ops = &hsw_power_well_ops,
4739a9d2 2925 .id = DISP_PW_ID_NONE,
75e39688
ID
2926 {
2927 .hsw.regs = &hsw_power_well_regs,
2928 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2929 },
0d03926d
ACO
2930 },
2931 {
2932 .name = "AUX C",
2933 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
4196b918 2934 .ops = &hsw_power_well_ops,
4739a9d2 2935 .id = DISP_PW_ID_NONE,
75e39688
ID
2936 {
2937 .hsw.regs = &hsw_power_well_regs,
2938 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2939 },
0d03926d
ACO
2940 },
2941 {
62b69566
ACO
2942 .name = "DDI A IO power well",
2943 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
4196b918 2944 .ops = &hsw_power_well_ops,
4739a9d2 2945 .id = DISP_PW_ID_NONE,
75e39688
ID
2946 {
2947 .hsw.regs = &hsw_power_well_regs,
2948 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2949 },
0d03926d
ACO
2950 },
2951 {
62b69566
ACO
2952 .name = "DDI B IO power well",
2953 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
4196b918 2954 .ops = &hsw_power_well_ops,
4739a9d2 2955 .id = DISP_PW_ID_NONE,
75e39688
ID
2956 {
2957 .hsw.regs = &hsw_power_well_regs,
2958 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2959 },
0d03926d
ACO
2960 },
2961 {
62b69566
ACO
2962 .name = "DDI C IO power well",
2963 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
4196b918 2964 .ops = &hsw_power_well_ops,
4739a9d2 2965 .id = DISP_PW_ID_NONE,
75e39688
ID
2966 {
2967 .hsw.regs = &hsw_power_well_regs,
2968 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2969 },
0d03926d
ACO
2970 },
2971};
2972
f28ec6f4 2973static const struct i915_power_well_desc cnl_power_wells[] = {
8bcd3dd4
VS
2974 {
2975 .name = "always-on",
285cf66d 2976 .always_on = true,
8bcd3dd4
VS
2977 .domains = POWER_DOMAIN_MASK,
2978 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 2979 .id = DISP_PW_ID_NONE,
8bcd3dd4
VS
2980 },
2981 {
2982 .name = "power well 1",
2983 /* Handled by the DMC firmware */
fa96ed1f 2984 .always_on = true,
8bcd3dd4 2985 .domains = 0,
4196b918 2986 .ops = &hsw_power_well_ops,
8bcd3dd4 2987 .id = SKL_DISP_PW_1,
0a445945 2988 {
75e39688
ID
2989 .hsw.regs = &hsw_power_well_regs,
2990 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
0a445945
ID
2991 .hsw.has_fuses = true,
2992 },
8bcd3dd4
VS
2993 },
2994 {
2995 .name = "AUX A",
2996 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
4196b918 2997 .ops = &hsw_power_well_ops,
4739a9d2 2998 .id = DISP_PW_ID_NONE,
75e39688
ID
2999 {
3000 .hsw.regs = &hsw_power_well_regs,
3001 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3002 },
8bcd3dd4
VS
3003 },
3004 {
3005 .name = "AUX B",
3006 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
4196b918 3007 .ops = &hsw_power_well_ops,
4739a9d2 3008 .id = DISP_PW_ID_NONE,
75e39688
ID
3009 {
3010 .hsw.regs = &hsw_power_well_regs,
3011 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3012 },
8bcd3dd4
VS
3013 },
3014 {
3015 .name = "AUX C",
3016 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
4196b918 3017 .ops = &hsw_power_well_ops,
4739a9d2 3018 .id = DISP_PW_ID_NONE,
75e39688
ID
3019 {
3020 .hsw.regs = &hsw_power_well_regs,
3021 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3022 },
8bcd3dd4
VS
3023 },
3024 {
3025 .name = "AUX D",
3026 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
4196b918 3027 .ops = &hsw_power_well_ops,
4739a9d2 3028 .id = DISP_PW_ID_NONE,
75e39688
ID
3029 {
3030 .hsw.regs = &hsw_power_well_regs,
3031 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3032 },
8bcd3dd4
VS
3033 },
3034 {
3035 .name = "DC off",
3036 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3037 .ops = &gen9_dc_off_power_well_ops,
4739a9d2 3038 .id = DISP_PW_ID_NONE,
8bcd3dd4
VS
3039 },
3040 {
3041 .name = "power well 2",
3042 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
4196b918 3043 .ops = &hsw_power_well_ops,
8bcd3dd4 3044 .id = SKL_DISP_PW_2,
0a445945 3045 {
75e39688
ID
3046 .hsw.regs = &hsw_power_well_regs,
3047 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
0a445945
ID
3048 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3049 .hsw.has_vga = true,
3050 .hsw.has_fuses = true,
3051 },
8bcd3dd4
VS
3052 },
3053 {
3054 .name = "DDI A IO power well",
3055 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
4196b918 3056 .ops = &hsw_power_well_ops,
4739a9d2 3057 .id = DISP_PW_ID_NONE,
75e39688
ID
3058 {
3059 .hsw.regs = &hsw_power_well_regs,
3060 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3061 },
8bcd3dd4
VS
3062 },
3063 {
3064 .name = "DDI B IO power well",
3065 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
4196b918 3066 .ops = &hsw_power_well_ops,
4739a9d2 3067 .id = DISP_PW_ID_NONE,
75e39688
ID
3068 {
3069 .hsw.regs = &hsw_power_well_regs,
3070 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3071 },
8bcd3dd4
VS
3072 },
3073 {
3074 .name = "DDI C IO power well",
3075 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
4196b918 3076 .ops = &hsw_power_well_ops,
4739a9d2 3077 .id = DISP_PW_ID_NONE,
75e39688
ID
3078 {
3079 .hsw.regs = &hsw_power_well_regs,
3080 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3081 },
8bcd3dd4
VS
3082 },
3083 {
3084 .name = "DDI D IO power well",
3085 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
4196b918 3086 .ops = &hsw_power_well_ops,
4739a9d2 3087 .id = DISP_PW_ID_NONE,
75e39688
ID
3088 {
3089 .hsw.regs = &hsw_power_well_regs,
3090 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3091 },
8bcd3dd4 3092 },
9787e835
RV
3093 {
3094 .name = "DDI F IO power well",
3095 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3096 .ops = &hsw_power_well_ops,
4739a9d2 3097 .id = DISP_PW_ID_NONE,
75e39688
ID
3098 {
3099 .hsw.regs = &hsw_power_well_regs,
3100 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3101 },
9787e835 3102 },
a324fcac
RV
3103 {
3104 .name = "AUX F",
3105 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3106 .ops = &hsw_power_well_ops,
4739a9d2 3107 .id = DISP_PW_ID_NONE,
75e39688
ID
3108 {
3109 .hsw.regs = &hsw_power_well_regs,
3110 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3111 },
a324fcac 3112 },
8bcd3dd4
VS
3113};
3114
67ca07e7
ID
3115static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3116 .sync_hw = hsw_power_well_sync_hw,
3117 .enable = icl_combo_phy_aux_power_well_enable,
3118 .disable = icl_combo_phy_aux_power_well_disable,
3119 .is_enabled = hsw_power_well_enabled,
3120};
3121
c7375d95
ID
3122static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3123 .sync_hw = hsw_power_well_sync_hw,
3124 .enable = icl_tc_phy_aux_power_well_enable,
3125 .disable = hsw_power_well_disable,
3126 .is_enabled = hsw_power_well_enabled,
3127};
3128
75e39688
ID
3129static const struct i915_power_well_regs icl_aux_power_well_regs = {
3130 .bios = ICL_PWR_WELL_CTL_AUX1,
3131 .driver = ICL_PWR_WELL_CTL_AUX2,
3132 .debug = ICL_PWR_WELL_CTL_AUX4,
3133};
3134
3135static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3136 .bios = ICL_PWR_WELL_CTL_DDI1,
3137 .driver = ICL_PWR_WELL_CTL_DDI2,
3138 .debug = ICL_PWR_WELL_CTL_DDI4,
3139};
3140
f28ec6f4 3141static const struct i915_power_well_desc icl_power_wells[] = {
67ca07e7
ID
3142 {
3143 .name = "always-on",
285cf66d 3144 .always_on = true,
67ca07e7
ID
3145 .domains = POWER_DOMAIN_MASK,
3146 .ops = &i9xx_always_on_power_well_ops,
4739a9d2 3147 .id = DISP_PW_ID_NONE,
67ca07e7
ID
3148 },
3149 {
3150 .name = "power well 1",
3151 /* Handled by the DMC firmware */
fa96ed1f 3152 .always_on = true,
67ca07e7
ID
3153 .domains = 0,
3154 .ops = &hsw_power_well_ops,
d9fcdc8d 3155 .id = SKL_DISP_PW_1,
ae9b06ca 3156 {
75e39688
ID
3157 .hsw.regs = &hsw_power_well_regs,
3158 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
ae9b06ca
ID
3159 .hsw.has_fuses = true,
3160 },
67ca07e7 3161 },
a33e1ece
ID
3162 {
3163 .name = "DC off",
3164 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3165 .ops = &gen9_dc_off_power_well_ops,
3166 .id = DISP_PW_ID_NONE,
3167 },
67ca07e7
ID
3168 {
3169 .name = "power well 2",
3170 .domains = ICL_PW_2_POWER_DOMAINS,
3171 .ops = &hsw_power_well_ops,
d9fcdc8d 3172 .id = SKL_DISP_PW_2,
ae9b06ca 3173 {
75e39688
ID
3174 .hsw.regs = &hsw_power_well_regs,
3175 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
ae9b06ca
ID
3176 .hsw.has_fuses = true,
3177 },
67ca07e7 3178 },
67ca07e7
ID
3179 {
3180 .name = "power well 3",
3181 .domains = ICL_PW_3_POWER_DOMAINS,
3182 .ops = &hsw_power_well_ops,
4739a9d2 3183 .id = DISP_PW_ID_NONE,
ae9b06ca 3184 {
75e39688
ID
3185 .hsw.regs = &hsw_power_well_regs,
3186 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
ae9b06ca
ID
3187 .hsw.irq_pipe_mask = BIT(PIPE_B),
3188 .hsw.has_vga = true,
3189 .hsw.has_fuses = true,
3190 },
67ca07e7
ID
3191 },
3192 {
3193 .name = "DDI A IO",
3194 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3195 .ops = &hsw_power_well_ops,
4739a9d2 3196 .id = DISP_PW_ID_NONE,
75e39688
ID
3197 {
3198 .hsw.regs = &icl_ddi_power_well_regs,
3199 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3200 },
67ca07e7
ID
3201 },
3202 {
3203 .name = "DDI B IO",
3204 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3205 .ops = &hsw_power_well_ops,
4739a9d2 3206 .id = DISP_PW_ID_NONE,
75e39688
ID
3207 {
3208 .hsw.regs = &icl_ddi_power_well_regs,
3209 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3210 },
67ca07e7
ID
3211 },
3212 {
3213 .name = "DDI C IO",
3214 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3215 .ops = &hsw_power_well_ops,
4739a9d2 3216 .id = DISP_PW_ID_NONE,
75e39688
ID
3217 {
3218 .hsw.regs = &icl_ddi_power_well_regs,
3219 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3220 },
67ca07e7
ID
3221 },
3222 {
3223 .name = "DDI D IO",
3224 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3225 .ops = &hsw_power_well_ops,
4739a9d2 3226 .id = DISP_PW_ID_NONE,
75e39688
ID
3227 {
3228 .hsw.regs = &icl_ddi_power_well_regs,
3229 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3230 },
67ca07e7
ID
3231 },
3232 {
3233 .name = "DDI E IO",
3234 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3235 .ops = &hsw_power_well_ops,
4739a9d2 3236 .id = DISP_PW_ID_NONE,
75e39688
ID
3237 {
3238 .hsw.regs = &icl_ddi_power_well_regs,
3239 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3240 },
67ca07e7
ID
3241 },
3242 {
3243 .name = "DDI F IO",
3244 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3245 .ops = &hsw_power_well_ops,
4739a9d2 3246 .id = DISP_PW_ID_NONE,
75e39688
ID
3247 {
3248 .hsw.regs = &icl_ddi_power_well_regs,
3249 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3250 },
67ca07e7
ID
3251 },
3252 {
3253 .name = "AUX A",
3254 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3255 .ops = &icl_combo_phy_aux_power_well_ops,
4739a9d2 3256 .id = DISP_PW_ID_NONE,
75e39688
ID
3257 {
3258 .hsw.regs = &icl_aux_power_well_regs,
3259 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3260 },
67ca07e7
ID
3261 },
3262 {
3263 .name = "AUX B",
3264 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3265 .ops = &icl_combo_phy_aux_power_well_ops,
4739a9d2 3266 .id = DISP_PW_ID_NONE,
75e39688
ID
3267 {
3268 .hsw.regs = &icl_aux_power_well_regs,
3269 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3270 },
67ca07e7
ID
3271 },
3272 {
3273 .name = "AUX C",
3274 .domains = ICL_AUX_C_IO_POWER_DOMAINS,
c7375d95 3275 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3276 .id = DISP_PW_ID_NONE,
75e39688
ID
3277 {
3278 .hsw.regs = &icl_aux_power_well_regs,
3279 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
c7375d95 3280 .hsw.is_tc_tbt = false,
75e39688 3281 },
67ca07e7
ID
3282 },
3283 {
3284 .name = "AUX D",
3285 .domains = ICL_AUX_D_IO_POWER_DOMAINS,
c7375d95 3286 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3287 .id = DISP_PW_ID_NONE,
75e39688
ID
3288 {
3289 .hsw.regs = &icl_aux_power_well_regs,
3290 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
c7375d95 3291 .hsw.is_tc_tbt = false,
75e39688 3292 },
67ca07e7
ID
3293 },
3294 {
3295 .name = "AUX E",
3296 .domains = ICL_AUX_E_IO_POWER_DOMAINS,
c7375d95 3297 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3298 .id = DISP_PW_ID_NONE,
75e39688
ID
3299 {
3300 .hsw.regs = &icl_aux_power_well_regs,
3301 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
c7375d95 3302 .hsw.is_tc_tbt = false,
75e39688 3303 },
67ca07e7
ID
3304 },
3305 {
3306 .name = "AUX F",
3307 .domains = ICL_AUX_F_IO_POWER_DOMAINS,
c7375d95 3308 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3309 .id = DISP_PW_ID_NONE,
75e39688
ID
3310 {
3311 .hsw.regs = &icl_aux_power_well_regs,
3312 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
c7375d95 3313 .hsw.is_tc_tbt = false,
75e39688 3314 },
67ca07e7
ID
3315 },
3316 {
3317 .name = "AUX TBT1",
3318 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
c7375d95 3319 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3320 .id = DISP_PW_ID_NONE,
75e39688
ID
3321 {
3322 .hsw.regs = &icl_aux_power_well_regs,
3323 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
c7375d95 3324 .hsw.is_tc_tbt = true,
75e39688 3325 },
67ca07e7
ID
3326 },
3327 {
3328 .name = "AUX TBT2",
3329 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
c7375d95 3330 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3331 .id = DISP_PW_ID_NONE,
75e39688
ID
3332 {
3333 .hsw.regs = &icl_aux_power_well_regs,
3334 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
c7375d95 3335 .hsw.is_tc_tbt = true,
75e39688 3336 },
67ca07e7
ID
3337 },
3338 {
3339 .name = "AUX TBT3",
3340 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
c7375d95 3341 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3342 .id = DISP_PW_ID_NONE,
75e39688
ID
3343 {
3344 .hsw.regs = &icl_aux_power_well_regs,
3345 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
c7375d95 3346 .hsw.is_tc_tbt = true,
75e39688 3347 },
67ca07e7
ID
3348 },
3349 {
3350 .name = "AUX TBT4",
3351 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
c7375d95 3352 .ops = &icl_tc_phy_aux_power_well_ops,
4739a9d2 3353 .id = DISP_PW_ID_NONE,
75e39688
ID
3354 {
3355 .hsw.regs = &icl_aux_power_well_regs,
3356 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
c7375d95 3357 .hsw.is_tc_tbt = true,
75e39688 3358 },
67ca07e7
ID
3359 },
3360 {
3361 .name = "power well 4",
3362 .domains = ICL_PW_4_POWER_DOMAINS,
3363 .ops = &hsw_power_well_ops,
4739a9d2 3364 .id = DISP_PW_ID_NONE,
ae9b06ca 3365 {
75e39688
ID
3366 .hsw.regs = &hsw_power_well_regs,
3367 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
ae9b06ca
ID
3368 .hsw.has_fuses = true,
3369 .hsw.irq_pipe_mask = BIT(PIPE_C),
3370 },
67ca07e7
ID
3371 },
3372};
3373
1b0e3a04
ID
3374static int
3375sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3376 int disable_power_well)
3377{
3378 if (disable_power_well >= 0)
3379 return !!disable_power_well;
3380
1b0e3a04
ID
3381 return 1;
3382}
3383
739f3abd
JN
3384static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3385 int enable_dc)
a37baf3b 3386{
739f3abd 3387 u32 mask;
a37baf3b
ID
3388 int requested_dc;
3389 int max_dc;
3390
3e68928b 3391 if (INTEL_GEN(dev_priv) >= 11) {
a37baf3b 3392 max_dc = 2;
a37baf3b
ID
3393 /*
3394 * DC9 has a separate HW flow from the rest of the DC states,
3395 * not depending on the DMC firmware. It's needed by system
3396 * suspend/resume, so allow it unconditionally.
3397 */
3398 mask = DC_STATE_EN_DC9;
cf819eff 3399 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3e68928b
AM
3400 max_dc = 2;
3401 mask = 0;
3402 } else if (IS_GEN9_LP(dev_priv)) {
3403 max_dc = 1;
3404 mask = DC_STATE_EN_DC9;
a37baf3b
ID
3405 } else {
3406 max_dc = 0;
3407 mask = 0;
3408 }
3409
4f044a88 3410 if (!i915_modparams.disable_power_well)
66e2c4c3
ID
3411 max_dc = 0;
3412
a37baf3b
ID
3413 if (enable_dc >= 0 && enable_dc <= max_dc) {
3414 requested_dc = enable_dc;
3415 } else if (enable_dc == -1) {
3416 requested_dc = max_dc;
3417 } else if (enable_dc > max_dc && enable_dc <= 2) {
3418 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3419 enable_dc, max_dc);
3420 requested_dc = max_dc;
3421 } else {
3422 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3423 requested_dc = max_dc;
3424 }
3425
3426 if (requested_dc > 1)
3427 mask |= DC_STATE_EN_UPTO_DC6;
3428 if (requested_dc > 0)
3429 mask |= DC_STATE_EN_UPTO_DC5;
3430
3431 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3432
3433 return mask;
3434}
3435
f28ec6f4
ID
3436static int
3437__set_power_wells(struct i915_power_domains *power_domains,
3438 const struct i915_power_well_desc *power_well_descs,
3439 int power_well_count)
21792c60 3440{
f28ec6f4 3441 u64 power_well_ids = 0;
21792c60
ID
3442 int i;
3443
f28ec6f4
ID
3444 power_domains->power_well_count = power_well_count;
3445 power_domains->power_wells =
3446 kcalloc(power_well_count,
3447 sizeof(*power_domains->power_wells),
3448 GFP_KERNEL);
3449 if (!power_domains->power_wells)
3450 return -ENOMEM;
3451
3452 for (i = 0; i < power_well_count; i++) {
3453 enum i915_power_well_id id = power_well_descs[i].id;
3454
3455 power_domains->power_wells[i].desc = &power_well_descs[i];
21792c60 3456
4739a9d2
ID
3457 if (id == DISP_PW_ID_NONE)
3458 continue;
3459
21792c60
ID
3460 WARN_ON(id >= sizeof(power_well_ids) * 8);
3461 WARN_ON(power_well_ids & BIT_ULL(id));
3462 power_well_ids |= BIT_ULL(id);
3463 }
f28ec6f4
ID
3464
3465 return 0;
21792c60
ID
3466}
3467
f28ec6f4
ID
3468#define set_power_wells(power_domains, __power_well_descs) \
3469 __set_power_wells(power_domains, __power_well_descs, \
3470 ARRAY_SIZE(__power_well_descs))
9c065a7d 3471
e4e7684f
SV
3472/**
3473 * intel_power_domains_init - initializes the power domain structures
3474 * @dev_priv: i915 device instance
3475 *
3476 * Initializes the power domain structures for @dev_priv depending upon the
3477 * supported platform.
3478 */
9c065a7d
SV
3479int intel_power_domains_init(struct drm_i915_private *dev_priv)
3480{
3481 struct i915_power_domains *power_domains = &dev_priv->power_domains;
f28ec6f4 3482 int err;
9c065a7d 3483
4f044a88
MW
3484 i915_modparams.disable_power_well =
3485 sanitize_disable_power_well_option(dev_priv,
3486 i915_modparams.disable_power_well);
3487 dev_priv->csr.allowed_dc_mask =
3488 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
1b0e3a04 3489
d8fc70b7 3490 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
f0ab43e6 3491
9c065a7d
SV
3492 mutex_init(&power_domains->lock);
3493
3494 /*
3495 * The enabling order will be from lower to higher indexed wells,
3496 * the disabling order is reversed.
3497 */
39564ae8 3498 if (IS_GEN(dev_priv, 11)) {
f28ec6f4 3499 err = set_power_wells(power_domains, icl_power_wells);
8bcd3dd4 3500 } else if (IS_CANNONLAKE(dev_priv)) {
f28ec6f4 3501 err = set_power_wells(power_domains, cnl_power_wells);
a324fcac
RV
3502
3503 /*
9787e835 3504 * DDI and Aux IO are getting enabled for all ports
a324fcac 3505 * regardless the presence or use. So, in order to avoid
9787e835 3506 * timeouts, lets remove them from the list
a324fcac
RV
3507 * for the SKUs without port F.
3508 */
3509 if (!IS_CNL_WITH_PORT_F(dev_priv))
9787e835 3510 power_domains->power_well_count -= 2;
0d03926d 3511 } else if (IS_GEMINILAKE(dev_priv)) {
f28ec6f4 3512 err = set_power_wells(power_domains, glk_power_wells);
fb72deae
RV
3513 } else if (IS_BROXTON(dev_priv)) {
3514 err = set_power_wells(power_domains, bxt_power_wells);
3515 } else if (IS_GEN9_BC(dev_priv)) {
3516 err = set_power_wells(power_domains, skl_power_wells);
2d1fe073 3517 } else if (IS_CHERRYVIEW(dev_priv)) {
f28ec6f4 3518 err = set_power_wells(power_domains, chv_power_wells);
fb72deae
RV
3519 } else if (IS_BROADWELL(dev_priv)) {
3520 err = set_power_wells(power_domains, bdw_power_wells);
3521 } else if (IS_HASWELL(dev_priv)) {
3522 err = set_power_wells(power_domains, hsw_power_wells);
2d1fe073 3523 } else if (IS_VALLEYVIEW(dev_priv)) {
f28ec6f4 3524 err = set_power_wells(power_domains, vlv_power_wells);
2ee0da16 3525 } else if (IS_I830(dev_priv)) {
f28ec6f4 3526 err = set_power_wells(power_domains, i830_power_wells);
9c065a7d 3527 } else {
f28ec6f4 3528 err = set_power_wells(power_domains, i9xx_always_on_power_well);
9c065a7d
SV
3529 }
3530
f28ec6f4
ID
3531 return err;
3532}
21792c60 3533
f28ec6f4
ID
3534/**
3535 * intel_power_domains_cleanup - clean up power domains resources
3536 * @dev_priv: i915 device instance
3537 *
3538 * Release any resources acquired by intel_power_domains_init()
3539 */
3540void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3541{
3542 kfree(dev_priv->power_domains.power_wells);
9c065a7d
SV
3543}
3544
30eade12 3545static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
9c065a7d
SV
3546{
3547 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3548 struct i915_power_well *power_well;
9c065a7d
SV
3549
3550 mutex_lock(&power_domains->lock);
75ccb2ec 3551 for_each_power_well(dev_priv, power_well) {
f28ec6f4
ID
3552 power_well->desc->ops->sync_hw(dev_priv, power_well);
3553 power_well->hw_enabled =
3554 power_well->desc->ops->is_enabled(dev_priv, power_well);
9c065a7d
SV
3555 }
3556 mutex_unlock(&power_domains->lock);
3557}
3558
aa9664ff
MK
3559static inline
3560bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3561 i915_reg_t reg, bool enable)
70c2c184 3562{
aa9664ff 3563 u32 val, status;
70c2c184 3564
aa9664ff
MK
3565 val = I915_READ(reg);
3566 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3567 I915_WRITE(reg, val);
3568 POSTING_READ(reg);
70c2c184
VS
3569 udelay(10);
3570
aa9664ff
MK
3571 status = I915_READ(reg) & DBUF_POWER_STATE;
3572 if ((enable && !status) || (!enable && status)) {
3573 DRM_ERROR("DBus power %s timeout!\n",
3574 enable ? "enable" : "disable");
3575 return false;
3576 }
3577 return true;
3578}
3579
3580static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
3581{
3582 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
70c2c184
VS
3583}
3584
3585static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
3586{
aa9664ff
MK
3587 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
3588}
70c2c184 3589
aa9664ff
MK
3590static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3591{
3592 if (INTEL_GEN(dev_priv) < 11)
3593 return 1;
3594 return 2;
3595}
70c2c184 3596
aa9664ff
MK
3597void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3598 u8 req_slices)
3599{
8577c319 3600 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
aa9664ff
MK
3601 bool ret;
3602
3603 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3604 DRM_ERROR("Invalid number of dbuf slices requested\n");
3605 return;
3606 }
3607
3608 if (req_slices == hw_enabled_slices || req_slices == 0)
3609 return;
3610
aa9664ff
MK
3611 if (req_slices > hw_enabled_slices)
3612 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3613 else
3614 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3615
3616 if (ret)
3617 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
70c2c184
VS
3618}
3619
746edf8f
MK
3620static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3621{
3622 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3623 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3624 POSTING_READ(DBUF_CTL_S2);
3625
3626 udelay(10);
3627
3628 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3629 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3630 DRM_ERROR("DBuf power enable timeout\n");
74bd8004 3631 else
209d7353
ID
3632 /*
3633 * FIXME: for now pretend that we only have 1 slice, see
3634 * intel_enabled_dbuf_slices_num().
3635 */
3636 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
746edf8f
MK
3637}
3638
3639static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3640{
3641 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3642 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3643 POSTING_READ(DBUF_CTL_S2);
3644
3645 udelay(10);
3646
3647 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3648 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3649 DRM_ERROR("DBuf power disable timeout!\n");
74bd8004 3650 else
209d7353
ID
3651 /*
3652 * FIXME: for now pretend that the first slice is always
3653 * enabled, see intel_enabled_dbuf_slices_num().
3654 */
3655 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
746edf8f
MK
3656}
3657
4cb4585e
MK
3658static void icl_mbus_init(struct drm_i915_private *dev_priv)
3659{
739f3abd 3660 u32 val;
4cb4585e
MK
3661
3662 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3663 MBUS_ABOX_BT_CREDIT_POOL2(16) |
3664 MBUS_ABOX_B_CREDIT(1) |
3665 MBUS_ABOX_BW_CREDIT(1);
3666
3667 I915_WRITE(MBUS_ABOX_CTL, val);
3668}
3669
8f91cfd2
VS
3670static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
3671{
3672 u32 val = I915_READ(LCPLL_CTL);
3673
3674 /*
3675 * The LCPLL register should be turned on by the BIOS. For now
3676 * let's just check its state and print errors in case
3677 * something is wrong. Don't even try to turn it on.
3678 */
3679
3680 if (val & LCPLL_CD_SOURCE_FCLK)
3681 DRM_ERROR("CDCLK source is not LCPLL\n");
3682
3683 if (val & LCPLL_PLL_DISABLE)
3684 DRM_ERROR("LCPLL is disabled\n");
3685}
3686
46034d2b
VS
3687static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
3688{
3689 struct drm_device *dev = &dev_priv->drm;
3690 struct intel_crtc *crtc;
3691
3692 for_each_intel_crtc(dev, crtc)
3693 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
3694 pipe_name(crtc->pipe));
3695
3696 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
3697 "Display power well on\n");
3698 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
3699 "SPLL enabled\n");
3700 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
3701 "WRPLL1 enabled\n");
3702 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
3703 "WRPLL2 enabled\n");
3704 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
3705 "Panel power on\n");
3706 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
3707 "CPU PWM1 enabled\n");
3708 if (IS_HASWELL(dev_priv))
3709 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
3710 "CPU PWM2 enabled\n");
3711 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
3712 "PCH PWM1 enabled\n");
3713 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
3714 "Utility pin enabled\n");
3715 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
3716 "PCH GTC enabled\n");
3717
3718 /*
3719 * In theory we can still leave IRQs enabled, as long as only the HPD
3720 * interrupts remain enabled. We used to check for that, but since it's
3721 * gen-specific and since we only disable LCPLL after we fully disable
3722 * the interrupts, the check below should be enough.
3723 */
3724 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
3725}
3726
3727static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
3728{
3729 if (IS_HASWELL(dev_priv))
3730 return I915_READ(D_COMP_HSW);
3731 else
3732 return I915_READ(D_COMP_BDW);
3733}
3734
3735static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
3736{
3737 if (IS_HASWELL(dev_priv)) {
3738 if (sandybridge_pcode_write(dev_priv,
3739 GEN6_PCODE_WRITE_D_COMP, val))
3740 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
3741 } else {
3742 I915_WRITE(D_COMP_BDW, val);
3743 POSTING_READ(D_COMP_BDW);
3744 }
3745}
3746
3747/*
3748 * This function implements pieces of two sequences from BSpec:
3749 * - Sequence for display software to disable LCPLL
3750 * - Sequence for display software to allow package C8+
3751 * The steps implemented here are just the steps that actually touch the LCPLL
3752 * register. Callers should take care of disabling all the display engine
3753 * functions, doing the mode unset, fixing interrupts, etc.
3754 */
3755static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
3756 bool switch_to_fclk, bool allow_power_down)
3757{
3758 u32 val;
3759
3760 assert_can_disable_lcpll(dev_priv);
3761
3762 val = I915_READ(LCPLL_CTL);
3763
3764 if (switch_to_fclk) {
3765 val |= LCPLL_CD_SOURCE_FCLK;
3766 I915_WRITE(LCPLL_CTL, val);
3767
3768 if (wait_for_us(I915_READ(LCPLL_CTL) &
3769 LCPLL_CD_SOURCE_FCLK_DONE, 1))
3770 DRM_ERROR("Switching to FCLK failed\n");
3771
3772 val = I915_READ(LCPLL_CTL);
3773 }
3774
3775 val |= LCPLL_PLL_DISABLE;
3776 I915_WRITE(LCPLL_CTL, val);
3777 POSTING_READ(LCPLL_CTL);
3778
3779 if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
3780 LCPLL_PLL_LOCK, 0, 1))
3781 DRM_ERROR("LCPLL still locked\n");
3782
3783 val = hsw_read_dcomp(dev_priv);
3784 val |= D_COMP_COMP_DISABLE;
3785 hsw_write_dcomp(dev_priv, val);
3786 ndelay(100);
3787
3788 if (wait_for((hsw_read_dcomp(dev_priv) &
3789 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
3790 DRM_ERROR("D_COMP RCOMP still in progress\n");
3791
3792 if (allow_power_down) {
3793 val = I915_READ(LCPLL_CTL);
3794 val |= LCPLL_POWER_DOWN_ALLOW;
3795 I915_WRITE(LCPLL_CTL, val);
3796 POSTING_READ(LCPLL_CTL);
3797 }
3798}
3799
3800/*
3801 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
3802 * source.
3803 */
3804static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
3805{
3806 u32 val;
3807
3808 val = I915_READ(LCPLL_CTL);
3809
3810 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
3811 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
3812 return;
3813
3814 /*
3815 * Make sure we're not on PC8 state before disabling PC8, otherwise
3816 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
3817 */
3818 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
3819
3820 if (val & LCPLL_POWER_DOWN_ALLOW) {
3821 val &= ~LCPLL_POWER_DOWN_ALLOW;
3822 I915_WRITE(LCPLL_CTL, val);
3823 POSTING_READ(LCPLL_CTL);
3824 }
3825
3826 val = hsw_read_dcomp(dev_priv);
3827 val |= D_COMP_COMP_FORCE;
3828 val &= ~D_COMP_COMP_DISABLE;
3829 hsw_write_dcomp(dev_priv, val);
3830
3831 val = I915_READ(LCPLL_CTL);
3832 val &= ~LCPLL_PLL_DISABLE;
3833 I915_WRITE(LCPLL_CTL, val);
3834
3835 if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
3836 LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
3837 DRM_ERROR("LCPLL not locked yet\n");
3838
3839 if (val & LCPLL_CD_SOURCE_FCLK) {
3840 val = I915_READ(LCPLL_CTL);
3841 val &= ~LCPLL_CD_SOURCE_FCLK;
3842 I915_WRITE(LCPLL_CTL, val);
3843
3844 if (wait_for_us((I915_READ(LCPLL_CTL) &
3845 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
3846 DRM_ERROR("Switching back to LCPLL failed\n");
3847 }
3848
3849 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
3850
3851 intel_update_cdclk(dev_priv);
3852 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
3853}
3854
3855/*
3856 * Package states C8 and deeper are really deep PC states that can only be
3857 * reached when all the devices on the system allow it, so even if the graphics
3858 * device allows PC8+, it doesn't mean the system will actually get to these
3859 * states. Our driver only allows PC8+ when going into runtime PM.
3860 *
3861 * The requirements for PC8+ are that all the outputs are disabled, the power
3862 * well is disabled and most interrupts are disabled, and these are also
3863 * requirements for runtime PM. When these conditions are met, we manually do
3864 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
3865 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
3866 * hang the machine.
3867 *
3868 * When we really reach PC8 or deeper states (not just when we allow it) we lose
3869 * the state of some registers, so when we come back from PC8+ we need to
3870 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
3871 * need to take care of the registers kept by RC6. Notice that this happens even
3872 * if we don't put the device in PCI D3 state (which is what currently happens
3873 * because of the runtime PM support).
3874 *
3875 * For more, read "Display Sequences for Package C8" on the hardware
3876 * documentation.
3877 */
3878void hsw_enable_pc8(struct drm_i915_private *dev_priv)
3879{
3880 u32 val;
3881
3882 DRM_DEBUG_KMS("Enabling package C8+\n");
3883
3884 if (HAS_PCH_LPT_LP(dev_priv)) {
3885 val = I915_READ(SOUTH_DSPCLK_GATE_D);
3886 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
3887 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
3888 }
3889
3890 lpt_disable_clkout_dp(dev_priv);
3891 hsw_disable_lcpll(dev_priv, true, true);
3892}
3893
3894void hsw_disable_pc8(struct drm_i915_private *dev_priv)
3895{
3896 u32 val;
3897
3898 DRM_DEBUG_KMS("Disabling package C8+\n");
3899
3900 hsw_restore_lcpll(dev_priv);
3901 intel_init_pch_refclk(dev_priv);
3902
3903 if (HAS_PCH_LPT_LP(dev_priv)) {
3904 val = I915_READ(SOUTH_DSPCLK_GATE_D);
3905 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
3906 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
3907 }
3908}
3909
7c86828d
JRS
3910static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3911 bool enable)
3912{
6edafc4e
JRS
3913 i915_reg_t reg;
3914 u32 reset_bits, val;
3915
3916 if (IS_IVYBRIDGE(dev_priv)) {
3917 reg = GEN7_MSG_CTL;
3918 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3919 } else {
3920 reg = HSW_NDE_RSTWRN_OPT;
3921 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3922 }
3923
3924 val = I915_READ(reg);
7c86828d
JRS
3925
3926 if (enable)
6edafc4e 3927 val |= reset_bits;
7c86828d 3928 else
6edafc4e 3929 val &= ~reset_bits;
7c86828d 3930
6edafc4e 3931 I915_WRITE(reg, val);
7c86828d
JRS
3932}
3933
73dfc227 3934static void skl_display_core_init(struct drm_i915_private *dev_priv,
443a93ac 3935 bool resume)
73dfc227
ID
3936{
3937 struct i915_power_domains *power_domains = &dev_priv->power_domains;
443a93ac 3938 struct i915_power_well *well;
73dfc227 3939
d26fa1d5
ID
3940 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3941
73dfc227 3942 /* enable PCH reset handshake */
6edafc4e 3943 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
73dfc227
ID
3944
3945 /* enable PG1 and Misc I/O */
3946 mutex_lock(&power_domains->lock);
443a93ac
ID
3947
3948 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3949 intel_power_well_enable(dev_priv, well);
3950
3951 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3952 intel_power_well_enable(dev_priv, well);
3953
73dfc227
ID
3954 mutex_unlock(&power_domains->lock);
3955
93a643f2 3956 intel_cdclk_init(dev_priv);
73dfc227 3957
70c2c184
VS
3958 gen9_dbuf_enable(dev_priv);
3959
9f7eb31a 3960 if (resume && dev_priv->csr.dmc_payload)
2abc525b 3961 intel_csr_load_program(dev_priv);
73dfc227
ID
3962}
3963
3964static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3965{
3966 struct i915_power_domains *power_domains = &dev_priv->power_domains;
443a93ac 3967 struct i915_power_well *well;
73dfc227 3968
d26fa1d5
ID
3969 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3970
70c2c184
VS
3971 gen9_dbuf_disable(dev_priv);
3972
93a643f2 3973 intel_cdclk_uninit(dev_priv);
73dfc227
ID
3974
3975 /* The spec doesn't call for removing the reset handshake flag */
3976 /* disable PG1 and Misc I/O */
443a93ac 3977
73dfc227 3978 mutex_lock(&power_domains->lock);
443a93ac 3979
edfda8e3
ID
3980 /*
3981 * BSpec says to keep the MISC IO power well enabled here, only
3982 * remove our request for power well 1.
42d9366d
ID
3983 * Note that even though the driver's request is removed power well 1
3984 * may stay enabled after this due to DMC's own request on it.
edfda8e3 3985 */
443a93ac
ID
3986 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3987 intel_power_well_disable(dev_priv, well);
3988
73dfc227 3989 mutex_unlock(&power_domains->lock);
846c6b26
ID
3990
3991 usleep_range(10, 30); /* 10 us delay per Bspec */
73dfc227
ID
3992}
3993
d7d7c9ee
ID
3994void bxt_display_core_init(struct drm_i915_private *dev_priv,
3995 bool resume)
3996{
3997 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3998 struct i915_power_well *well;
d7d7c9ee
ID
3999
4000 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4001
4002 /*
4003 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4004 * or else the reset will hang because there is no PCH to respond.
4005 * Move the handshake programming to initialization sequence.
4006 * Previously was left up to BIOS.
4007 */
7c86828d 4008 intel_pch_reset_handshake(dev_priv, false);
d7d7c9ee
ID
4009
4010 /* Enable PG1 */
4011 mutex_lock(&power_domains->lock);
4012
4013 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4014 intel_power_well_enable(dev_priv, well);
4015
4016 mutex_unlock(&power_domains->lock);
4017
93a643f2 4018 intel_cdclk_init(dev_priv);
70c2c184
VS
4019
4020 gen9_dbuf_enable(dev_priv);
4021
d7d7c9ee
ID
4022 if (resume && dev_priv->csr.dmc_payload)
4023 intel_csr_load_program(dev_priv);
4024}
4025
4026void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4027{
4028 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4029 struct i915_power_well *well;
4030
4031 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4032
70c2c184
VS
4033 gen9_dbuf_disable(dev_priv);
4034
93a643f2 4035 intel_cdclk_uninit(dev_priv);
d7d7c9ee
ID
4036
4037 /* The spec doesn't call for removing the reset handshake flag */
4038
42d9366d
ID
4039 /*
4040 * Disable PW1 (PG1).
4041 * Note that even though the driver's request is removed power well 1
4042 * may stay enabled after this due to DMC's own request on it.
4043 */
d7d7c9ee
ID
4044 mutex_lock(&power_domains->lock);
4045
4046 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4047 intel_power_well_disable(dev_priv, well);
4048
4049 mutex_unlock(&power_domains->lock);
846c6b26
ID
4050
4051 usleep_range(10, 30); /* 10 us delay per Bspec */
d7d7c9ee
ID
4052}
4053
ade5ee7e
PZ
4054static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4055{
4056 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4057 struct i915_power_well *well;
ade5ee7e
PZ
4058
4059 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4060
4061 /* 1. Enable PCH Reset Handshake */
6edafc4e 4062 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
ade5ee7e 4063
c45198b1 4064 /* 2-3. */
c9fd9166 4065 intel_combo_phy_init(dev_priv);
d8d4a512 4066
b38131fb
ID
4067 /*
4068 * 4. Enable Power Well 1 (PG1).
4069 * The AUX IO power wells will be enabled on demand.
4070 */
d8d4a512
VS
4071 mutex_lock(&power_domains->lock);
4072 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4073 intel_power_well_enable(dev_priv, well);
4074 mutex_unlock(&power_domains->lock);
4075
4076 /* 5. Enable CD clock */
93a643f2 4077 intel_cdclk_init(dev_priv);
d8d4a512
VS
4078
4079 /* 6. Enable DBUF */
4080 gen9_dbuf_enable(dev_priv);
57522c4c
ID
4081
4082 if (resume && dev_priv->csr.dmc_payload)
4083 intel_csr_load_program(dev_priv);
d8d4a512
VS
4084}
4085
d8d4a512
VS
4086static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4087{
4088 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4089 struct i915_power_well *well;
d8d4a512
VS
4090
4091 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4092
4093 /* 1. Disable all display engine functions -> aready done */
4094
4095 /* 2. Disable DBUF */
4096 gen9_dbuf_disable(dev_priv);
4097
4098 /* 3. Disable CD clock */
93a643f2 4099 intel_cdclk_uninit(dev_priv);
d8d4a512 4100
b38131fb
ID
4101 /*
4102 * 4. Disable Power Well 1 (PG1).
4103 * The AUX IO power wells are toggled on demand, so they are already
4104 * disabled at this point.
4105 */
d8d4a512
VS
4106 mutex_lock(&power_domains->lock);
4107 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4108 intel_power_well_disable(dev_priv, well);
4109 mutex_unlock(&power_domains->lock);
4110
846c6b26
ID
4111 usleep_range(10, 30); /* 10 us delay per Bspec */
4112
c45198b1 4113 /* 5. */
c9fd9166 4114 intel_combo_phy_uninit(dev_priv);
d8d4a512
VS
4115}
4116
3e68928b
AM
4117void icl_display_core_init(struct drm_i915_private *dev_priv,
4118 bool resume)
ad186f3f 4119{
67ca07e7
ID
4120 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4121 struct i915_power_well *well;
ad186f3f
PZ
4122
4123 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4124
4125 /* 1. Enable PCH reset handshake. */
6edafc4e 4126 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
ad186f3f 4127
fcfec1fc 4128 /* 2. Initialize all combo phys */
c9fd9166 4129 intel_combo_phy_init(dev_priv);
ad186f3f 4130
67ca07e7 4131 /*
fcfec1fc 4132 * 3. Enable Power Well 1 (PG1).
67ca07e7
ID
4133 * The AUX IO power wells will be enabled on demand.
4134 */
4135 mutex_lock(&power_domains->lock);
d9fcdc8d 4136 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
67ca07e7
ID
4137 intel_power_well_enable(dev_priv, well);
4138 mutex_unlock(&power_domains->lock);
ad186f3f 4139
fcfec1fc 4140 /* 4. Enable CDCLK. */
93a643f2 4141 intel_cdclk_init(dev_priv);
ad186f3f 4142
fcfec1fc 4143 /* 5. Enable DBUF. */
746edf8f 4144 icl_dbuf_enable(dev_priv);
ad186f3f 4145
fcfec1fc 4146 /* 6. Setup MBUS. */
4cb4585e 4147 icl_mbus_init(dev_priv);
4445930f
AS
4148
4149 if (resume && dev_priv->csr.dmc_payload)
4150 intel_csr_load_program(dev_priv);
ad186f3f
PZ
4151}
4152
3e68928b 4153void icl_display_core_uninit(struct drm_i915_private *dev_priv)
ad186f3f 4154{
67ca07e7
ID
4155 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4156 struct i915_power_well *well;
ad186f3f
PZ
4157
4158 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4159
4160 /* 1. Disable all display engine functions -> aready done */
4161
4162 /* 2. Disable DBUF */
746edf8f 4163 icl_dbuf_disable(dev_priv);
ad186f3f
PZ
4164
4165 /* 3. Disable CD clock */
93a643f2 4166 intel_cdclk_uninit(dev_priv);
ad186f3f 4167
67ca07e7
ID
4168 /*
4169 * 4. Disable Power Well 1 (PG1).
4170 * The AUX IO power wells are toggled on demand, so they are already
4171 * disabled at this point.
4172 */
4173 mutex_lock(&power_domains->lock);
d9fcdc8d 4174 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
67ca07e7
ID
4175 intel_power_well_disable(dev_priv, well);
4176 mutex_unlock(&power_domains->lock);
ad186f3f 4177
c45198b1 4178 /* 5. */
c9fd9166 4179 intel_combo_phy_uninit(dev_priv);
ad186f3f
PZ
4180}
4181
70722468
VS
4182static void chv_phy_control_init(struct drm_i915_private *dev_priv)
4183{
4184 struct i915_power_well *cmn_bc =
2183b499 4185 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
70722468 4186 struct i915_power_well *cmn_d =
2183b499 4187 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
70722468
VS
4188
4189 /*
4190 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
4191 * workaround never ever read DISPLAY_PHY_CONTROL, and
4192 * instead maintain a shadow copy ourselves. Use the actual
e0fce78f
VS
4193 * power well state and lane status to reconstruct the
4194 * expected initial value.
70722468
VS
4195 */
4196 dev_priv->chv_phy_control =
bc284542
VS
4197 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
4198 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
e0fce78f
VS
4199 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
4200 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
4201 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
4202
4203 /*
4204 * If all lanes are disabled we leave the override disabled
4205 * with all power down bits cleared to match the state we
4206 * would use after disabling the port. Otherwise enable the
4207 * override and set the lane powerdown bits accding to the
4208 * current lane status.
4209 */
f28ec6f4 4210 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
739f3abd 4211 u32 status = I915_READ(DPLL(PIPE_A));
e0fce78f
VS
4212 unsigned int mask;
4213
4214 mask = status & DPLL_PORTB_READY_MASK;
4215 if (mask == 0xf)
4216 mask = 0x0;
4217 else
4218 dev_priv->chv_phy_control |=
4219 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
4220
4221 dev_priv->chv_phy_control |=
4222 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
4223
4224 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
4225 if (mask == 0xf)
4226 mask = 0x0;
4227 else
4228 dev_priv->chv_phy_control |=
4229 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
4230
4231 dev_priv->chv_phy_control |=
4232 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
4233
70722468 4234 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3be60de9
VS
4235
4236 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
4237 } else {
4238 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
e0fce78f
VS
4239 }
4240
f28ec6f4 4241 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
739f3abd 4242 u32 status = I915_READ(DPIO_PHY_STATUS);
e0fce78f
VS
4243 unsigned int mask;
4244
4245 mask = status & DPLL_PORTD_READY_MASK;
4246
4247 if (mask == 0xf)
4248 mask = 0x0;
4249 else
4250 dev_priv->chv_phy_control |=
4251 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
4252
4253 dev_priv->chv_phy_control |=
4254 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
4255
70722468 4256 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3be60de9
VS
4257
4258 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
4259 } else {
4260 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
e0fce78f
VS
4261 }
4262
4263 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
4264
4265 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
4266 dev_priv->chv_phy_control);
70722468
VS
4267}
4268
9c065a7d
SV
4269static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
4270{
4271 struct i915_power_well *cmn =
2183b499 4272 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
9c065a7d 4273 struct i915_power_well *disp2d =
2183b499 4274 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
9c065a7d 4275
9c065a7d 4276 /* If the display might be already active skip this */
f28ec6f4
ID
4277 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
4278 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
9c065a7d
SV
4279 I915_READ(DPIO_CTL) & DPIO_CMNRST)
4280 return;
4281
4282 DRM_DEBUG_KMS("toggling display PHY side reset\n");
4283
4284 /* cmnlane needs DPLL registers */
f28ec6f4 4285 disp2d->desc->ops->enable(dev_priv, disp2d);
9c065a7d
SV
4286
4287 /*
4288 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
4289 * Need to assert and de-assert PHY SB reset by gating the
4290 * common lane power, then un-gating it.
4291 * Simply ungating isn't enough to reset the PHY enough to get
4292 * ports and lanes running.
4293 */
f28ec6f4 4294 cmn->desc->ops->disable(dev_priv, cmn);
9c065a7d
SV
4295}
4296
5e0b6697
VS
4297static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4298{
4299 bool ret;
4300
337fa6e0 4301 vlv_punit_get(dev_priv);
5e0b6697 4302 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
337fa6e0 4303 vlv_punit_put(dev_priv);
5e0b6697
VS
4304
4305 return ret;
4306}
4307
4308static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4309{
4310 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4311 "VED not power gated\n");
4312}
4313
4314static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4315{
4316 static const struct pci_device_id isp_ids[] = {
4317 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4318 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4319 {}
4320 };
4321
4322 WARN(!pci_dev_present(isp_ids) &&
4323 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4324 "ISP not power gated\n");
4325}
4326
6dfc4a8f
ID
4327static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4328
e4e7684f
SV
4329/**
4330 * intel_power_domains_init_hw - initialize hardware power domain state
25c896bd 4331 * @i915: i915 device instance
14bb2c11 4332 * @resume: Called from resume code paths or not
e4e7684f
SV
4333 *
4334 * This function initializes the hardware power domain state and enables all
8d8c386c 4335 * power wells belonging to the INIT power domain. Power wells in other
d8c5d29f
ID
4336 * domains (and not in the INIT domain) are referenced or disabled by
4337 * intel_modeset_readout_hw_state(). After that the reference count of each
4338 * power well must match its HW enabled state, see
4339 * intel_power_domains_verify_state().
2cd9a689
ID
4340 *
4341 * It will return with power domains disabled (to be enabled later by
4342 * intel_power_domains_enable()) and must be paired with
4343 * intel_power_domains_fini_hw().
e4e7684f 4344 */
25c896bd 4345void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
9c065a7d 4346{
25c896bd 4347 struct i915_power_domains *power_domains = &i915->power_domains;
9c065a7d
SV
4348
4349 power_domains->initializing = true;
4350
39564ae8 4351 if (INTEL_GEN(i915) >= 11) {
25c896bd
CW
4352 icl_display_core_init(i915, resume);
4353 } else if (IS_CANNONLAKE(i915)) {
4354 cnl_display_core_init(i915, resume);
4355 } else if (IS_GEN9_BC(i915)) {
4356 skl_display_core_init(i915, resume);
4357 } else if (IS_GEN9_LP(i915)) {
4358 bxt_display_core_init(i915, resume);
4359 } else if (IS_CHERRYVIEW(i915)) {
770effb1 4360 mutex_lock(&power_domains->lock);
25c896bd 4361 chv_phy_control_init(i915);
770effb1 4362 mutex_unlock(&power_domains->lock);
5e0b6697 4363 assert_isp_power_gated(i915);
25c896bd 4364 } else if (IS_VALLEYVIEW(i915)) {
9c065a7d 4365 mutex_lock(&power_domains->lock);
25c896bd 4366 vlv_cmnlane_wa(i915);
9c065a7d 4367 mutex_unlock(&power_domains->lock);
5e0b6697
VS
4368 assert_ved_power_gated(i915);
4369 assert_isp_power_gated(i915);
8f91cfd2
VS
4370 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
4371 hsw_assert_cdclk(i915);
4372 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4373 } else if (IS_IVYBRIDGE(i915)) {
25c896bd
CW
4374 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4375 }
9c065a7d 4376
2cd9a689
ID
4377 /*
4378 * Keep all power wells enabled for any dependent HW access during
4379 * initialization and to make sure we keep BIOS enabled display HW
4380 * resources powered until display HW readout is complete. We drop
4381 * this reference in intel_power_domains_enable().
4382 */
25c896bd
CW
4383 power_domains->wakeref =
4384 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4385
d314cd43 4386 /* Disable power support if the user asked so. */
4f044a88 4387 if (!i915_modparams.disable_power_well)
25c896bd
CW
4388 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4389 intel_power_domains_sync_hw(i915);
6dfc4a8f 4390
d8c5d29f 4391 power_domains->initializing = false;
9c065a7d
SV
4392}
4393
48a287ed
ID
4394/**
4395 * intel_power_domains_fini_hw - deinitialize hw power domain state
25c896bd 4396 * @i915: i915 device instance
48a287ed
ID
4397 *
4398 * De-initializes the display power domain HW state. It also ensures that the
4399 * device stays powered up so that the driver can be reloaded.
2cd9a689
ID
4400 *
4401 * It must be called with power domains already disabled (after a call to
4402 * intel_power_domains_disable()) and must be paired with
4403 * intel_power_domains_init_hw().
48a287ed 4404 */
25c896bd 4405void intel_power_domains_fini_hw(struct drm_i915_private *i915)
48a287ed 4406{
25c896bd
CW
4407 intel_wakeref_t wakeref __maybe_unused =
4408 fetch_and_zero(&i915->power_domains.wakeref);
48a287ed
ID
4409
4410 /* Remove the refcount we took to keep power well support disabled. */
4411 if (!i915_modparams.disable_power_well)
25c896bd
CW
4412 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4413
4414 intel_power_domains_verify_state(i915);
6dfc4a8f 4415
25c896bd
CW
4416 /* Keep the power well enabled, but cancel its rpm wakeref. */
4417 intel_runtime_pm_put(i915, wakeref);
48a287ed
ID
4418}
4419
2cd9a689
ID
4420/**
4421 * intel_power_domains_enable - enable toggling of display power wells
25c896bd 4422 * @i915: i915 device instance
2cd9a689
ID
4423 *
4424 * Enable the ondemand enabling/disabling of the display power wells. Note that
4425 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4426 * only at specific points of the display modeset sequence, thus they are not
4427 * affected by the intel_power_domains_enable()/disable() calls. The purpose
4428 * of these function is to keep the rest of power wells enabled until the end
4429 * of display HW readout (which will acquire the power references reflecting
4430 * the current HW state).
4431 */
25c896bd 4432void intel_power_domains_enable(struct drm_i915_private *i915)
2cd9a689 4433{
25c896bd
CW
4434 intel_wakeref_t wakeref __maybe_unused =
4435 fetch_and_zero(&i915->power_domains.wakeref);
6dfc4a8f 4436
25c896bd
CW
4437 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4438 intel_power_domains_verify_state(i915);
2cd9a689
ID
4439}
4440
4441/**
4442 * intel_power_domains_disable - disable toggling of display power wells
25c896bd 4443 * @i915: i915 device instance
2cd9a689
ID
4444 *
4445 * Disable the ondemand enabling/disabling of the display power wells. See
4446 * intel_power_domains_enable() for which power wells this call controls.
4447 */
25c896bd 4448void intel_power_domains_disable(struct drm_i915_private *i915)
2cd9a689 4449{
25c896bd 4450 struct i915_power_domains *power_domains = &i915->power_domains;
6dfc4a8f 4451
25c896bd
CW
4452 WARN_ON(power_domains->wakeref);
4453 power_domains->wakeref =
4454 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4455
4456 intel_power_domains_verify_state(i915);
2cd9a689
ID
4457}
4458
73dfc227
ID
4459/**
4460 * intel_power_domains_suspend - suspend power domain state
25c896bd 4461 * @i915: i915 device instance
2cd9a689 4462 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
73dfc227
ID
4463 *
4464 * This function prepares the hardware power domain state before entering
2cd9a689
ID
4465 * system suspend.
4466 *
4467 * It must be called with power domains already disabled (after a call to
4468 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
73dfc227 4469 */
25c896bd 4470void intel_power_domains_suspend(struct drm_i915_private *i915,
2cd9a689 4471 enum i915_drm_suspend_mode suspend_mode)
73dfc227 4472{
25c896bd
CW
4473 struct i915_power_domains *power_domains = &i915->power_domains;
4474 intel_wakeref_t wakeref __maybe_unused =
4475 fetch_and_zero(&power_domains->wakeref);
2cd9a689 4476
25c896bd 4477 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2cd9a689
ID
4478
4479 /*
a61d904f
ID
4480 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4481 * support don't manually deinit the power domains. This also means the
4482 * CSR/DMC firmware will stay active, it will power down any HW
4483 * resources as required and also enable deeper system power states
4484 * that would be blocked if the firmware was inactive.
2cd9a689 4485 */
25c896bd 4486 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
a61d904f 4487 suspend_mode == I915_DRM_SUSPEND_IDLE &&
25c896bd
CW
4488 i915->csr.dmc_payload) {
4489 intel_power_domains_verify_state(i915);
2cd9a689 4490 return;
6dfc4a8f 4491 }
2cd9a689 4492
d314cd43
ID
4493 /*
4494 * Even if power well support was disabled we still want to disable
2cd9a689 4495 * power wells if power domains must be deinitialized for suspend.
d314cd43 4496 */
6dfc4a8f 4497 if (!i915_modparams.disable_power_well) {
25c896bd
CW
4498 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4499 intel_power_domains_verify_state(i915);
6dfc4a8f 4500 }
2622d79b 4501
39564ae8 4502 if (INTEL_GEN(i915) >= 11)
25c896bd
CW
4503 icl_display_core_uninit(i915);
4504 else if (IS_CANNONLAKE(i915))
4505 cnl_display_core_uninit(i915);
4506 else if (IS_GEN9_BC(i915))
4507 skl_display_core_uninit(i915);
4508 else if (IS_GEN9_LP(i915))
4509 bxt_display_core_uninit(i915);
2cd9a689
ID
4510
4511 power_domains->display_core_suspended = true;
4512}
4513
4514/**
4515 * intel_power_domains_resume - resume power domain state
25c896bd 4516 * @i915: i915 device instance
2cd9a689
ID
4517 *
4518 * This function resume the hardware power domain state during system resume.
4519 *
4520 * It will return with power domain support disabled (to be enabled later by
4521 * intel_power_domains_enable()) and must be paired with
4522 * intel_power_domains_suspend().
4523 */
25c896bd 4524void intel_power_domains_resume(struct drm_i915_private *i915)
2cd9a689 4525{
25c896bd 4526 struct i915_power_domains *power_domains = &i915->power_domains;
2cd9a689
ID
4527
4528 if (power_domains->display_core_suspended) {
25c896bd 4529 intel_power_domains_init_hw(i915, true);
2cd9a689 4530 power_domains->display_core_suspended = false;
6dfc4a8f 4531 } else {
25c896bd
CW
4532 WARN_ON(power_domains->wakeref);
4533 power_domains->wakeref =
4534 intel_display_power_get(i915, POWER_DOMAIN_INIT);
2cd9a689
ID
4535 }
4536
25c896bd 4537 intel_power_domains_verify_state(i915);
73dfc227
ID
4538}
4539
6dfc4a8f
ID
4540#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4541
25c896bd 4542static void intel_power_domains_dump_info(struct drm_i915_private *i915)
8d8c386c 4543{
25c896bd 4544 struct i915_power_domains *power_domains = &i915->power_domains;
8d8c386c
ID
4545 struct i915_power_well *power_well;
4546
25c896bd 4547 for_each_power_well(i915, power_well) {
8d8c386c
ID
4548 enum intel_display_power_domain domain;
4549
4550 DRM_DEBUG_DRIVER("%-25s %d\n",
f28ec6f4 4551 power_well->desc->name, power_well->count);
8d8c386c 4552
f28ec6f4 4553 for_each_power_domain(domain, power_well->desc->domains)
8d8c386c
ID
4554 DRM_DEBUG_DRIVER(" %-23s %d\n",
4555 intel_display_power_domain_str(domain),
4556 power_domains->domain_use_count[domain]);
4557 }
4558}
4559
4560/**
4561 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
25c896bd 4562 * @i915: i915 device instance
8d8c386c
ID
4563 *
4564 * Verify if the reference count of each power well matches its HW enabled
4565 * state and the total refcount of the domains it belongs to. This must be
4566 * called after modeset HW state sanitization, which is responsible for
4567 * acquiring reference counts for any power wells in use and disabling the
4568 * ones left on by BIOS but not required by any active output.
4569 */
25c896bd 4570static void intel_power_domains_verify_state(struct drm_i915_private *i915)
8d8c386c 4571{
25c896bd 4572 struct i915_power_domains *power_domains = &i915->power_domains;
8d8c386c
ID
4573 struct i915_power_well *power_well;
4574 bool dump_domain_info;
4575
4576 mutex_lock(&power_domains->lock);
4577
4578 dump_domain_info = false;
25c896bd 4579 for_each_power_well(i915, power_well) {
8d8c386c
ID
4580 enum intel_display_power_domain domain;
4581 int domains_count;
4582 bool enabled;
4583
25c896bd 4584 enabled = power_well->desc->ops->is_enabled(i915, power_well);
f28ec6f4
ID
4585 if ((power_well->count || power_well->desc->always_on) !=
4586 enabled)
8d8c386c 4587 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
f28ec6f4
ID
4588 power_well->desc->name,
4589 power_well->count, enabled);
8d8c386c
ID
4590
4591 domains_count = 0;
f28ec6f4 4592 for_each_power_domain(domain, power_well->desc->domains)
8d8c386c
ID
4593 domains_count += power_domains->domain_use_count[domain];
4594
4595 if (power_well->count != domains_count) {
4596 DRM_ERROR("power well %s refcount/domain refcount mismatch "
4597 "(refcount %d/domains refcount %d)\n",
f28ec6f4 4598 power_well->desc->name, power_well->count,
8d8c386c
ID
4599 domains_count);
4600 dump_domain_info = true;
4601 }
4602 }
4603
4604 if (dump_domain_info) {
4605 static bool dumped;
4606
4607 if (!dumped) {
25c896bd 4608 intel_power_domains_dump_info(i915);
8d8c386c
ID
4609 dumped = true;
4610 }
4611 }
4612
4613 mutex_unlock(&power_domains->lock);
4614}
4615
6dfc4a8f
ID
4616#else
4617
25c896bd 4618static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6dfc4a8f
ID
4619{
4620}
4621
4622#endif
4623
4547c255
ID
4624static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915,
4625 bool wakelock)
4626{
4627 struct pci_dev *pdev = i915->drm.pdev;
4628 struct device *kdev = &pdev->dev;
4629 int ret;
4630
4631 ret = pm_runtime_get_sync(kdev);
4632 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4633
4634 intel_runtime_pm_acquire(i915, wakelock);
4635
4636 return track_intel_runtime_pm_wakeref(i915);
4637}
4638
e4e7684f
SV
4639/**
4640 * intel_runtime_pm_get - grab a runtime pm reference
bd780f37 4641 * @i915: i915 device instance
e4e7684f
SV
4642 *
4643 * This function grabs a device-level runtime pm reference (mostly used for GEM
4644 * code to ensure the GTT or GT is on) and ensures that it is powered up.
4645 *
4646 * Any runtime pm reference obtained by this function must have a symmetric
4647 * call to intel_runtime_pm_put() to release the reference again.
16e4dd03
CW
4648 *
4649 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
e4e7684f 4650 */
16e4dd03 4651intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
9c065a7d 4652{
4547c255 4653 return __intel_runtime_pm_get(i915, true);
9c065a7d
SV
4654}
4655
09731280
ID
4656/**
4657 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
bd780f37 4658 * @i915: i915 device instance
09731280
ID
4659 *
4660 * This function grabs a device-level runtime pm reference if the device is
acb79148
CW
4661 * already in use and ensures that it is powered up. It is illegal to try
4662 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
09731280
ID
4663 *
4664 * Any runtime pm reference obtained by this function must have a symmetric
4665 * call to intel_runtime_pm_put() to release the reference again.
acb79148 4666 *
16e4dd03
CW
4667 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
4668 * as True if the wakeref was acquired, or False otherwise.
09731280 4669 */
16e4dd03 4670intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
09731280 4671{
135dc79e 4672 if (IS_ENABLED(CONFIG_PM)) {
bd780f37 4673 struct pci_dev *pdev = i915->drm.pdev;
acb79148 4674 struct device *kdev = &pdev->dev;
09731280 4675
135dc79e
CW
4676 /*
4677 * In cases runtime PM is disabled by the RPM core and we get
4678 * an -EINVAL return value we are not supposed to call this
4679 * function, since the power state is undefined. This applies
4680 * atm to the late/early system suspend/resume handlers.
4681 */
acb79148 4682 if (pm_runtime_get_if_in_use(kdev) <= 0)
16e4dd03 4683 return 0;
135dc79e 4684 }
09731280 4685
4547c255
ID
4686 intel_runtime_pm_acquire(i915, true);
4687
16e4dd03 4688 return track_intel_runtime_pm_wakeref(i915);
09731280
ID
4689}
4690
e4e7684f
SV
4691/**
4692 * intel_runtime_pm_get_noresume - grab a runtime pm reference
bd780f37 4693 * @i915: i915 device instance
e4e7684f
SV
4694 *
4695 * This function grabs a device-level runtime pm reference (mostly used for GEM
4696 * code to ensure the GTT or GT is on).
4697 *
4698 * It will _not_ power up the device but instead only check that it's powered
4699 * on. Therefore it is only valid to call this functions from contexts where
4700 * the device is known to be powered up and where trying to power it up would
4701 * result in hilarity and deadlocks. That pretty much means only the system
4702 * suspend/resume code where this is used to grab runtime pm references for
4703 * delayed setup down in work items.
4704 *
4705 * Any runtime pm reference obtained by this function must have a symmetric
4706 * call to intel_runtime_pm_put() to release the reference again.
16e4dd03
CW
4707 *
4708 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
e4e7684f 4709 */
16e4dd03 4710intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
9c065a7d 4711{
bd780f37 4712 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4713 struct device *kdev = &pdev->dev;
9c065a7d 4714
bd780f37 4715 assert_rpm_wakelock_held(i915);
c49d13ee 4716 pm_runtime_get_noresume(kdev);
1f814dac 4717
4547c255
ID
4718 intel_runtime_pm_acquire(i915, true);
4719
16e4dd03 4720 return track_intel_runtime_pm_wakeref(i915);
9c065a7d
SV
4721}
4722
4547c255
ID
4723static void __intel_runtime_pm_put(struct drm_i915_private *i915,
4724 intel_wakeref_t wref,
4725 bool wakelock)
4726{
4727 struct pci_dev *pdev = i915->drm.pdev;
4728 struct device *kdev = &pdev->dev;
4729
4730 untrack_intel_runtime_pm_wakeref(i915, wref);
4731
4732 intel_runtime_pm_release(i915, wakelock);
4733
4734 pm_runtime_mark_last_busy(kdev);
4735 pm_runtime_put_autosuspend(kdev);
4736}
4737
e4e7684f 4738/**
4547c255 4739 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
bd780f37 4740 * @i915: i915 device instance
e4e7684f
SV
4741 *
4742 * This function drops the device-level runtime pm reference obtained by
4743 * intel_runtime_pm_get() and might power down the corresponding
4744 * hardware block right away if this is the last reference.
4547c255
ID
4745 *
4746 * This function exists only for historical reasons and should be avoided in
4747 * new code, as the correctness of its use cannot be checked. Always use
4748 * intel_runtime_pm_put() instead.
e4e7684f 4749 */
16e4dd03 4750void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
9c065a7d 4751{
4547c255 4752 __intel_runtime_pm_put(i915, -1, true);
9c065a7d
SV
4753}
4754
16e4dd03 4755#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4547c255
ID
4756/**
4757 * intel_runtime_pm_put - release a runtime pm reference
4758 * @i915: i915 device instance
4759 * @wref: wakeref acquired for the reference that is being released
4760 *
4761 * This function drops the device-level runtime pm reference obtained by
4762 * intel_runtime_pm_get() and might power down the corresponding
4763 * hardware block right away if this is the last reference.
4764 */
16e4dd03
CW
4765void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
4766{
4547c255 4767 __intel_runtime_pm_put(i915, wref, true);
16e4dd03
CW
4768}
4769#endif
4770
e4e7684f
SV
4771/**
4772 * intel_runtime_pm_enable - enable runtime pm
bd780f37 4773 * @i915: i915 device instance
e4e7684f
SV
4774 *
4775 * This function enables runtime pm at the end of the driver load sequence.
4776 *
4777 * Note that this function does currently not enable runtime pm for the
2cd9a689
ID
4778 * subordinate display power domains. That is done by
4779 * intel_power_domains_enable().
e4e7684f 4780 */
bd780f37 4781void intel_runtime_pm_enable(struct drm_i915_private *i915)
9c065a7d 4782{
bd780f37 4783 struct pci_dev *pdev = i915->drm.pdev;
52a05c30 4784 struct device *kdev = &pdev->dev;
9c065a7d 4785
07d80572
CW
4786 /*
4787 * Disable the system suspend direct complete optimization, which can
4788 * leave the device suspended skipping the driver's suspend handlers
4789 * if the device was already runtime suspended. This is needed due to
4790 * the difference in our runtime and system suspend sequence and
4791 * becaue the HDA driver may require us to enable the audio power
4792 * domain during system suspend.
4793 */
4794 dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4795
c49d13ee
DW
4796 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4797 pm_runtime_mark_last_busy(kdev);
cbc68dc9 4798
25b181b4
ID
4799 /*
4800 * Take a permanent reference to disable the RPM functionality and drop
4801 * it only when unloading the driver. Use the low level get/put helpers,
4802 * so the driver's own RPM reference tracking asserts also work on
4803 * platforms without RPM support.
4804 */
bd780f37 4805 if (!HAS_RUNTIME_PM(i915)) {
f5073824
ID
4806 int ret;
4807
c49d13ee 4808 pm_runtime_dont_use_autosuspend(kdev);
f5073824
ID
4809 ret = pm_runtime_get_sync(kdev);
4810 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
cbc68dc9 4811 } else {
c49d13ee 4812 pm_runtime_use_autosuspend(kdev);
cbc68dc9 4813 }
9c065a7d 4814
aabee1bb
ID
4815 /*
4816 * The core calls the driver load handler with an RPM reference held.
4817 * We drop that here and will reacquire it during unloading in
4818 * intel_power_domains_fini().
4819 */
c49d13ee 4820 pm_runtime_put_autosuspend(kdev);
9c065a7d 4821}
07d80572 4822
bd780f37 4823void intel_runtime_pm_disable(struct drm_i915_private *i915)
07d80572 4824{
bd780f37 4825 struct pci_dev *pdev = i915->drm.pdev;
07d80572
CW
4826 struct device *kdev = &pdev->dev;
4827
4828 /* Transfer rpm ownership back to core */
bd780f37 4829 WARN(pm_runtime_get_sync(kdev) < 0,
07d80572
CW
4830 "Failed to pass rpm ownership back to core\n");
4831
4832 pm_runtime_dont_use_autosuspend(kdev);
4833
bd780f37 4834 if (!HAS_RUNTIME_PM(i915))
07d80572
CW
4835 pm_runtime_put(kdev);
4836}
bd780f37
CW
4837
4838void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
4839{
4840 struct i915_runtime_pm *rpm = &i915->runtime_pm;
4841 int count;
4842
4843 count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
4844 WARN(count,
4547c255
ID
4845 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
4846 intel_rpm_raw_wakeref_count(count),
4847 intel_rpm_wakelock_count(count));
bd780f37 4848
4547c255 4849 intel_runtime_pm_release(i915, false);
bd780f37
CW
4850}
4851
/* Early one-time setup of the runtime-pm wakeref tracking state. */
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
	init_intel_runtime_pm_wakeref(i915);
}