/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

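/*
 * Per-platform tables mapping each hotplug (HPD) pin to the hotplug
 * trigger/status bits used by that platform's hotplug hardware.
 */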
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

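/*
 * gen2 uses 16-bit accesses to its GEN2_IMR/IER/IIR registers, while gen3+
 * uses 32-bit registers; hence the separate gen2_*() and gen3_*() reset,
 * assert and init helpers below.
 */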
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid read-modify-write cycles
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

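/*
 * PIPESTAT packs the interrupt enable bits in the upper 16 bits and the
 * corresponding status bits in the lower 16 bits, which is why the enable
 * mask below starts out as status_mask << 16.
 */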
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or there are
 * issues with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

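/*
 * The *_long_detect() helpers below report whether a given HPD pin saw a
 * long pulse, based on the latched hotplug control register value; they are
 * passed to intel_get_hpd_pins() as its long_pulse_detect callback.
 */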
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
	case HPD_PORT_B:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
	case HPD_PORT_C:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

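/*
 * i9xx_pipestat_irq_ack() latches and clears the per-pipe PIPESTAT bits;
 * the per-platform *_pipestat_irq_handler() variants further below then
 * act on the pipe_stats[] values it filled in.
 */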
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

ff1f525e 1536static irqreturn_t valleyview_irq_handler(int irq, void *arg)
7e231dbe 1537{
b318b824 1538 struct drm_i915_private *dev_priv = arg;
7e231dbe 1539 irqreturn_t ret = IRQ_NONE;
7e231dbe 1540
2dd2a883
ID
1541 if (!intel_irqs_enabled(dev_priv))
1542 return IRQ_NONE;
1543
1f814dac 1544 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
9102650f 1545 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 1546
1e1cace9 1547 do {
6e814800 1548 u32 iir, gt_iir, pm_iir;
2ecb8ca4 1549 u32 pipe_stats[I915_MAX_PIPES] = {};
1ae3c34c 1550 u32 hotplug_status = 0;
a5e485a9 1551 u32 ier = 0;
3ff60f89 1552
7e231dbe
JB
1553 gt_iir = I915_READ(GTIIR);
1554 pm_iir = I915_READ(GEN6_PMIIR);
3ff60f89 1555 iir = I915_READ(VLV_IIR);
7e231dbe
JB
1556
1557 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1e1cace9 1558 break;
7e231dbe
JB
1559
1560 ret = IRQ_HANDLED;
1561
a5e485a9
VS
1562 /*
1563 * Theory on interrupt generation, based on empirical evidence:
1564 *
1565 * x = ((VLV_IIR & VLV_IER) ||
1566 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1567 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1568 *
1569 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1570 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1571 * guarantee the CPU interrupt will be raised again even if we
1572 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1573 * bits this time around.
1574 */
4a0a0202 1575 I915_WRITE(VLV_MASTER_IER, 0);
a5e485a9
VS
1576 ier = I915_READ(VLV_IER);
1577 I915_WRITE(VLV_IER, 0);
4a0a0202
VS
1578
1579 if (gt_iir)
1580 I915_WRITE(GTIIR, gt_iir);
1581 if (pm_iir)
1582 I915_WRITE(GEN6_PMIIR, pm_iir);
1583
7ce4d1f2 1584 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1ae3c34c 1585 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
7ce4d1f2 1586
3ff60f89
OM
1587 /* Call regardless, as some status bits might not be
1588 * signalled in iir */
eb64343c 1589 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
7ce4d1f2 1590
eef57324
JA
1591 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1592 I915_LPE_PIPE_B_INTERRUPT))
1593 intel_lpe_audio_irq_handler(dev_priv);
1594
7ce4d1f2
VS
1595 /*
1596 * VLV_IIR is single buffered, and reflects the level
1597 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1598 */
1599 if (iir)
1600 I915_WRITE(VLV_IIR, iir);
4a0a0202 1601
a5e485a9 1602 I915_WRITE(VLV_IER, ier);
4a0a0202 1603 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1ae3c34c 1604
52894874 1605 if (gt_iir)
cf1c97dc 1606 gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
52894874 1607 if (pm_iir)
3e7abf81 1608 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
52894874 1609
1ae3c34c 1610 if (hotplug_status)
91d14251 1611 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2ecb8ca4 1612
91d14251 1613 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1e1cace9 1614 } while (0);
7e231dbe 1615
9102650f 1616 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 1617
7e231dbe
JB
1618 return ret;
1619}
1620
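/*
 * Illustrative sketch only (not i915 code): the "gate the master enable,
 * sample and clear the sources, then restore the enables" ordering that the
 * "Theory on interrupt generation" comment above describes.  With the master
 * enable forced to zero while the sources are processed, restoring it last
 * produces a fresh 0->1 edge on the combined interrupt line even if some
 * source bits were left set.  All types and callbacks below are hypothetical
 * stand-ins for the real registers.
 */
#include <stdbool.h>
#include <stdint.h>

struct irq_regs {
	uint32_t master_enable;	/* analogous to VLV_MASTER_IER */
	uint32_t ier;		/* analogous to VLV_IER */
	uint32_t iir;		/* analogous to VLV_IIR */
};

static bool handle_one_pass(struct irq_regs *r, void (*process)(uint32_t iir))
{
	uint32_t iir = r->iir;
	uint32_t saved_ier;

	if (!iir)
		return false;

	/* 1) gate the master enable and the per-source enables */
	r->master_enable = 0;
	saved_ier = r->ier;
	r->ier = 0;

	/* 2) ack and process whatever was sampled */
	r->iir &= ~iir;
	process(iir);

	/* 3) restore enables last, forcing a new 0->1 edge if needed */
	r->ier = saved_ier;
	r->master_enable = 1;

	return true;
}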
43f328d7
VS
1621static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1622{
b318b824 1623 struct drm_i915_private *dev_priv = arg;
43f328d7 1624 irqreturn_t ret = IRQ_NONE;
43f328d7 1625
2dd2a883
ID
1626 if (!intel_irqs_enabled(dev_priv))
1627 return IRQ_NONE;
1628
1f814dac 1629 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
9102650f 1630 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 1631
579de73b 1632 do {
6e814800 1633 u32 master_ctl, iir;
2ecb8ca4 1634 u32 pipe_stats[I915_MAX_PIPES] = {};
1ae3c34c 1635 u32 hotplug_status = 0;
a5e485a9
VS
1636 u32 ier = 0;
1637
8e5fd599
VS
1638 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1639 iir = I915_READ(VLV_IIR);
43f328d7 1640
8e5fd599
VS
1641 if (master_ctl == 0 && iir == 0)
1642 break;
43f328d7 1643
27b6c122
OM
1644 ret = IRQ_HANDLED;
1645
a5e485a9
VS
1646 /*
1647 * Theory on interrupt generation, based on empirical evidence:
1648 *
1649 * x = ((VLV_IIR & VLV_IER) ||
1650 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1651 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1652 *
1653 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1654 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1655 * guarantee the CPU interrupt will be raised again even if we
1656 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1657 * bits this time around.
1658 */
8e5fd599 1659 I915_WRITE(GEN8_MASTER_IRQ, 0);
a5e485a9
VS
1660 ier = I915_READ(VLV_IER);
1661 I915_WRITE(VLV_IER, 0);
43f328d7 1662
6cc32f15 1663 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
43f328d7 1664
7ce4d1f2 1665 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1ae3c34c 1666 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
7ce4d1f2 1667
27b6c122
OM
1668 /* Call regardless, as some status bits might not be
1669 * signalled in iir */
eb64343c 1670 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
43f328d7 1671
eef57324
JA
1672 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1673 I915_LPE_PIPE_B_INTERRUPT |
1674 I915_LPE_PIPE_C_INTERRUPT))
1675 intel_lpe_audio_irq_handler(dev_priv);
1676
7ce4d1f2
VS
1677 /*
1678 * VLV_IIR is single buffered, and reflects the level
1679 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1680 */
1681 if (iir)
1682 I915_WRITE(VLV_IIR, iir);
1683
a5e485a9 1684 I915_WRITE(VLV_IER, ier);
e5328c43 1685 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1ae3c34c
VS
1686
1687 if (hotplug_status)
91d14251 1688 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2ecb8ca4 1689
91d14251 1690 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
579de73b 1691 } while (0);
3278f67f 1692
9102650f 1693 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 1694
43f328d7
VS
1695 return ret;
1696}
1697
91d14251
TU
1698static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1699 u32 hotplug_trigger,
40e56410
VS
1700 const u32 hpd[HPD_NUM_PINS])
1701{
40e56410
VS
1702 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1703
6a39d7c9
JN
1704 /*
1705 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1706 * unless we touch the hotplug register, even if hotplug_trigger is
1707 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1708 * errors.
1709 */
40e56410 1710 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
6a39d7c9
JN
1711 if (!hotplug_trigger) {
1712 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1713 PORTD_HOTPLUG_STATUS_MASK |
1714 PORTC_HOTPLUG_STATUS_MASK |
1715 PORTB_HOTPLUG_STATUS_MASK;
1716 dig_hotplug_reg &= ~mask;
1717 }
1718
40e56410 1719 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
6a39d7c9
JN
1720 if (!hotplug_trigger)
1721 return;
40e56410 1722
cf53902f 1723 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
40e56410
VS
1724 dig_hotplug_reg, hpd,
1725 pch_port_hotplug_long_detect);
1726
91d14251 1727 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
40e56410
VS
1728}
1729
91d14251 1730static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
776ad806 1731{
d048a268 1732 enum pipe pipe;
b543fb04 1733 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
13cf5504 1734
91d14251 1735 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
91d131d2 1736
cfc33bf7
VS
1737 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1738 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1739 SDE_AUDIO_POWER_SHIFT);
00376ccf
WK
1740 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1741 port_name(port));
cfc33bf7 1742 }
776ad806 1743
ce99c256 1744 if (pch_iir & SDE_AUX_MASK)
91d14251 1745 dp_aux_irq_handler(dev_priv);
ce99c256 1746
776ad806 1747 if (pch_iir & SDE_GMBUS)
91d14251 1748 gmbus_irq_handler(dev_priv);
776ad806
JB
1749
1750 if (pch_iir & SDE_AUDIO_HDCP_MASK)
00376ccf 1751 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
776ad806
JB
1752
1753 if (pch_iir & SDE_AUDIO_TRANS_MASK)
00376ccf 1754 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
776ad806
JB
1755
1756 if (pch_iir & SDE_POISON)
00376ccf 1757 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
776ad806 1758
b8b65ccd 1759 if (pch_iir & SDE_FDI_MASK) {
055e393f 1760 for_each_pipe(dev_priv, pipe)
00376ccf
WK
1761 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1762 pipe_name(pipe),
1763 I915_READ(FDI_RX_IIR(pipe)));
b8b65ccd 1764 }
776ad806
JB
1765
1766 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
00376ccf 1767 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
776ad806
JB
1768
1769 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
00376ccf
WK
1770 drm_dbg(&dev_priv->drm,
1771 "PCH transcoder CRC error interrupt\n");
776ad806 1772
776ad806 1773 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
a2196033 1774 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
8664281b
PZ
1775
1776 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
a2196033 1777 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
8664281b
PZ
1778}
1779
91d14251 1780static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
8664281b 1781{
8664281b 1782 u32 err_int = I915_READ(GEN7_ERR_INT);
5a69b89f 1783 enum pipe pipe;
8664281b 1784
de032bf4 1785 if (err_int & ERR_INT_POISON)
00376ccf 1786 drm_err(&dev_priv->drm, "Poison interrupt\n");
de032bf4 1787
055e393f 1788 for_each_pipe(dev_priv, pipe) {
1f7247c0
DV
1789 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1790 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
8bf1e9f1 1791
5a69b89f 1792 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
91d14251
TU
1793 if (IS_IVYBRIDGE(dev_priv))
1794 ivb_pipe_crc_irq_handler(dev_priv, pipe);
5a69b89f 1795 else
91d14251 1796 hsw_pipe_crc_irq_handler(dev_priv, pipe);
5a69b89f
DV
1797 }
1798 }
8bf1e9f1 1799
8664281b
PZ
1800 I915_WRITE(GEN7_ERR_INT, err_int);
1801}
1802
91d14251 1803static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
8664281b 1804{
8664281b 1805 u32 serr_int = I915_READ(SERR_INT);
45c1cd87 1806 enum pipe pipe;
8664281b 1807
de032bf4 1808 if (serr_int & SERR_INT_POISON)
00376ccf 1809 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
de032bf4 1810
45c1cd87
MK
1811 for_each_pipe(dev_priv, pipe)
1812 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1813 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
8664281b
PZ
1814
1815 I915_WRITE(SERR_INT, serr_int);
776ad806
JB
1816}
1817
91d14251 1818static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
23e81d69 1819{
d048a268 1820 enum pipe pipe;
6dbf30ce 1821 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
13cf5504 1822
91d14251 1823 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
91d131d2 1824
cfc33bf7
VS
1825 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1826 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1827 SDE_AUDIO_POWER_SHIFT_CPT);
00376ccf
WK
1828 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1829 port_name(port));
cfc33bf7 1830 }
23e81d69
AJ
1831
1832 if (pch_iir & SDE_AUX_MASK_CPT)
91d14251 1833 dp_aux_irq_handler(dev_priv);
23e81d69
AJ
1834
1835 if (pch_iir & SDE_GMBUS_CPT)
91d14251 1836 gmbus_irq_handler(dev_priv);
23e81d69
AJ
1837
1838 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
00376ccf 1839 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
23e81d69
AJ
1840
1841 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
00376ccf 1842 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
23e81d69 1843
b8b65ccd 1844 if (pch_iir & SDE_FDI_MASK_CPT) {
055e393f 1845 for_each_pipe(dev_priv, pipe)
00376ccf
WK
1846 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1847 pipe_name(pipe),
1848 I915_READ(FDI_RX_IIR(pipe)));
b8b65ccd 1849 }
8664281b
PZ
1850
1851 if (pch_iir & SDE_ERROR_CPT)
91d14251 1852 cpt_serr_int_handler(dev_priv);
23e81d69
AJ
1853}
1854
58676af6 1855static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
31604222 1856{
58676af6 1857 u32 ddi_hotplug_trigger, tc_hotplug_trigger;
31604222 1858 u32 pin_mask = 0, long_mask = 0;
58676af6
LDM
1859 bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
1860 const u32 *pins;
31604222 1861
58676af6
LDM
1862 if (HAS_PCH_TGP(dev_priv)) {
1863 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1864 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
1865 tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
1866 pins = hpd_tgp;
943682e3
MR
1867 } else if (HAS_PCH_JSP(dev_priv)) {
1868 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1869 tc_hotplug_trigger = 0;
1870 pins = hpd_tgp;
58676af6 1871 } else if (HAS_PCH_MCC(dev_priv)) {
53448aed
VK
1872 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1873 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
fcb9bba4 1874 tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
d09ad3e7 1875 pins = hpd_icp;
8ef7e340 1876 } else {
48a1b8d4
PB
1877 drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
1878 "Unrecognized PCH type 0x%x\n",
1879 INTEL_PCH_TYPE(dev_priv));
943682e3 1880
8ef7e340
MR
1881 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1882 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
58676af6
LDM
1883 tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1884 pins = hpd_icp;
8ef7e340
MR
1885 }
1886
31604222
AS
1887 if (ddi_hotplug_trigger) {
1888 u32 dig_hotplug_reg;
1889
1890 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
1891 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1892
1893 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1894 ddi_hotplug_trigger,
c6f7acb8 1895 dig_hotplug_reg, pins,
31604222
AS
1896 icp_ddi_port_hotplug_long_detect);
1897 }
1898
1899 if (tc_hotplug_trigger) {
1900 u32 dig_hotplug_reg;
1901
1902 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
1903 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
1904
1905 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1906 tc_hotplug_trigger,
c6f7acb8 1907 dig_hotplug_reg, pins,
58676af6 1908 tc_port_hotplug_long_detect);
52dfdba0
LDM
1909 }
1910
1911 if (pin_mask)
1912 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1913
1914 if (pch_iir & SDE_GMBUS_ICP)
1915 gmbus_irq_handler(dev_priv);
1916}
1917
91d14251 1918static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
6dbf30ce 1919{
6dbf30ce
VS
1920 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1921 ~SDE_PORTE_HOTPLUG_SPT;
1922 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1923 u32 pin_mask = 0, long_mask = 0;
1924
1925 if (hotplug_trigger) {
1926 u32 dig_hotplug_reg;
1927
1928 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1929 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1930
cf53902f
RV
1931 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1932 hotplug_trigger, dig_hotplug_reg, hpd_spt,
74c0b395 1933 spt_port_hotplug_long_detect);
6dbf30ce
VS
1934 }
1935
1936 if (hotplug2_trigger) {
1937 u32 dig_hotplug_reg;
1938
1939 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1940 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1941
cf53902f
RV
1942 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1943 hotplug2_trigger, dig_hotplug_reg, hpd_spt,
6dbf30ce
VS
1944 spt_port_hotplug2_long_detect);
1945 }
1946
1947 if (pin_mask)
91d14251 1948 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
6dbf30ce
VS
1949
1950 if (pch_iir & SDE_GMBUS_CPT)
91d14251 1951 gmbus_irq_handler(dev_priv);
6dbf30ce
VS
1952}
1953
91d14251
TU
1954static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1955 u32 hotplug_trigger,
40e56410
VS
1956 const u32 hpd[HPD_NUM_PINS])
1957{
40e56410
VS
1958 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1959
1960 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1961 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1962
cf53902f 1963 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
40e56410
VS
1964 dig_hotplug_reg, hpd,
1965 ilk_port_hotplug_long_detect);
1966
91d14251 1967 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
40e56410
VS
1968}
1969
91d14251
TU
1970static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
1971 u32 de_iir)
c008bc6e 1972{
40da17c2 1973 enum pipe pipe;
e4ce95aa
VS
1974 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1975
40e56410 1976 if (hotplug_trigger)
91d14251 1977 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
c008bc6e
PZ
1978
1979 if (de_iir & DE_AUX_CHANNEL_A)
91d14251 1980 dp_aux_irq_handler(dev_priv);
c008bc6e
PZ
1981
1982 if (de_iir & DE_GSE)
91d14251 1983 intel_opregion_asle_intr(dev_priv);
c008bc6e 1984
c008bc6e 1985 if (de_iir & DE_POISON)
00376ccf 1986 drm_err(&dev_priv->drm, "Poison interrupt\n");
c008bc6e 1987
055e393f 1988 for_each_pipe(dev_priv, pipe) {
fd3a4024 1989 if (de_iir & DE_PIPE_VBLANK(pipe))
aca9310a 1990 intel_handle_vblank(dev_priv, pipe);
5b3a856b 1991
40da17c2 1992 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1f7247c0 1993 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
5b3a856b 1994
40da17c2 1995 if (de_iir & DE_PIPE_CRC_DONE(pipe))
91d14251 1996 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
c008bc6e
PZ
1997 }
1998
1999 /* check event from PCH */
2000 if (de_iir & DE_PCH_EVENT) {
2001 u32 pch_iir = I915_READ(SDEIIR);
2002
91d14251
TU
2003 if (HAS_PCH_CPT(dev_priv))
2004 cpt_irq_handler(dev_priv, pch_iir);
c008bc6e 2005 else
91d14251 2006 ibx_irq_handler(dev_priv, pch_iir);
c008bc6e
PZ
2007
2008 /* should clear PCH hotplug event before clearing the CPU irq */
2009 I915_WRITE(SDEIIR, pch_iir);
2010 }
2011
cf819eff 2012 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
3e7abf81 2013 gen5_rps_irq_handler(&dev_priv->gt.rps);
c008bc6e
PZ
2014}
2015
91d14251
TU
2016static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2017 u32 de_iir)
9719fb98 2018{
07d27e20 2019 enum pipe pipe;
23bb4cb5
VS
2020 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2021
40e56410 2022 if (hotplug_trigger)
91d14251 2023 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
9719fb98
PZ
2024
2025 if (de_iir & DE_ERR_INT_IVB)
91d14251 2026 ivb_err_int_handler(dev_priv);
9719fb98 2027
54fd3149
DP
2028 if (de_iir & DE_EDP_PSR_INT_HSW) {
2029 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2030
2031 intel_psr_irq_handler(dev_priv, psr_iir);
2032 I915_WRITE(EDP_PSR_IIR, psr_iir);
2033 }
fc340442 2034
9719fb98 2035 if (de_iir & DE_AUX_CHANNEL_A_IVB)
91d14251 2036 dp_aux_irq_handler(dev_priv);
9719fb98
PZ
2037
2038 if (de_iir & DE_GSE_IVB)
91d14251 2039 intel_opregion_asle_intr(dev_priv);
9719fb98 2040
055e393f 2041 for_each_pipe(dev_priv, pipe) {
fd3a4024 2042 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
aca9310a 2043 intel_handle_vblank(dev_priv, pipe);
9719fb98
PZ
2044 }
2045
2046 /* check event from PCH */
91d14251 2047 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
9719fb98
PZ
2048 u32 pch_iir = I915_READ(SDEIIR);
2049
91d14251 2050 cpt_irq_handler(dev_priv, pch_iir);
9719fb98
PZ
2051
2052 /* clear PCH hotplug event before clearing the CPU irq */
2053 I915_WRITE(SDEIIR, pch_iir);
2054 }
2055}
2056
72c90f62
OM
2057/*
2058 * To handle irqs with the minimum potential races with fresh interrupts, we:
2059 * 1 - Disable Master Interrupt Control.
2060 * 2 - Find the source(s) of the interrupt.
2061 * 3 - Clear the Interrupt Identity bits (IIR).
2062 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2063 * 5 - Re-enable Master Interrupt Control.
2064 */
9eae5e27 2065static irqreturn_t ilk_irq_handler(int irq, void *arg)
b1f14ad0 2066{
b318b824 2067 struct drm_i915_private *dev_priv = arg;
f1af8fc1 2068 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
0e43406b 2069 irqreturn_t ret = IRQ_NONE;
b1f14ad0 2070
2dd2a883
ID
2071 if (!intel_irqs_enabled(dev_priv))
2072 return IRQ_NONE;
2073
1f814dac 2074 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
9102650f 2075 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 2076
b1f14ad0
JB
2077 /* disable master interrupt before clearing iir */
2078 de_ier = I915_READ(DEIER);
2079 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
b1f14ad0 2080
44498aea
PZ
2081 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2082 * interrupts will be stored on its back queue, and then we'll be
2083 * able to process them after we restore SDEIER (as soon as we restore
2084 * it, we'll get an interrupt if SDEIIR still has something to process
2085 * due to its back queue). */
91d14251 2086 if (!HAS_PCH_NOP(dev_priv)) {
ab5c608b
BW
2087 sde_ier = I915_READ(SDEIER);
2088 I915_WRITE(SDEIER, 0);
ab5c608b 2089 }
44498aea 2090
72c90f62
OM
2091 /* Find, clear, then process each source of interrupt */
2092
b1f14ad0 2093 gt_iir = I915_READ(GTIIR);
0e43406b 2094 if (gt_iir) {
72c90f62
OM
2095 I915_WRITE(GTIIR, gt_iir);
2096 ret = IRQ_HANDLED;
91d14251 2097 if (INTEL_GEN(dev_priv) >= 6)
cf1c97dc 2098 gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
d8fc8a47 2099 else
cf1c97dc 2100 gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
b1f14ad0
JB
2101 }
2102
0e43406b
CW
2103 de_iir = I915_READ(DEIIR);
2104 if (de_iir) {
72c90f62
OM
2105 I915_WRITE(DEIIR, de_iir);
2106 ret = IRQ_HANDLED;
91d14251
TU
2107 if (INTEL_GEN(dev_priv) >= 7)
2108 ivb_display_irq_handler(dev_priv, de_iir);
f1af8fc1 2109 else
91d14251 2110 ilk_display_irq_handler(dev_priv, de_iir);
b1f14ad0
JB
2111 }
2112
91d14251 2113 if (INTEL_GEN(dev_priv) >= 6) {
f1af8fc1
PZ
2114 u32 pm_iir = I915_READ(GEN6_PMIIR);
2115 if (pm_iir) {
f1af8fc1
PZ
2116 I915_WRITE(GEN6_PMIIR, pm_iir);
2117 ret = IRQ_HANDLED;
3e7abf81 2118 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
f1af8fc1 2119 }
0e43406b 2120 }
b1f14ad0 2121
b1f14ad0 2122 I915_WRITE(DEIER, de_ier);
74093f3e 2123 if (!HAS_PCH_NOP(dev_priv))
ab5c608b 2124 I915_WRITE(SDEIER, sde_ier);
b1f14ad0 2125
1f814dac 2126 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
9102650f 2127 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 2128
b1f14ad0
JB
2129 return ret;
2130}
2131
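/*
 * Illustrative sketch only (not i915 code): the SDEIER trick that
 * ilk_irq_handler() above relies on.  South-display interrupts are disabled
 * for the duration of the handler and the saved enable value is written back
 * afterwards; if the south IIR still has bits queued at that point, restoring
 * the enable re-raises the interrupt so nothing is lost.  The struct and
 * helper are hypothetical stand-ins for SDEIER/SDEIIR.
 */
#include <stdint.h>

struct south_regs {
	uint32_t sde_ier;	/* analogous to SDEIER */
	uint32_t sde_iir;	/* analogous to SDEIIR */
};

static void run_with_south_masked(struct south_regs *s,
				  void (*body)(struct south_regs *s))
{
	uint32_t saved = s->sde_ier;

	s->sde_ier = 0;		/* further PCH interrupts queue up */
	body(s);		/* reads/acks s->sde_iir at most once */
	s->sde_ier = saved;	/* replays anything still pending */
}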
91d14251
TU
2132static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2133 u32 hotplug_trigger,
40e56410 2134 const u32 hpd[HPD_NUM_PINS])
d04a492d 2135{
cebd87a0 2136 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
d04a492d 2137
a52bb15b
VS
2138 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2139 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
d04a492d 2140
cf53902f 2141 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
40e56410 2142 dig_hotplug_reg, hpd,
cebd87a0 2143 bxt_port_hotplug_long_detect);
40e56410 2144
91d14251 2145 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
d04a492d
SS
2146}
2147
121e758e
DP
2148static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2149{
2150 u32 pin_mask = 0, long_mask = 0;
b796b971
DP
2151 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2152 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
48ef15d3
JRS
2153 long_pulse_detect_func long_pulse_detect;
2154 const u32 *hpd;
2155
2156 if (INTEL_GEN(dev_priv) >= 12) {
2157 long_pulse_detect = gen12_port_hotplug_long_detect;
2158 hpd = hpd_gen12;
2159 } else {
2160 long_pulse_detect = gen11_port_hotplug_long_detect;
2161 hpd = hpd_gen11;
2162 }
121e758e 2163
121e758e 2164 if (trigger_tc) {
b796b971
DP
2165 u32 dig_hotplug_reg;
2166
121e758e
DP
2167 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2168 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2169
2170 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
48ef15d3 2171 dig_hotplug_reg, hpd, long_pulse_detect);
b796b971
DP
2172 }
2173
2174 if (trigger_tbt) {
2175 u32 dig_hotplug_reg;
2176
2177 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2178 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2179
2180 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
48ef15d3 2181 dig_hotplug_reg, hpd, long_pulse_detect);
b796b971
DP
2182 }
2183
2184 if (pin_mask)
121e758e 2185 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
b796b971 2186 else
00376ccf
WK
2187 drm_err(&dev_priv->drm,
2188 "Unexpected DE HPD interrupt 0x%08x\n", iir);
121e758e
DP
2189}
2190
9d17210f
LDM
2191static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2192{
55523360 2193 u32 mask;
9d17210f 2194
55523360 2195 if (INTEL_GEN(dev_priv) >= 12)
55523360
LDM
2196 return TGL_DE_PORT_AUX_DDIA |
2197 TGL_DE_PORT_AUX_DDIB |
e5df52dc
MR
2198 TGL_DE_PORT_AUX_DDIC |
2199 TGL_DE_PORT_AUX_USBC1 |
2200 TGL_DE_PORT_AUX_USBC2 |
2201 TGL_DE_PORT_AUX_USBC3 |
2202 TGL_DE_PORT_AUX_USBC4 |
2203 TGL_DE_PORT_AUX_USBC5 |
2204 TGL_DE_PORT_AUX_USBC6;
2205
55523360
LDM
2206
2207 mask = GEN8_AUX_CHANNEL_A;
9d17210f
LDM
2208 if (INTEL_GEN(dev_priv) >= 9)
2209 mask |= GEN9_AUX_CHANNEL_B |
2210 GEN9_AUX_CHANNEL_C |
2211 GEN9_AUX_CHANNEL_D;
2212
55523360 2213 if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
9d17210f
LDM
2214 mask |= CNL_AUX_CHANNEL_F;
2215
55523360
LDM
2216 if (IS_GEN(dev_priv, 11))
2217 mask |= ICL_AUX_CHANNEL_E;
9d17210f
LDM
2218
2219 return mask;
2220}
2221
5270130d
VS
2222static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2223{
d506a65d
MR
2224 if (INTEL_GEN(dev_priv) >= 11)
2225 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2226 else if (INTEL_GEN(dev_priv) >= 9)
5270130d
VS
2227 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2228 else
2229 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2230}
2231
46c63d24
JRS
2232static void
2233gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2234{
2235 bool found = false;
2236
2237 if (iir & GEN8_DE_MISC_GSE) {
2238 intel_opregion_asle_intr(dev_priv);
2239 found = true;
2240 }
2241
2242 if (iir & GEN8_DE_EDP_PSR) {
8241cfbe
JRS
2243 u32 psr_iir;
2244 i915_reg_t iir_reg;
2245
2246 if (INTEL_GEN(dev_priv) >= 12)
2247 iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2248 else
2249 iir_reg = EDP_PSR_IIR;
2250
2251 psr_iir = I915_READ(iir_reg);
2252 I915_WRITE(iir_reg, psr_iir);
2253
2254 if (psr_iir)
2255 found = true;
46c63d24
JRS
2256
2257 intel_psr_irq_handler(dev_priv, psr_iir);
46c63d24
JRS
2258 }
2259
2260 if (!found)
00376ccf 2261 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
46c63d24
JRS
2262}
2263
f11a0f46
TU
2264static irqreturn_t
2265gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
abd58f01 2266{
abd58f01 2267 irqreturn_t ret = IRQ_NONE;
f11a0f46 2268 u32 iir;
c42664cc 2269 enum pipe pipe;
88e04703 2270
abd58f01 2271 if (master_ctl & GEN8_DE_MISC_IRQ) {
e32192e1
TU
2272 iir = I915_READ(GEN8_DE_MISC_IIR);
2273 if (iir) {
2274 I915_WRITE(GEN8_DE_MISC_IIR, iir);
abd58f01 2275 ret = IRQ_HANDLED;
46c63d24
JRS
2276 gen8_de_misc_irq_handler(dev_priv, iir);
2277 } else {
00376ccf
WK
2278 drm_err(&dev_priv->drm,
2279 "The master control interrupt lied (DE MISC)!\n");
46c63d24 2280 }
abd58f01
BW
2281 }
2282
121e758e
DP
2283 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2284 iir = I915_READ(GEN11_DE_HPD_IIR);
2285 if (iir) {
2286 I915_WRITE(GEN11_DE_HPD_IIR, iir);
2287 ret = IRQ_HANDLED;
2288 gen11_hpd_irq_handler(dev_priv, iir);
2289 } else {
00376ccf
WK
2290 drm_err(&dev_priv->drm,
2291 "The master control interrupt lied, (DE HPD)!\n");
121e758e
DP
2292 }
2293 }
2294
6d766f02 2295 if (master_ctl & GEN8_DE_PORT_IRQ) {
e32192e1
TU
2296 iir = I915_READ(GEN8_DE_PORT_IIR);
2297 if (iir) {
2298 u32 tmp_mask;
d04a492d 2299 bool found = false;
cebd87a0 2300
e32192e1 2301 I915_WRITE(GEN8_DE_PORT_IIR, iir);
6d766f02 2302 ret = IRQ_HANDLED;
88e04703 2303
9d17210f 2304 if (iir & gen8_de_port_aux_mask(dev_priv)) {
91d14251 2305 dp_aux_irq_handler(dev_priv);
d04a492d
SS
2306 found = true;
2307 }
2308
cc3f90f0 2309 if (IS_GEN9_LP(dev_priv)) {
e32192e1
TU
2310 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2311 if (tmp_mask) {
91d14251
TU
2312 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2313 hpd_bxt);
e32192e1
TU
2314 found = true;
2315 }
2316 } else if (IS_BROADWELL(dev_priv)) {
2317 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2318 if (tmp_mask) {
91d14251
TU
2319 ilk_hpd_irq_handler(dev_priv,
2320 tmp_mask, hpd_bdw);
e32192e1
TU
2321 found = true;
2322 }
d04a492d
SS
2323 }
2324
cc3f90f0 2325 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
91d14251 2326 gmbus_irq_handler(dev_priv);
9e63743e
SS
2327 found = true;
2328 }
2329
d04a492d 2330 if (!found)
00376ccf
WK
2331 drm_err(&dev_priv->drm,
2332 "Unexpected DE Port interrupt\n");
6d766f02 2333 }
38cc46d7 2334 else
00376ccf
WK
2335 drm_err(&dev_priv->drm,
2336 "The master control interrupt lied (DE PORT)!\n");
6d766f02
DV
2337 }
2338
055e393f 2339 for_each_pipe(dev_priv, pipe) {
fd3a4024 2340 u32 fault_errors;
abd58f01 2341
c42664cc
DV
2342 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2343 continue;
abd58f01 2344
e32192e1
TU
2345 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2346 if (!iir) {
00376ccf
WK
2347 drm_err(&dev_priv->drm,
2348 "The master control interrupt lied (DE PIPE)!\n");
e32192e1
TU
2349 continue;
2350 }
770de83d 2351
e32192e1
TU
2352 ret = IRQ_HANDLED;
2353 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
38cc46d7 2354
fd3a4024 2355 if (iir & GEN8_PIPE_VBLANK)
aca9310a 2356 intel_handle_vblank(dev_priv, pipe);
38cc46d7 2357
e32192e1 2358 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
91d14251 2359 hsw_pipe_crc_irq_handler(dev_priv, pipe);
38cc46d7 2360
e32192e1
TU
2361 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2362 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
770de83d 2363
5270130d 2364 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
e32192e1 2365 if (fault_errors)
00376ccf
WK
2366 drm_err(&dev_priv->drm,
2367 "Fault errors on pipe %c: 0x%08x\n",
2368 pipe_name(pipe),
2369 fault_errors);
abd58f01
BW
2370 }
2371
91d14251 2372 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
266ea3d9 2373 master_ctl & GEN8_DE_PCH_IRQ) {
92d03a80
DV
2374 /*
2375 * FIXME(BDW): Assume for now that the new interrupt handling
2376 * scheme also closed the SDE interrupt handling race we've seen
2377 * on older pch-split platforms. But this needs testing.
2378 */
e32192e1
TU
2379 iir = I915_READ(SDEIIR);
2380 if (iir) {
2381 I915_WRITE(SDEIIR, iir);
92d03a80 2382 ret = IRQ_HANDLED;
6dbf30ce 2383
58676af6
LDM
2384 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2385 icp_irq_handler(dev_priv, iir);
c6c30b91 2386 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
91d14251 2387 spt_irq_handler(dev_priv, iir);
6dbf30ce 2388 else
91d14251 2389 cpt_irq_handler(dev_priv, iir);
2dfb0b81
JN
2390 } else {
2391 /*
2392 * Like on previous PCH there seems to be something
2393 * fishy going on with forwarding PCH interrupts.
2394 */
00376ccf
WK
2395 drm_dbg(&dev_priv->drm,
2396 "The master control interrupt lied (SDE)!\n");
2dfb0b81 2397 }
92d03a80
DV
2398 }
2399
f11a0f46
TU
2400 return ret;
2401}
2402
4376b9c9
MK
2403static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2404{
2405 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2406
2407 /*
2408 * Now with master disabled, get a sample of level indications
2409 * for this interrupt. Indications will be cleared on related acks.
2410 * New indications can and will light up during processing,
2411 * and will generate a new interrupt after enabling master.
2412 */
2413 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2414}
2415
2416static inline void gen8_master_intr_enable(void __iomem * const regs)
2417{
2418 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2419}
2420
f11a0f46
TU
2421static irqreturn_t gen8_irq_handler(int irq, void *arg)
2422{
b318b824 2423 struct drm_i915_private *dev_priv = arg;
25286aac 2424 void __iomem * const regs = dev_priv->uncore.regs;
f11a0f46 2425 u32 master_ctl;
f11a0f46
TU
2426
2427 if (!intel_irqs_enabled(dev_priv))
2428 return IRQ_NONE;
2429
4376b9c9
MK
2430 master_ctl = gen8_master_intr_disable(regs);
2431 if (!master_ctl) {
2432 gen8_master_intr_enable(regs);
f11a0f46 2433 return IRQ_NONE;
4376b9c9 2434 }
f11a0f46 2435
6cc32f15
CW
2436 /* Find, queue (onto bottom-halves), then clear each source */
2437 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
f0fd96f5
CW
2438
2439 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2440 if (master_ctl & ~GEN8_GT_IRQS) {
9102650f 2441 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
f0fd96f5 2442 gen8_de_irq_handler(dev_priv, master_ctl);
9102650f 2443 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
f0fd96f5 2444 }
f11a0f46 2445
4376b9c9 2446 gen8_master_intr_enable(regs);
abd58f01 2447
55ef72f2 2448 return IRQ_HANDLED;
abd58f01
BW
2449}
2450
7a909383 2451static u32
9b77011e 2452gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
df0d28c1 2453{
9b77011e 2454 void __iomem * const regs = gt->uncore->regs;
7a909383 2455 u32 iir;
df0d28c1
DP
2456
2457 if (!(master_ctl & GEN11_GU_MISC_IRQ))
7a909383
CW
2458 return 0;
2459
2460 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2461 if (likely(iir))
2462 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
df0d28c1 2463
7a909383 2464 return iir;
df0d28c1
DP
2465}
2466
2467static void
9b77011e 2468gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
df0d28c1 2469{
df0d28c1 2470 if (iir & GEN11_GU_MISC_GSE)
9b77011e 2471 intel_opregion_asle_intr(gt->i915);
df0d28c1
DP
2472}
2473
81067b71
MK
2474static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2475{
2476 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2477
2478 /*
2479 * Now with master disabled, get a sample of level indications
2480 * for this interrupt. Indications will be cleared on related acks.
2481 * New indications can and will light up during processing,
2483 * and will generate a new interrupt after enabling master.
2483 */
2484 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2485}
2486
2487static inline void gen11_master_intr_enable(void __iomem * const regs)
2488{
2489 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2490}
2491
a3265d85
MR
2492static void
2493gen11_display_irq_handler(struct drm_i915_private *i915)
2494{
2495 void __iomem * const regs = i915->uncore.regs;
2496 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2497
2498 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2499 /*
2500 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2501 * for the display related bits.
2502 */
2503 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2504 gen8_de_irq_handler(i915, disp_ctl);
2505 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2506 GEN11_DISPLAY_IRQ_ENABLE);
2507
2508 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2509}
2510
7be8782a
LDM
2511static __always_inline irqreturn_t
2512__gen11_irq_handler(struct drm_i915_private * const i915,
2513 u32 (*intr_disable)(void __iomem * const regs),
2514 void (*intr_enable)(void __iomem * const regs))
51951ae7 2515{
25286aac 2516 void __iomem * const regs = i915->uncore.regs;
9b77011e 2517 struct intel_gt *gt = &i915->gt;
51951ae7 2518 u32 master_ctl;
df0d28c1 2519 u32 gu_misc_iir;
51951ae7
MK
2520
2521 if (!intel_irqs_enabled(i915))
2522 return IRQ_NONE;
2523
7be8782a 2524 master_ctl = intr_disable(regs);
81067b71 2525 if (!master_ctl) {
7be8782a 2526 intr_enable(regs);
51951ae7 2527 return IRQ_NONE;
81067b71 2528 }
51951ae7 2529
6cc32f15 2530 /* Find, queue (onto bottom-halves), then clear each source */
9b77011e 2531 gen11_gt_irq_handler(gt, master_ctl);
51951ae7
MK
2532
2533 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
a3265d85
MR
2534 if (master_ctl & GEN11_DISPLAY_IRQ)
2535 gen11_display_irq_handler(i915);
51951ae7 2536
9b77011e 2537 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
df0d28c1 2538
7be8782a 2539 intr_enable(regs);
51951ae7 2540
9b77011e 2541 gen11_gu_misc_irq_handler(gt, gu_misc_iir);
df0d28c1 2542
51951ae7
MK
2543 return IRQ_HANDLED;
2544}
2545
7be8782a
LDM
2546static irqreturn_t gen11_irq_handler(int irq, void *arg)
2547{
2548 return __gen11_irq_handler(arg,
2549 gen11_master_intr_disable,
2550 gen11_master_intr_enable);
2551}
2552
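/*
 * Illustrative sketch only (not i915 code): the ack/handle split used for
 * the GU_MISC source above.  The identity bits are read and cleared while
 * the master interrupt is still disabled, but the (potentially slower)
 * handling runs after the master has been re-enabled, keeping the masked
 * window short.  The struct and both helpers are hypothetical.
 */
#include <stdint.h>

struct misc_regs {
	uint32_t misc_iir;	/* identity bits for the misc source */
};

/* Ack step: runs while the master interrupt is still disabled. */
static uint32_t misc_ack(struct misc_regs *r, uint32_t master_ctl,
			 uint32_t misc_bit)
{
	uint32_t iir;

	if (!(master_ctl & misc_bit))
		return 0;

	iir = r->misc_iir;
	if (iir)
		r->misc_iir = 0;	/* clear now, handle later */

	return iir;
}

/* Handle step: runs after the master interrupt has been re-enabled. */
static void misc_handle(uint32_t iir, void (*asle_handler)(void))
{
	if (iir)
		asle_handler();
}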
42f52ef8
KP
2553/* Called from drm generic code, passed 'crtc' which
2554 * we use as a pipe index
2555 */
08fa8fd0 2556int i8xx_enable_vblank(struct drm_crtc *crtc)
0a3e67a4 2557{
08fa8fd0
VS
2558 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2559 enum pipe pipe = to_intel_crtc(crtc)->pipe;
e9d21d7f 2560 unsigned long irqflags;
71e0ffa5 2561
1ec14ad3 2562 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
86e83e35 2563 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1ec14ad3 2564 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 2565
0a3e67a4
JB
2566 return 0;
2567}
2568
7d423af9 2569int i915gm_enable_vblank(struct drm_crtc *crtc)
d938da6b 2570{
08fa8fd0 2571 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
d938da6b 2572
7d423af9
VS
2573 /*
2574 * Vblank interrupts fail to wake the device up from C2+.
2575 * Disabling render clock gating during C-states avoids
2576 * the problem. There is a small power cost so we do this
2577 * only when vblank interrupts are actually enabled.
2578 */
2579 if (dev_priv->vblank_enabled++ == 0)
2580 I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
d938da6b 2581
08fa8fd0 2582 return i8xx_enable_vblank(crtc);
d938da6b
VS
2583}
2584
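/*
 * Illustrative sketch only (not i915 code): the refcounted workaround
 * pattern used by i915gm_enable_vblank()/i915gm_disable_vblank().  A costly
 * workaround (here an abstract apply/remove pair) is applied on the first
 * user and removed when the last user goes away.  All names below are
 * hypothetical.
 */
struct refcounted_workaround {
	int users;
	void (*apply)(void);
	void (*remove)(void);
};

static void workaround_get(struct refcounted_workaround *w)
{
	if (w->users++ == 0)
		w->apply();	/* first enable pays the power cost */
}

static void workaround_put(struct refcounted_workaround *w)
{
	if (--w->users == 0)
		w->remove();	/* last disable removes it again */
}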
08fa8fd0 2585int i965_enable_vblank(struct drm_crtc *crtc)
f796cf8f 2586{
08fa8fd0
VS
2587 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2588 enum pipe pipe = to_intel_crtc(crtc)->pipe;
f796cf8f
JB
2589 unsigned long irqflags;
2590
f796cf8f 2591 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
86e83e35
CW
2592 i915_enable_pipestat(dev_priv, pipe,
2593 PIPE_START_VBLANK_INTERRUPT_STATUS);
b1f14ad0
JB
2594 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2595
2596 return 0;
2597}
2598
08fa8fd0 2599int ilk_enable_vblank(struct drm_crtc *crtc)
7e231dbe 2600{
08fa8fd0
VS
2601 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2602 enum pipe pipe = to_intel_crtc(crtc)->pipe;
7e231dbe 2603 unsigned long irqflags;
a9c287c9 2604 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
86e83e35 2605 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
7e231dbe 2606
7e231dbe 2607 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
86e83e35 2608 ilk_enable_display_irq(dev_priv, bit);
7e231dbe
JB
2609 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2610
2e8bf223
DP
2611 /* Even though there is no DMC, frame counter can get stuck when
2612 * PSR is active as no frames are generated.
2613 */
2614 if (HAS_PSR(dev_priv))
08fa8fd0 2615 drm_crtc_vblank_restore(crtc);
2e8bf223 2616
7e231dbe
JB
2617 return 0;
2618}
2619
08fa8fd0 2620int bdw_enable_vblank(struct drm_crtc *crtc)
abd58f01 2621{
08fa8fd0
VS
2622 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2623 enum pipe pipe = to_intel_crtc(crtc)->pipe;
abd58f01 2624 unsigned long irqflags;
abd58f01 2625
abd58f01 2626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
013d3752 2627 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
abd58f01 2628 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
013d3752 2629
2e8bf223
DP
2630 /* Even if there is no DMC, frame counter can get stuck when
2631 * PSR is active as no frames are generated, so check only for PSR.
2632 */
2633 if (HAS_PSR(dev_priv))
08fa8fd0 2634 drm_crtc_vblank_restore(crtc);
2e8bf223 2635
abd58f01
BW
2636 return 0;
2637}
2638
42f52ef8
KP
2639/* Called from drm generic code, passed 'crtc' which
2640 * we use as a pipe index
2641 */
08fa8fd0 2642void i8xx_disable_vblank(struct drm_crtc *crtc)
0a3e67a4 2643{
08fa8fd0
VS
2644 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2645 enum pipe pipe = to_intel_crtc(crtc)->pipe;
e9d21d7f 2646 unsigned long irqflags;
0a3e67a4 2647
1ec14ad3 2648 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
86e83e35 2649 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
f796cf8f
JB
2650 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2651}
2652
7d423af9 2653void i915gm_disable_vblank(struct drm_crtc *crtc)
d938da6b 2654{
08fa8fd0 2655 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
d938da6b 2656
08fa8fd0 2657 i8xx_disable_vblank(crtc);
d938da6b 2658
7d423af9
VS
2659 if (--dev_priv->vblank_enabled == 0)
2660 I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
d938da6b
VS
2661}
2662
08fa8fd0 2663void i965_disable_vblank(struct drm_crtc *crtc)
f796cf8f 2664{
08fa8fd0
VS
2665 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2666 enum pipe pipe = to_intel_crtc(crtc)->pipe;
f796cf8f
JB
2667 unsigned long irqflags;
2668
2669 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
86e83e35
CW
2670 i915_disable_pipestat(dev_priv, pipe,
2671 PIPE_START_VBLANK_INTERRUPT_STATUS);
b1f14ad0
JB
2672 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2673}
2674
08fa8fd0 2675void ilk_disable_vblank(struct drm_crtc *crtc)
7e231dbe 2676{
08fa8fd0
VS
2677 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2678 enum pipe pipe = to_intel_crtc(crtc)->pipe;
7e231dbe 2679 unsigned long irqflags;
a9c287c9 2680 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
86e83e35 2681 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
7e231dbe
JB
2682
2683 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
86e83e35 2684 ilk_disable_display_irq(dev_priv, bit);
7e231dbe
JB
2685 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2686}
2687
08fa8fd0 2688void bdw_disable_vblank(struct drm_crtc *crtc)
abd58f01 2689{
08fa8fd0
VS
2690 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2691 enum pipe pipe = to_intel_crtc(crtc)->pipe;
abd58f01 2692 unsigned long irqflags;
abd58f01 2693
abd58f01 2694 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
013d3752 2695 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
abd58f01
BW
2696 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2697}
2698
b243f530 2699static void ibx_irq_reset(struct drm_i915_private *dev_priv)
91738a95 2700{
b16b2a2f
PZ
2701 struct intel_uncore *uncore = &dev_priv->uncore;
2702
6e266956 2703 if (HAS_PCH_NOP(dev_priv))
91738a95
PZ
2704 return;
2705
b16b2a2f 2706 GEN3_IRQ_RESET(uncore, SDE);
105b122e 2707
6e266956 2708 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
105b122e 2709 I915_WRITE(SERR_INT, 0xffffffff);
622364b6 2710}
105b122e 2711
622364b6
PZ
2712/*
2713 * SDEIER is also touched by the interrupt handler to work around missed PCH
2714 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2715 * instead we unconditionally enable all PCH interrupt sources here, but then
2716 * only unmask them as needed with SDEIMR.
2717 *
2718 * This function needs to be called before interrupts are enabled.
2719 */
b318b824 2720static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
622364b6 2721{
6e266956 2722 if (HAS_PCH_NOP(dev_priv))
622364b6
PZ
2723 return;
2724
48a1b8d4 2725 drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
91738a95
PZ
2726 I915_WRITE(SDEIER, 0xffffffff);
2727 POSTING_READ(SDEIER);
2728}
2729
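/*
 * Illustrative sketch only (not i915 code): the "enable everything in IER,
 * gate with IMR" convention the comment above relies on.  Because the
 * interrupt handler also writes the enable register, it is programmed once
 * (all sources on) before interrupts are enabled, and individual sources are
 * then masked or unmasked purely through the mask register.  The structure
 * below is a hypothetical stand-in, not real i915 state.
 */
#include <stdint.h>

struct src_regs {
	uint32_t ier;	/* which sources may assert at all   */
	uint32_t imr;	/* which asserted sources are hidden */
};

static void src_regs_preinstall(struct src_regs *r)
{
	r->ier = 0xffffffff;	/* every source enabled, once, up front */
	r->imr = 0xffffffff;	/* ...but all of them masked initially  */
}

static void src_unmask(struct src_regs *r, uint32_t bits)
{
	r->imr &= ~bits;	/* runtime control happens via IMR only */
}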
70591a41
VS
2730static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2731{
b16b2a2f
PZ
2732 struct intel_uncore *uncore = &dev_priv->uncore;
2733
71b8b41d 2734 if (IS_CHERRYVIEW(dev_priv))
f0818984 2735 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
71b8b41d 2736 else
f0818984 2737 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
71b8b41d 2738
ad22d106 2739 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
f0818984 2740 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
70591a41 2741
44d9241e 2742 i9xx_pipestat_irq_reset(dev_priv);
70591a41 2743
b16b2a2f 2744 GEN3_IRQ_RESET(uncore, VLV_);
8bd099a7 2745 dev_priv->irq_mask = ~0u;
70591a41
VS
2746}
2747
8bb61306
VS
2748static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2749{
b16b2a2f
PZ
2750 struct intel_uncore *uncore = &dev_priv->uncore;
2751
8bb61306 2752 u32 pipestat_mask;
9ab981f2 2753 u32 enable_mask;
8bb61306
VS
2754 enum pipe pipe;
2755
842ebf7a 2756 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
8bb61306
VS
2757
2758 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2759 for_each_pipe(dev_priv, pipe)
2760 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2761
9ab981f2
VS
2762 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2763 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
ebf5f921
VS
2764 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2765 I915_LPE_PIPE_A_INTERRUPT |
2766 I915_LPE_PIPE_B_INTERRUPT;
2767
8bb61306 2768 if (IS_CHERRYVIEW(dev_priv))
ebf5f921
VS
2769 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2770 I915_LPE_PIPE_C_INTERRUPT;
6b7eafc1 2771
48a1b8d4 2772 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
6b7eafc1 2773
9ab981f2
VS
2774 dev_priv->irq_mask = ~enable_mask;
2775
b16b2a2f 2776 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
8bb61306
VS
2777}
2778
2779/* drm_dma.h hooks */
9eae5e27 2781static void ilk_irq_reset(struct drm_i915_private *dev_priv)
8bb61306 2782{
b16b2a2f 2783 struct intel_uncore *uncore = &dev_priv->uncore;
8bb61306 2784
b16b2a2f 2785 GEN3_IRQ_RESET(uncore, DE);
cf819eff 2786 if (IS_GEN(dev_priv, 7))
f0818984 2787 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
8bb61306 2788
fc340442 2789 if (IS_HASWELL(dev_priv)) {
f0818984
TU
2790 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2791 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
fc340442
DV
2792 }
2793
cf1c97dc 2794 gen5_gt_irq_reset(&dev_priv->gt);
8bb61306 2795
b243f530 2796 ibx_irq_reset(dev_priv);
8bb61306
VS
2797}
2798
b318b824 2799static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
7e231dbe 2800{
34c7b8a7
VS
2801 I915_WRITE(VLV_MASTER_IER, 0);
2802 POSTING_READ(VLV_MASTER_IER);
2803
cf1c97dc 2804 gen5_gt_irq_reset(&dev_priv->gt);
7e231dbe 2805
ad22d106 2806 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
2807 if (dev_priv->display_irqs_enabled)
2808 vlv_display_irq_reset(dev_priv);
ad22d106 2809 spin_unlock_irq(&dev_priv->irq_lock);
7e231dbe
JB
2810}
2811
b318b824 2812static void gen8_irq_reset(struct drm_i915_private *dev_priv)
abd58f01 2813{
b16b2a2f 2814 struct intel_uncore *uncore = &dev_priv->uncore;
d048a268 2815 enum pipe pipe;
abd58f01 2816
25286aac 2817 gen8_master_intr_disable(dev_priv->uncore.regs);
abd58f01 2818
cf1c97dc 2819 gen8_gt_irq_reset(&dev_priv->gt);
abd58f01 2820
f0818984
TU
2821 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2822 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
e04f7ece 2823
055e393f 2824 for_each_pipe(dev_priv, pipe)
f458ebbc
DV
2825 if (intel_display_power_is_enabled(dev_priv,
2826 POWER_DOMAIN_PIPE(pipe)))
b16b2a2f 2827 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
abd58f01 2828
b16b2a2f
PZ
2829 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2830 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2831 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
abd58f01 2832
6e266956 2833 if (HAS_PCH_SPLIT(dev_priv))
b243f530 2834 ibx_irq_reset(dev_priv);
abd58f01 2835}
09f2344d 2836
a3265d85 2837static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
51951ae7 2838{
b16b2a2f 2839 struct intel_uncore *uncore = &dev_priv->uncore;
d048a268 2840 enum pipe pipe;
51951ae7 2841
f0818984 2842 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
51951ae7 2843
8241cfbe
JRS
2844 if (INTEL_GEN(dev_priv) >= 12) {
2845 enum transcoder trans;
2846
2847 for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
2848 enum intel_display_power_domain domain;
2849
2850 domain = POWER_DOMAIN_TRANSCODER(trans);
2851 if (!intel_display_power_is_enabled(dev_priv, domain))
2852 continue;
2853
2854 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2855 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2856 }
2857 } else {
2858 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2859 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2860 }
62819dfd 2861
51951ae7
MK
2862 for_each_pipe(dev_priv, pipe)
2863 if (intel_display_power_is_enabled(dev_priv,
2864 POWER_DOMAIN_PIPE(pipe)))
b16b2a2f 2865 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
51951ae7 2866
b16b2a2f
PZ
2867 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2868 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2869 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
31604222 2870
29b43ae2 2871 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
b16b2a2f 2872 GEN3_IRQ_RESET(uncore, SDE);
51951ae7
MK
2873}
2874
a3265d85
MR
2875static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2876{
2877 struct intel_uncore *uncore = &dev_priv->uncore;
2878
2879 gen11_master_intr_disable(dev_priv->uncore.regs);
2880
2881 gen11_gt_irq_reset(&dev_priv->gt);
2882 gen11_display_irq_reset(dev_priv);
2883
2884 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2885 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2886}
2887
4c6c03be 2888void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
001bd2cb 2889 u8 pipe_mask)
d49bdb0e 2890{
b16b2a2f
PZ
2891 struct intel_uncore *uncore = &dev_priv->uncore;
2892
a9c287c9 2893 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
6831f3e3 2894 enum pipe pipe;
d49bdb0e 2895
13321786 2896 spin_lock_irq(&dev_priv->irq_lock);
9dfe2e3a
ID
2897
2898 if (!intel_irqs_enabled(dev_priv)) {
2899 spin_unlock_irq(&dev_priv->irq_lock);
2900 return;
2901 }
2902
6831f3e3 2903 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
b16b2a2f 2904 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
6831f3e3
VS
2905 dev_priv->de_irq_mask[pipe],
2906 ~dev_priv->de_irq_mask[pipe] | extra_ier);
9dfe2e3a 2907
13321786 2908 spin_unlock_irq(&dev_priv->irq_lock);
d49bdb0e
PZ
2909}
2910
aae8ba84 2911void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
001bd2cb 2912 u8 pipe_mask)
aae8ba84 2913{
b16b2a2f 2914 struct intel_uncore *uncore = &dev_priv->uncore;
6831f3e3
VS
2915 enum pipe pipe;
2916
aae8ba84 2917 spin_lock_irq(&dev_priv->irq_lock);
9dfe2e3a
ID
2918
2919 if (!intel_irqs_enabled(dev_priv)) {
2920 spin_unlock_irq(&dev_priv->irq_lock);
2921 return;
2922 }
2923
6831f3e3 2924 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
b16b2a2f 2925 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
9dfe2e3a 2926
aae8ba84
VS
2927 spin_unlock_irq(&dev_priv->irq_lock);
2928
2929 /* make sure we're done processing display irqs */
315ca4c4 2930 intel_synchronize_irq(dev_priv);
aae8ba84
VS
2931}
2932
b318b824 2933static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
43f328d7 2934{
b16b2a2f 2935 struct intel_uncore *uncore = &dev_priv->uncore;
43f328d7
VS
2936
2937 I915_WRITE(GEN8_MASTER_IRQ, 0);
2938 POSTING_READ(GEN8_MASTER_IRQ);
2939
cf1c97dc 2940 gen8_gt_irq_reset(&dev_priv->gt);
43f328d7 2941
b16b2a2f 2942 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
43f328d7 2943
ad22d106 2944 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
2945 if (dev_priv->display_irqs_enabled)
2946 vlv_display_irq_reset(dev_priv);
ad22d106 2947 spin_unlock_irq(&dev_priv->irq_lock);
43f328d7
VS
2948}
2949
91d14251 2950static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
87a02106
VS
2951 const u32 hpd[HPD_NUM_PINS])
2952{
87a02106
VS
2953 struct intel_encoder *encoder;
2954 u32 enabled_irqs = 0;
2955
91c8a326 2956 for_each_intel_encoder(&dev_priv->drm, encoder)
87a02106
VS
2957 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
2958 enabled_irqs |= hpd[encoder->hpd_pin];
2959
2960 return enabled_irqs;
2961}
2962
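/*
 * Illustrative sketch only (not i915 code): the table lookup performed by
 * intel_hpd_enabled_irqs() above, reduced to a standalone helper.  Each
 * enabled hotplug pin indexes a per-platform table of register bits and the
 * results are OR-ed into one trigger mask.  The "enabled" array and pin
 * count are hypothetical inputs.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t hpd_enabled_irqs(const uint32_t *hpd_bits,
				 const bool *enabled, int num_pins)
{
	uint32_t mask = 0;
	int pin;

	for (pin = 0; pin < num_pins; pin++)
		if (enabled[pin])
			mask |= hpd_bits[pin];	/* per-pin bit from the table */

	return mask;
}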
1a56b1a2 2963static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
7fe0b973 2964{
1a56b1a2 2965 u32 hotplug;
82a28bcf
DV
2966
2967 /*
2968 * Enable digital hotplug on the PCH, and configure the DP short pulse
6dbf30ce
VS
2969 * duration to 2ms (which is the minimum in the Display Port spec).
2970 * The pulse duration bits are reserved on LPT+.
82a28bcf 2971 */
7fe0b973 2972 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1a56b1a2
ID
2973 hotplug &= ~(PORTB_PULSE_DURATION_MASK |
2974 PORTC_PULSE_DURATION_MASK |
2975 PORTD_PULSE_DURATION_MASK);
7fe0b973 2976 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1a56b1a2
ID
2977 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2978 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
0b2eb33e
VS
2979 /*
2980 * When CPU and PCH are on the same package, port A
2981 * HPD must be enabled in both north and south.
2982 */
91d14251 2983 if (HAS_PCH_LPT_LP(dev_priv))
0b2eb33e 2984 hotplug |= PORTA_HOTPLUG_ENABLE;
7fe0b973 2985 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
6dbf30ce 2986}
26951caf 2987
1a56b1a2
ID
2988static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
2989{
2990 u32 hotplug_irqs, enabled_irqs;
2991
2992 if (HAS_PCH_IBX(dev_priv)) {
2993 hotplug_irqs = SDE_HOTPLUG_MASK;
2994 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
2995 } else {
2996 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2997 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
2998 }
2999
3000 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3001
3002 ibx_hpd_detection_setup(dev_priv);
3003}
3004
52dfdba0
LDM
3005static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
3006 u32 ddi_hotplug_enable_mask,
3007 u32 tc_hotplug_enable_mask)
31604222
AS
3008{
3009 u32 hotplug;
3010
3011 hotplug = I915_READ(SHOTPLUG_CTL_DDI);
52dfdba0 3012 hotplug |= ddi_hotplug_enable_mask;
31604222
AS
3013 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3014
8ef7e340
MR
3015 if (tc_hotplug_enable_mask) {
3016 hotplug = I915_READ(SHOTPLUG_CTL_TC);
3017 hotplug |= tc_hotplug_enable_mask;
3018 I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3019 }
31604222
AS
3020}
3021
40e98130
LDM
3022static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
3023 u32 sde_ddi_mask, u32 sde_tc_mask,
3024 u32 ddi_enable_mask, u32 tc_enable_mask,
3025 const u32 *pins)
31604222
AS
3026{
3027 u32 hotplug_irqs, enabled_irqs;
3028
40e98130
LDM
3029 hotplug_irqs = sde_ddi_mask | sde_tc_mask;
3030 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
31604222 3031
f49108d0
MR
3032 I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3033
31604222
AS
3034 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3035
40e98130 3036 icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
52dfdba0
LDM
3037}
3038
40e98130
LDM
3039/*
3040 * EHL doesn't need most of gen11_hpd_irq_setup; it handles only the
3041 * equivalent of SDE.
3042 */
8ef7e340
MR
3043static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3044{
40e98130 3045 icp_hpd_irq_setup(dev_priv,
53448aed
VK
3046 SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
3047 ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
d09ad3e7 3048 hpd_icp);
31604222
AS
3049}
3050
943682e3
MR
3051/*
3052 * JSP behaves exactly the same as MCC above except that port C is mapped to
3053 * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's
3054 * masks & tables rather than ICP's masks & tables.
3055 */
3056static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3057{
3058 icp_hpd_irq_setup(dev_priv,
3059 SDE_DDI_MASK_TGP, 0,
3060 TGP_DDI_HPD_ENABLE_MASK, 0,
3061 hpd_tgp);
3062}
3063
121e758e
DP
3064static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3065{
3066 u32 hotplug;
3067
3068 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3069 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3070 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3071 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3072 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3073 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
b796b971
DP
3074
3075 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3076 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3077 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3078 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3079 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3080 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
121e758e
DP
3081}
3082
3083static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3084{
3085 u32 hotplug_irqs, enabled_irqs;
48ef15d3 3086 const u32 *hpd;
121e758e
DP
3087 u32 val;
3088
48ef15d3
JRS
3089 hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
3090 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
b796b971 3091 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
121e758e
DP
3092
3093 val = I915_READ(GEN11_DE_HPD_IMR);
3094 val &= ~hotplug_irqs;
3095 I915_WRITE(GEN11_DE_HPD_IMR, val);
3096 POSTING_READ(GEN11_DE_HPD_IMR);
3097
3098 gen11_hpd_detection_setup(dev_priv);
31604222 3099
52dfdba0 3100 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
40e98130
LDM
3101 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
3102 TGP_DDI_HPD_ENABLE_MASK,
3103 TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
52dfdba0 3104 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
40e98130
LDM
3105 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
3106 ICP_DDI_HPD_ENABLE_MASK,
3107 ICP_TC_HPD_ENABLE_MASK, hpd_icp);
121e758e
DP
3108}
3109
2a57d9cc 3110static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
6dbf30ce 3111{
3b92e263
RV
3112 u32 val, hotplug;
3113
3114 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3115 if (HAS_PCH_CNP(dev_priv)) {
3116 val = I915_READ(SOUTH_CHICKEN1);
3117 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3118 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3119 I915_WRITE(SOUTH_CHICKEN1, val);
3120 }
6dbf30ce
VS
3121
3122 /* Enable digital hotplug on the PCH */
3123 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2a57d9cc
ID
3124 hotplug |= PORTA_HOTPLUG_ENABLE |
3125 PORTB_HOTPLUG_ENABLE |
3126 PORTC_HOTPLUG_ENABLE |
3127 PORTD_HOTPLUG_ENABLE;
6dbf30ce
VS
3128 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3129
3130 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3131 hotplug |= PORTE_HOTPLUG_ENABLE;
3132 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
7fe0b973
KP
3133}
3134
2a57d9cc
ID
3135static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3136{
3137 u32 hotplug_irqs, enabled_irqs;
3138
f49108d0
MR
3139 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3140 I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3141
2a57d9cc
ID
3142 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3143 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3144
3145 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3146
3147 spt_hpd_detection_setup(dev_priv);
3148}
3149
1a56b1a2
ID
3150static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3151{
3152 u32 hotplug;
3153
3154 /*
3155 * Enable digital hotplug on the CPU, and configure the DP short pulse
3156 * duration to 2ms (which is the minimum in the Display Port spec)
3157 * The pulse duration bits are reserved on HSW+.
3158 */
3159 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3160 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3161 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3162 DIGITAL_PORTA_PULSE_DURATION_2ms;
3163 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3164}
3165
91d14251 3166static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
e4ce95aa 3167{
1a56b1a2 3168 u32 hotplug_irqs, enabled_irqs;
e4ce95aa 3169
91d14251 3170 if (INTEL_GEN(dev_priv) >= 8) {
3a3b3c7d 3171 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
91d14251 3172 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3a3b3c7d
VS
3173
3174 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
91d14251 3175 } else if (INTEL_GEN(dev_priv) >= 7) {
23bb4cb5 3176 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
91d14251 3177 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3a3b3c7d
VS
3178
3179 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
23bb4cb5
VS
3180 } else {
3181 hotplug_irqs = DE_DP_A_HOTPLUG;
91d14251 3182 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
e4ce95aa 3183
3a3b3c7d
VS
3184 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3185 }
e4ce95aa 3186
1a56b1a2 3187 ilk_hpd_detection_setup(dev_priv);
e4ce95aa 3188
91d14251 3189 ibx_hpd_irq_setup(dev_priv);
e4ce95aa
VS
3190}
3191
2a57d9cc
ID
3192static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3193 u32 enabled_irqs)
e0a20ad7 3194{
2a57d9cc 3195 u32 hotplug;
e0a20ad7 3196
a52bb15b 3197 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2a57d9cc
ID
3198 hotplug |= PORTA_HOTPLUG_ENABLE |
3199 PORTB_HOTPLUG_ENABLE |
3200 PORTC_HOTPLUG_ENABLE;
d252bf68 3201
00376ccf
WK
3202 drm_dbg_kms(&dev_priv->drm,
3203 "Invert bit setting: hp_ctl:%x hp_port:%x\n",
3204 hotplug, enabled_irqs);
d252bf68
SS
3205 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3206
3207 /*
 3208	 * For BXT, the invert bit has to be set based on the AOB design
 3209	 * for the HPD detection logic; update it based on the VBT fields.
3210 */
d252bf68
SS
3211 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3212 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3213 hotplug |= BXT_DDIA_HPD_INVERT;
3214 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3215 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3216 hotplug |= BXT_DDIB_HPD_INVERT;
3217 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3218 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3219 hotplug |= BXT_DDIC_HPD_INVERT;
3220
a52bb15b 3221 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
e0a20ad7
SS
3222}
3223
2a57d9cc
ID
3224static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3225{
3226 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3227}
3228
3229static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3230{
3231 u32 hotplug_irqs, enabled_irqs;
3232
3233 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3234 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3235
3236 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3237
3238 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3239}
3240
b318b824 3241static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
d46da437 3242{
82a28bcf 3243 u32 mask;
e5868a31 3244
6e266956 3245 if (HAS_PCH_NOP(dev_priv))
692a04cf
DV
3246 return;
3247
6e266956 3248 if (HAS_PCH_IBX(dev_priv))
5c673b60 3249 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
4ebc6509 3250 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
5c673b60 3251 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
4ebc6509
DP
3252 else
3253 mask = SDE_GMBUS_CPT;
8664281b 3254
65f42cdc 3255 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
d46da437 3256 I915_WRITE(SDEIMR, ~mask);
2a57d9cc
ID
3257
3258 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3259 HAS_PCH_LPT(dev_priv))
1a56b1a2 3260 ibx_hpd_detection_setup(dev_priv);
2a57d9cc
ID
3261 else
3262 spt_hpd_detection_setup(dev_priv);
d46da437
PZ
3263}
3264
9eae5e27 3265static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
036a4a7d 3266{
b16b2a2f 3267 struct intel_uncore *uncore = &dev_priv->uncore;
8e76f8dc
PZ
3268 u32 display_mask, extra_mask;
3269
b243f530 3270 if (INTEL_GEN(dev_priv) >= 7) {
8e76f8dc 3271 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
842ebf7a 3272 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
8e76f8dc 3273 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
23bb4cb5
VS
3274 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3275 DE_DP_A_HOTPLUG_IVB);
8e76f8dc
PZ
3276 } else {
3277 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
842ebf7a
VS
3278 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3279 DE_PIPEA_CRC_DONE | DE_POISON);
e4ce95aa
VS
3280 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3281 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3282 DE_DP_A_HOTPLUG);
8e76f8dc 3283 }
036a4a7d 3284
fc340442 3285 if (IS_HASWELL(dev_priv)) {
b16b2a2f 3286 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
fc340442
DV
3287 display_mask |= DE_EDP_PSR_INT_HSW;
3288 }
3289
1ec14ad3 3290 dev_priv->irq_mask = ~display_mask;
036a4a7d 3291
b318b824 3292 ibx_irq_pre_postinstall(dev_priv);
622364b6 3293
b16b2a2f
PZ
3294 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3295 display_mask | extra_mask);
036a4a7d 3296
cf1c97dc 3297 gen5_gt_irq_postinstall(&dev_priv->gt);
036a4a7d 3298
1a56b1a2
ID
3299 ilk_hpd_detection_setup(dev_priv);
3300
b318b824 3301 ibx_irq_postinstall(dev_priv);
7fe0b973 3302
50a0bc90 3303 if (IS_IRONLAKE_M(dev_priv)) {
6005ce42
DV
3304 /* Enable PCU event interrupts
3305 *
 3306	 * Spinlocking is not required here for correctness since interrupt
4bc9d430
DV
 3307	 * setup is guaranteed to run in a single-threaded context, but we
 3308	 * need it to keep the assert_spin_locked check happy. */
d6207435 3309 spin_lock_irq(&dev_priv->irq_lock);
fbdedaea 3310 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
d6207435 3311 spin_unlock_irq(&dev_priv->irq_lock);
f97108d1 3312 }
036a4a7d
ZW
3313}
3314
f8b79e58
ID
3315void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3316{
67520415 3317 lockdep_assert_held(&dev_priv->irq_lock);
f8b79e58
ID
3318
3319 if (dev_priv->display_irqs_enabled)
3320 return;
3321
3322 dev_priv->display_irqs_enabled = true;
3323
d6c69803
VS
3324 if (intel_irqs_enabled(dev_priv)) {
3325 vlv_display_irq_reset(dev_priv);
ad22d106 3326 vlv_display_irq_postinstall(dev_priv);
d6c69803 3327 }
f8b79e58
ID
3328}
3329
3330void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3331{
67520415 3332 lockdep_assert_held(&dev_priv->irq_lock);
f8b79e58
ID
3333
3334 if (!dev_priv->display_irqs_enabled)
3335 return;
3336
3337 dev_priv->display_irqs_enabled = false;
3338
950eabaf 3339 if (intel_irqs_enabled(dev_priv))
ad22d106 3340 vlv_display_irq_reset(dev_priv);
f8b79e58
ID
3341}
3342
0e6c9a9e 3343
b318b824 3344static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
0e6c9a9e 3345{
cf1c97dc 3346 gen5_gt_irq_postinstall(&dev_priv->gt);
7e231dbe 3347
ad22d106 3348 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3349 if (dev_priv->display_irqs_enabled)
3350 vlv_display_irq_postinstall(dev_priv);
ad22d106
VS
3351 spin_unlock_irq(&dev_priv->irq_lock);
3352
7e231dbe 3353 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
34c7b8a7 3354 POSTING_READ(VLV_MASTER_IER);
20afbda2
DV
3355}
3356
abd58f01
BW
3357static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3358{
b16b2a2f
PZ
3359 struct intel_uncore *uncore = &dev_priv->uncore;
3360
8598eb78
MR
3361 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3362 GEN8_PIPE_CDCLK_CRC_DONE;
a9c287c9 3363 u32 de_pipe_enables;
4457a9db 3364 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3a3b3c7d 3365 u32 de_port_enables;
df0d28c1 3366 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3a3b3c7d 3367 enum pipe pipe;
770de83d 3368
df0d28c1
DP
3369 if (INTEL_GEN(dev_priv) <= 10)
3370 de_misc_masked |= GEN8_DE_MISC_GSE;
3371
4457a9db
ID
3372 if (IS_GEN9_LP(dev_priv))
3373 de_port_masked |= BXT_DE_PORT_GMBUS;
a324fcac 3374
770de83d
DL
3375 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3376 GEN8_PIPE_FIFO_UNDERRUN;
3377
3a3b3c7d 3378 de_port_enables = de_port_masked;
cc3f90f0 3379 if (IS_GEN9_LP(dev_priv))
a52bb15b
VS
3380 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3381 else if (IS_BROADWELL(dev_priv))
3a3b3c7d
VS
3382 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3383
8241cfbe
JRS
3384 if (INTEL_GEN(dev_priv) >= 12) {
3385 enum transcoder trans;
3386
3387 for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
3388 enum intel_display_power_domain domain;
3389
3390 domain = POWER_DOMAIN_TRANSCODER(trans);
3391 if (!intel_display_power_is_enabled(dev_priv, domain))
3392 continue;
3393
3394 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3395 }
3396 } else {
3397 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3398 }
e04f7ece 3399
0a195c02
MK
3400 for_each_pipe(dev_priv, pipe) {
3401 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
abd58f01 3402
f458ebbc 3403 if (intel_display_power_is_enabled(dev_priv,
813bde43 3404 POWER_DOMAIN_PIPE(pipe)))
b16b2a2f 3405 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
813bde43
PZ
3406 dev_priv->de_irq_mask[pipe],
3407 de_pipe_enables);
0a195c02 3408 }
abd58f01 3409
b16b2a2f
PZ
3410 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3411 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
2a57d9cc 3412
121e758e
DP
3413 if (INTEL_GEN(dev_priv) >= 11) {
3414 u32 de_hpd_masked = 0;
b796b971
DP
3415 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3416 GEN11_DE_TBT_HOTPLUG_MASK;
121e758e 3417
b16b2a2f
PZ
3418 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3419 de_hpd_enables);
121e758e
DP
3420 gen11_hpd_detection_setup(dev_priv);
3421 } else if (IS_GEN9_LP(dev_priv)) {
2a57d9cc 3422 bxt_hpd_detection_setup(dev_priv);
121e758e 3423 } else if (IS_BROADWELL(dev_priv)) {
1a56b1a2 3424 ilk_hpd_detection_setup(dev_priv);
121e758e 3425 }
abd58f01
BW
3426}
3427
b318b824 3428static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
abd58f01 3429{
6e266956 3430 if (HAS_PCH_SPLIT(dev_priv))
b318b824 3431 ibx_irq_pre_postinstall(dev_priv);
622364b6 3432
cf1c97dc 3433 gen8_gt_irq_postinstall(&dev_priv->gt);
abd58f01
BW
3434 gen8_de_irq_postinstall(dev_priv);
3435
6e266956 3436 if (HAS_PCH_SPLIT(dev_priv))
b318b824 3437 ibx_irq_postinstall(dev_priv);
abd58f01 3438
25286aac 3439 gen8_master_intr_enable(dev_priv->uncore.regs);
abd58f01
BW
3440}
3441
b318b824 3442static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
31604222 3443{
31604222
AS
3444 u32 mask = SDE_GMBUS_ICP;
3445
48a1b8d4 3446 drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
31604222
AS
3447 I915_WRITE(SDEIER, 0xffffffff);
3448 POSTING_READ(SDEIER);
3449
65f42cdc 3450 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
31604222
AS
3451 I915_WRITE(SDEIMR, ~mask);
3452
52dfdba0
LDM
3453 if (HAS_PCH_TGP(dev_priv))
3454 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
3455 TGP_TC_HPD_ENABLE_MASK);
e83c4673 3456 else if (HAS_PCH_JSP(dev_priv))
8ef7e340 3457 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
e83c4673
VK
3458 else if (HAS_PCH_MCC(dev_priv))
3459 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
3460 ICP_TC_HPD_ENABLE(PORT_TC1));
52dfdba0
LDM
3461 else
3462 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
3463 ICP_TC_HPD_ENABLE_MASK);
31604222
AS
3464}
3465
b318b824 3466static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
51951ae7 3467{
b16b2a2f 3468 struct intel_uncore *uncore = &dev_priv->uncore;
df0d28c1 3469 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
51951ae7 3470
29b43ae2 3471 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
b318b824 3472 icp_irq_postinstall(dev_priv);
31604222 3473
9b77011e 3474 gen11_gt_irq_postinstall(&dev_priv->gt);
51951ae7
MK
3475 gen8_de_irq_postinstall(dev_priv);
3476
b16b2a2f 3477 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
df0d28c1 3478
51951ae7
MK
3479 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
3480
9b77011e 3481 gen11_master_intr_enable(uncore->regs);
c25f0c6a 3482 POSTING_READ(GEN11_GFX_MSTR_IRQ);
51951ae7
MK
3483}
3484
b318b824 3485static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
43f328d7 3486{
cf1c97dc 3487 gen8_gt_irq_postinstall(&dev_priv->gt);
43f328d7 3488
ad22d106 3489 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3490 if (dev_priv->display_irqs_enabled)
3491 vlv_display_irq_postinstall(dev_priv);
ad22d106
VS
3492 spin_unlock_irq(&dev_priv->irq_lock);
3493
e5328c43 3494 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
43f328d7 3495 POSTING_READ(GEN8_MASTER_IRQ);
43f328d7
VS
3496}
3497
b318b824 3498static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
1da177e4 3499{
b16b2a2f 3500 struct intel_uncore *uncore = &dev_priv->uncore;
91e3738e 3501
44d9241e
VS
3502 i9xx_pipestat_irq_reset(dev_priv);
3503
b16b2a2f 3504 GEN2_IRQ_RESET(uncore);
c2798b19
CW
3505}
3506
b318b824 3507static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
c2798b19 3508{
b16b2a2f 3509 struct intel_uncore *uncore = &dev_priv->uncore;
e9e9848a 3510 u16 enable_mask;
c2798b19 3511
4f5fd91f
TU
3512 intel_uncore_write16(uncore,
3513 EMR,
3514 ~(I915_ERROR_PAGE_TABLE |
3515 I915_ERROR_MEMORY_REFRESH));
c2798b19
CW
3516
3517 /* Unmask the interrupts that we always want on. */
3518 dev_priv->irq_mask =
3519 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
16659bc5
VS
3520 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3521 I915_MASTER_ERROR_INTERRUPT);
c2798b19 3522
e9e9848a
VS
3523 enable_mask =
3524 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3525 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
16659bc5 3526 I915_MASTER_ERROR_INTERRUPT |
e9e9848a
VS
3527 I915_USER_INTERRUPT;
3528
b16b2a2f 3529 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
c2798b19 3530
379ef82d
DV
 3531	/* Interrupt setup is already guaranteed to be single-threaded; this is
3532 * just to make the assert_spin_locked check happy. */
d6207435 3533 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
3534 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3535 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 3536 spin_unlock_irq(&dev_priv->irq_lock);
c2798b19
CW
3537}
3538
4f5fd91f 3539static void i8xx_error_irq_ack(struct drm_i915_private *i915,
78c357dd
VS
3540 u16 *eir, u16 *eir_stuck)
3541{
4f5fd91f 3542 struct intel_uncore *uncore = &i915->uncore;
78c357dd
VS
3543 u16 emr;
3544
4f5fd91f 3545 *eir = intel_uncore_read16(uncore, EIR);
78c357dd
VS
3546
3547 if (*eir)
4f5fd91f 3548 intel_uncore_write16(uncore, EIR, *eir);
78c357dd 3549
4f5fd91f 3550 *eir_stuck = intel_uncore_read16(uncore, EIR);
78c357dd
VS
3551 if (*eir_stuck == 0)
3552 return;
3553
3554 /*
3555 * Toggle all EMR bits to make sure we get an edge
3556 * in the ISR master error bit if we don't clear
3557 * all the EIR bits. Otherwise the edge triggered
3558 * IIR on i965/g4x wouldn't notice that an interrupt
3559 * is still pending. Also some EIR bits can't be
3560 * cleared except by handling the underlying error
3561 * (or by a GPU reset) so we mask any bit that
3562 * remains set.
3563 */
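	/*
	 * Editorial note (added; restates the comment above): with EMR set to
	 * all ones the masked error inputs stop driving the ISR master error
	 * bit, so it deasserts even though the stuck EIR bit remains set;
	 * restoring EMR with the stuck bit ORed in keeps that error masked,
	 * and only a fresh, unmasked error can produce the next 0->1 edge for
	 * an edge-triggered IIR to latch.
	 */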
4f5fd91f
TU
3564 emr = intel_uncore_read16(uncore, EMR);
3565 intel_uncore_write16(uncore, EMR, 0xffff);
3566 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
78c357dd
VS
3567}
3568
3569static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3570 u16 eir, u16 eir_stuck)
3571{
3572 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3573
3574 if (eir_stuck)
00376ccf
WK
3575 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3576 eir_stuck);
78c357dd
VS
3577}
3578
3579static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3580 u32 *eir, u32 *eir_stuck)
3581{
3582 u32 emr;
3583
3584 *eir = I915_READ(EIR);
3585
3586 I915_WRITE(EIR, *eir);
3587
3588 *eir_stuck = I915_READ(EIR);
3589 if (*eir_stuck == 0)
3590 return;
3591
3592 /*
3593 * Toggle all EMR bits to make sure we get an edge
3594 * in the ISR master error bit if we don't clear
3595 * all the EIR bits. Otherwise the edge triggered
3596 * IIR on i965/g4x wouldn't notice that an interrupt
3597 * is still pending. Also some EIR bits can't be
3598 * cleared except by handling the underlying error
3599 * (or by a GPU reset) so we mask any bit that
3600 * remains set.
3601 */
3602 emr = I915_READ(EMR);
3603 I915_WRITE(EMR, 0xffffffff);
3604 I915_WRITE(EMR, emr | *eir_stuck);
3605}
3606
3607static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3608 u32 eir, u32 eir_stuck)
3609{
3610 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3611
3612 if (eir_stuck)
00376ccf
WK
3613 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3614 eir_stuck);
78c357dd
VS
3615}
3616
ff1f525e 3617static irqreturn_t i8xx_irq_handler(int irq, void *arg)
c2798b19 3618{
b318b824 3619 struct drm_i915_private *dev_priv = arg;
af722d28 3620 irqreturn_t ret = IRQ_NONE;
c2798b19 3621
2dd2a883
ID
3622 if (!intel_irqs_enabled(dev_priv))
3623 return IRQ_NONE;
3624
1f814dac 3625 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
9102650f 3626 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 3627
af722d28 3628 do {
eb64343c 3629 u32 pipe_stats[I915_MAX_PIPES] = {};
78c357dd 3630 u16 eir = 0, eir_stuck = 0;
af722d28 3631 u16 iir;
eb64343c 3632
4f5fd91f 3633 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
af722d28
VS
3634 if (iir == 0)
3635 break;
3636
3637 ret = IRQ_HANDLED;
c2798b19 3638
eb64343c
VS
3639 /* Call regardless, as some status bits might not be
3640 * signalled in iir */
3641 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
c2798b19 3642
78c357dd
VS
3643 if (iir & I915_MASTER_ERROR_INTERRUPT)
3644 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3645
4f5fd91f 3646 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
c2798b19 3647
c2798b19 3648 if (iir & I915_USER_INTERRUPT)
54400257 3649 intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
c2798b19 3650
78c357dd
VS
3651 if (iir & I915_MASTER_ERROR_INTERRUPT)
3652 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
c2798b19 3653
af722d28
VS
3654 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3655 } while (0);
1f814dac 3656
9102650f 3657 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
c2798b19 3658
1f814dac 3659 return ret;
c2798b19
CW
3660}
3661
b318b824 3662static void i915_irq_reset(struct drm_i915_private *dev_priv)
a266c7d5 3663{
b16b2a2f 3664 struct intel_uncore *uncore = &dev_priv->uncore;
a266c7d5 3665
56b857a5 3666 if (I915_HAS_HOTPLUG(dev_priv)) {
0706f17c 3667 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
a266c7d5
CW
3668 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3669 }
3670
44d9241e
VS
3671 i9xx_pipestat_irq_reset(dev_priv);
3672
b16b2a2f 3673 GEN3_IRQ_RESET(uncore, GEN2_);
a266c7d5
CW
3674}
3675
b318b824 3676static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
a266c7d5 3677{
b16b2a2f 3678 struct intel_uncore *uncore = &dev_priv->uncore;
38bde180 3679 u32 enable_mask;
a266c7d5 3680
045cebd2
VS
3681 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
3682 I915_ERROR_MEMORY_REFRESH));
38bde180
CW
3683
3684 /* Unmask the interrupts that we always want on. */
3685 dev_priv->irq_mask =
3686 ~(I915_ASLE_INTERRUPT |
3687 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
16659bc5
VS
3688 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3689 I915_MASTER_ERROR_INTERRUPT);
38bde180
CW
3690
3691 enable_mask =
3692 I915_ASLE_INTERRUPT |
3693 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3694 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
16659bc5 3695 I915_MASTER_ERROR_INTERRUPT |
38bde180
CW
3696 I915_USER_INTERRUPT;
3697
56b857a5 3698 if (I915_HAS_HOTPLUG(dev_priv)) {
a266c7d5
CW
3699 /* Enable in IER... */
3700 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3701 /* and unmask in IMR */
3702 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3703 }
3704
b16b2a2f 3705 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
a266c7d5 3706
379ef82d
DV
 3707	/* Interrupt setup is already guaranteed to be single-threaded; this is
3708 * just to make the assert_spin_locked check happy. */
d6207435 3709 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
3710 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3711 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 3712 spin_unlock_irq(&dev_priv->irq_lock);
379ef82d 3713
c30bb1fd 3714 i915_enable_asle_pipestat(dev_priv);
20afbda2
DV
3715}
3716
ff1f525e 3717static irqreturn_t i915_irq_handler(int irq, void *arg)
a266c7d5 3718{
b318b824 3719 struct drm_i915_private *dev_priv = arg;
af722d28 3720 irqreturn_t ret = IRQ_NONE;
a266c7d5 3721
2dd2a883
ID
3722 if (!intel_irqs_enabled(dev_priv))
3723 return IRQ_NONE;
3724
1f814dac 3725 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
9102650f 3726 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 3727
38bde180 3728 do {
eb64343c 3729 u32 pipe_stats[I915_MAX_PIPES] = {};
78c357dd 3730 u32 eir = 0, eir_stuck = 0;
af722d28
VS
3731 u32 hotplug_status = 0;
3732 u32 iir;
a266c7d5 3733
9d9523d8 3734 iir = I915_READ(GEN2_IIR);
af722d28
VS
3735 if (iir == 0)
3736 break;
3737
3738 ret = IRQ_HANDLED;
3739
3740 if (I915_HAS_HOTPLUG(dev_priv) &&
3741 iir & I915_DISPLAY_PORT_INTERRUPT)
3742 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
a266c7d5 3743
eb64343c
VS
3744 /* Call regardless, as some status bits might not be
3745 * signalled in iir */
3746 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
a266c7d5 3747
78c357dd
VS
3748 if (iir & I915_MASTER_ERROR_INTERRUPT)
3749 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3750
9d9523d8 3751 I915_WRITE(GEN2_IIR, iir);
a266c7d5 3752
a266c7d5 3753 if (iir & I915_USER_INTERRUPT)
54400257 3754 intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
a266c7d5 3755
78c357dd
VS
3756 if (iir & I915_MASTER_ERROR_INTERRUPT)
3757 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
a266c7d5 3758
af722d28
VS
3759 if (hotplug_status)
3760 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3761
3762 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3763 } while (0);
a266c7d5 3764
9102650f 3765 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 3766
a266c7d5
CW
3767 return ret;
3768}
3769
b318b824 3770static void i965_irq_reset(struct drm_i915_private *dev_priv)
a266c7d5 3771{
b16b2a2f 3772 struct intel_uncore *uncore = &dev_priv->uncore;
a266c7d5 3773
0706f17c 3774 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
adca4730 3775 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5 3776
44d9241e
VS
3777 i9xx_pipestat_irq_reset(dev_priv);
3778
b16b2a2f 3779 GEN3_IRQ_RESET(uncore, GEN2_);
a266c7d5
CW
3780}
3781
b318b824 3782static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
a266c7d5 3783{
b16b2a2f 3784 struct intel_uncore *uncore = &dev_priv->uncore;
bbba0a97 3785 u32 enable_mask;
a266c7d5
CW
3786 u32 error_mask;
3787
045cebd2
VS
3788 /*
3789 * Enable some error detection, note the instruction error mask
3790 * bit is reserved, so we leave it masked.
3791 */
3792 if (IS_G4X(dev_priv)) {
3793 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3794 GM45_ERROR_MEM_PRIV |
3795 GM45_ERROR_CP_PRIV |
3796 I915_ERROR_MEMORY_REFRESH);
3797 } else {
3798 error_mask = ~(I915_ERROR_PAGE_TABLE |
3799 I915_ERROR_MEMORY_REFRESH);
3800 }
3801 I915_WRITE(EMR, error_mask);
3802
a266c7d5 3803 /* Unmask the interrupts that we always want on. */
c30bb1fd
VS
3804 dev_priv->irq_mask =
3805 ~(I915_ASLE_INTERRUPT |
3806 I915_DISPLAY_PORT_INTERRUPT |
3807 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3808 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
78c357dd 3809 I915_MASTER_ERROR_INTERRUPT);
bbba0a97 3810
c30bb1fd
VS
3811 enable_mask =
3812 I915_ASLE_INTERRUPT |
3813 I915_DISPLAY_PORT_INTERRUPT |
3814 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3815 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
78c357dd 3816 I915_MASTER_ERROR_INTERRUPT |
c30bb1fd 3817 I915_USER_INTERRUPT;
bbba0a97 3818
91d14251 3819 if (IS_G4X(dev_priv))
bbba0a97 3820 enable_mask |= I915_BSD_USER_INTERRUPT;
a266c7d5 3821
b16b2a2f 3822 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
c30bb1fd 3823
b79480ba
DV
 3824	/* Interrupt setup is already guaranteed to be single-threaded; this is
3825 * just to make the assert_spin_locked check happy. */
d6207435 3826 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
3827 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3828 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3829 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 3830 spin_unlock_irq(&dev_priv->irq_lock);
a266c7d5 3831
91d14251 3832 i915_enable_asle_pipestat(dev_priv);
20afbda2
DV
3833}
3834
91d14251 3835static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
20afbda2 3836{
20afbda2
DV
3837 u32 hotplug_en;
3838
67520415 3839 lockdep_assert_held(&dev_priv->irq_lock);
b5ea2d56 3840
778eb334
VS
3841 /* Note HDMI and DP share hotplug bits */
3842 /* enable bits are the same for all generations */
91d14251 3843 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
778eb334
VS
3844 /* Programming the CRT detection parameters tends
 3845	 * to generate a spurious hotplug event about three
 3846	 * seconds later. So just do it once.
3847 */
91d14251 3848 if (IS_G4X(dev_priv))
778eb334 3849 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
778eb334
VS
3850 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3851
3852 /* Ignore TV since it's buggy */
0706f17c 3853 i915_hotplug_interrupt_update_locked(dev_priv,
f9e3dc78
JN
3854 HOTPLUG_INT_EN_MASK |
3855 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3856 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
3857 hotplug_en);
a266c7d5
CW
3858}
3859
ff1f525e 3860static irqreturn_t i965_irq_handler(int irq, void *arg)
a266c7d5 3861{
b318b824 3862 struct drm_i915_private *dev_priv = arg;
af722d28 3863 irqreturn_t ret = IRQ_NONE;
a266c7d5 3864
2dd2a883
ID
3865 if (!intel_irqs_enabled(dev_priv))
3866 return IRQ_NONE;
3867
1f814dac 3868 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
9102650f 3869 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 3870
af722d28 3871 do {
eb64343c 3872 u32 pipe_stats[I915_MAX_PIPES] = {};
78c357dd 3873 u32 eir = 0, eir_stuck = 0;
af722d28
VS
3874 u32 hotplug_status = 0;
3875 u32 iir;
a266c7d5 3876
9d9523d8 3877 iir = I915_READ(GEN2_IIR);
af722d28 3878 if (iir == 0)
a266c7d5
CW
3879 break;
3880
3881 ret = IRQ_HANDLED;
3882
af722d28
VS
3883 if (iir & I915_DISPLAY_PORT_INTERRUPT)
3884 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3885
3886 /* Call regardless, as some status bits might not be
3887 * signalled in iir */
3888 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
a266c7d5 3889
78c357dd
VS
3890 if (iir & I915_MASTER_ERROR_INTERRUPT)
3891 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3892
9d9523d8 3893 I915_WRITE(GEN2_IIR, iir);
a266c7d5 3894
a266c7d5 3895 if (iir & I915_USER_INTERRUPT)
54400257 3896 intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
af722d28 3897
a266c7d5 3898 if (iir & I915_BSD_USER_INTERRUPT)
54400257 3899 intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
a266c7d5 3900
78c357dd
VS
3901 if (iir & I915_MASTER_ERROR_INTERRUPT)
3902 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
515ac2bb 3903
af722d28
VS
3904 if (hotplug_status)
3905 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3906
3907 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3908 } while (0);
a266c7d5 3909
9102650f 3910 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1f814dac 3911
a266c7d5
CW
3912 return ret;
3913}
3914
fca52a55
DV
3915/**
3916 * intel_irq_init - initializes irq support
3917 * @dev_priv: i915 device instance
3918 *
3919 * This function initializes all the irq support including work items, timers
 3920	 * and all the vtables. It does not set up the interrupt itself, though.
3921 */
b963291c 3922void intel_irq_init(struct drm_i915_private *dev_priv)
f71d4af4 3923{
91c8a326 3924 struct drm_device *dev = &dev_priv->drm;
cefcff8f 3925 int i;
8b2e326d 3926
77913b39
JN
3927 intel_hpd_init_work(dev_priv);
3928
74bb98ba 3929 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
cefcff8f
JL
3930 for (i = 0; i < MAX_L3_SLICES; ++i)
3931 dev_priv->l3_parity.remap_info[i] = NULL;
8b2e326d 3932
633023a4 3933	/* pre-gen11, the GuC irq bits are in the upper 16 bits of the PM reg */
702668e6 3934 if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
2239e6df 3935 dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
26705e20 3936
0df3f09d 3937 dev->vblank_disable_immediate = true;
21da2700 3938
262fd485
CW
3939 /* Most platforms treat the display irq block as an always-on
3940 * power domain. vlv/chv can disable it at runtime and need
3941 * special care to avoid writing any of the display block registers
3942 * outside of the power domain. We defer setting up the display irqs
3943 * in this case to the runtime pm.
3944 */
3945 dev_priv->display_irqs_enabled = true;
3946 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3947 dev_priv->display_irqs_enabled = false;
3948
317eaa95 3949 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
9a64c650
LP
3950 /* If we have MST support, we want to avoid doing short HPD IRQ storm
3951 * detection, as short HPD storms will occur as a natural part of
3952 * sideband messaging with MST.
3953 * On older platforms however, IRQ storms can occur with both long and
3954 * short pulses, as seen on some G4x systems.
3955 */
3956 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
317eaa95 3957
b318b824
VS
3958 if (HAS_GMCH(dev_priv)) {
3959 if (I915_HAS_HOTPLUG(dev_priv))
3960 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3961 } else {
943682e3
MR
3962 if (HAS_PCH_JSP(dev_priv))
3963 dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
3964 else if (HAS_PCH_MCC(dev_priv))
8ef7e340
MR
3965 dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
3966 else if (INTEL_GEN(dev_priv) >= 11)
b318b824
VS
3967 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
3968 else if (IS_GEN9_LP(dev_priv))
e0a20ad7 3969 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
c6c30b91 3970 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
6dbf30ce
VS
3971 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
3972 else
3a3b3c7d 3973 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
f71d4af4
JB
3974 }
3975}
20afbda2 3976
cefcff8f
JL
3977/**
3978 * intel_irq_fini - deinitializes IRQ support
3979 * @i915: i915 device instance
3980 *
3981 * This function deinitializes all the IRQ support.
3982 */
3983void intel_irq_fini(struct drm_i915_private *i915)
3984{
3985 int i;
3986
3987 for (i = 0; i < MAX_L3_SLICES; ++i)
3988 kfree(i915->l3_parity.remap_info[i]);
3989}
3990
b318b824
VS
3991static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
3992{
3993 if (HAS_GMCH(dev_priv)) {
3994 if (IS_CHERRYVIEW(dev_priv))
3995 return cherryview_irq_handler;
3996 else if (IS_VALLEYVIEW(dev_priv))
3997 return valleyview_irq_handler;
3998 else if (IS_GEN(dev_priv, 4))
3999 return i965_irq_handler;
4000 else if (IS_GEN(dev_priv, 3))
4001 return i915_irq_handler;
4002 else
4003 return i8xx_irq_handler;
4004 } else {
4005 if (INTEL_GEN(dev_priv) >= 11)
4006 return gen11_irq_handler;
4007 else if (INTEL_GEN(dev_priv) >= 8)
4008 return gen8_irq_handler;
4009 else
9eae5e27 4010 return ilk_irq_handler;
b318b824
VS
4011 }
4012}
4013
4014static void intel_irq_reset(struct drm_i915_private *dev_priv)
4015{
4016 if (HAS_GMCH(dev_priv)) {
4017 if (IS_CHERRYVIEW(dev_priv))
4018 cherryview_irq_reset(dev_priv);
4019 else if (IS_VALLEYVIEW(dev_priv))
4020 valleyview_irq_reset(dev_priv);
4021 else if (IS_GEN(dev_priv, 4))
4022 i965_irq_reset(dev_priv);
4023 else if (IS_GEN(dev_priv, 3))
4024 i915_irq_reset(dev_priv);
4025 else
4026 i8xx_irq_reset(dev_priv);
4027 } else {
4028 if (INTEL_GEN(dev_priv) >= 11)
4029 gen11_irq_reset(dev_priv);
4030 else if (INTEL_GEN(dev_priv) >= 8)
4031 gen8_irq_reset(dev_priv);
4032 else
9eae5e27 4033 ilk_irq_reset(dev_priv);
b318b824
VS
4034 }
4035}
4036
4037static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4038{
4039 if (HAS_GMCH(dev_priv)) {
4040 if (IS_CHERRYVIEW(dev_priv))
4041 cherryview_irq_postinstall(dev_priv);
4042 else if (IS_VALLEYVIEW(dev_priv))
4043 valleyview_irq_postinstall(dev_priv);
4044 else if (IS_GEN(dev_priv, 4))
4045 i965_irq_postinstall(dev_priv);
4046 else if (IS_GEN(dev_priv, 3))
4047 i915_irq_postinstall(dev_priv);
4048 else
4049 i8xx_irq_postinstall(dev_priv);
4050 } else {
4051 if (INTEL_GEN(dev_priv) >= 11)
4052 gen11_irq_postinstall(dev_priv);
4053 else if (INTEL_GEN(dev_priv) >= 8)
4054 gen8_irq_postinstall(dev_priv);
4055 else
9eae5e27 4056 ilk_irq_postinstall(dev_priv);
b318b824
VS
4057 }
4058}
4059
fca52a55
DV
4060/**
4061 * intel_irq_install - enables the hardware interrupt
4062 * @dev_priv: i915 device instance
4063 *
 4064	 * This function enables the hardware interrupt handling, but leaves hotplug
 4065	 * handling disabled. It is called after intel_irq_init().
4066 *
4067 * In the driver load and resume code we need working interrupts in a few places
4068 * but don't want to deal with the hassle of concurrent probe and hotplug
4069 * workers. Hence the split into this two-stage approach.
4070 */
2aeb7d3a
DV
4071int intel_irq_install(struct drm_i915_private *dev_priv)
4072{
b318b824
VS
4073 int irq = dev_priv->drm.pdev->irq;
4074 int ret;
4075
2aeb7d3a
DV
4076 /*
4077 * We enable some interrupt sources in our postinstall hooks, so mark
4078 * interrupts as enabled _before_ actually enabling them to avoid
4079 * special cases in our ordering checks.
4080 */
ad1443f0 4081 dev_priv->runtime_pm.irqs_enabled = true;
2aeb7d3a 4082
b318b824
VS
4083 dev_priv->drm.irq_enabled = true;
4084
4085 intel_irq_reset(dev_priv);
4086
4087 ret = request_irq(irq, intel_irq_handler(dev_priv),
4088 IRQF_SHARED, DRIVER_NAME, dev_priv);
4089 if (ret < 0) {
4090 dev_priv->drm.irq_enabled = false;
4091 return ret;
4092 }
4093
4094 intel_irq_postinstall(dev_priv);
4095
4096 return ret;
2aeb7d3a
DV
4097}
4098
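/*
 * Editorial illustration (not part of the original file): a minimal sketch of
 * the two-stage approach described above, assuming a hypothetical caller. The
 * real probe/remove paths live elsewhere in the driver and do more work around
 * these calls; only the ordering stated in the comments above is taken as
 * given: intel_irq_init() before intel_irq_install(), with
 * intel_irq_uninstall() and intel_irq_fini() on the way out. Error unwinding
 * is deliberately elided.
 */
static int __maybe_unused example_irq_bringup(struct drm_i915_private *i915)
{
	/* Stage 1: work items, timers and vtables only; no interrupts yet. */
	intel_irq_init(i915);

	/* Stage 2: request the IRQ line and run the postinstall hooks. */
	return intel_irq_install(i915);
}

static void __maybe_unused example_irq_teardown(struct drm_i915_private *i915)
{
	intel_irq_uninstall(i915);
	intel_irq_fini(i915);
}
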
fca52a55
DV
4099/**
 4100	 * intel_irq_uninstall - finalizes all irq handling
4101 * @dev_priv: i915 device instance
4102 *
 4103	 * This stops interrupt and hotplug handling, and unregisters and frees all
4104 * resources acquired in the init functions.
4105 */
2aeb7d3a
DV
4106void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4107{
b318b824
VS
4108 int irq = dev_priv->drm.pdev->irq;
4109
4110 /*
789fa874
JK
4111 * FIXME we can get called twice during driver probe
4112 * error handling as well as during driver remove due to
4113 * intel_modeset_driver_remove() calling us out of sequence.
4114 * Would be nice if it didn't do that...
b318b824
VS
4115 */
4116 if (!dev_priv->drm.irq_enabled)
4117 return;
4118
4119 dev_priv->drm.irq_enabled = false;
4120
4121 intel_irq_reset(dev_priv);
4122
4123 free_irq(irq, dev_priv);
4124
2aeb7d3a 4125 intel_hpd_cancel_work(dev_priv);
ad1443f0 4126 dev_priv->runtime_pm.irqs_enabled = false;
2aeb7d3a
DV
4127}
4128
fca52a55
DV
4129/**
4130 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4131 * @dev_priv: i915 device instance
4132 *
4133 * This function is used to disable interrupts at runtime, both in the runtime
4134 * pm and the system suspend/resume code.
4135 */
b963291c 4136void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
c67a470b 4137{
b318b824 4138 intel_irq_reset(dev_priv);
ad1443f0 4139 dev_priv->runtime_pm.irqs_enabled = false;
315ca4c4 4140 intel_synchronize_irq(dev_priv);
c67a470b
PZ
4141}
4142
fca52a55
DV
4143/**
4144 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4145 * @dev_priv: i915 device instance
4146 *
4147 * This function is used to enable interrupts at runtime, both in the runtime
4148 * pm and the system suspend/resume code.
4149 */
b963291c 4150void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
c67a470b 4151{
ad1443f0 4152 dev_priv->runtime_pm.irqs_enabled = true;
b318b824
VS
4153 intel_irq_reset(dev_priv);
4154 intel_irq_postinstall(dev_priv);
c67a470b 4155}
d64575ee
JN
4156
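/*
 * Editorial illustration (not part of the original file): a hedged sketch of
 * how the two runtime helpers above pair up around a suspend/resume cycle.
 * The example_* wrappers are hypothetical; the real suspend/resume code does
 * considerably more around these calls.
 */
static void __maybe_unused example_suspend_irqs(struct drm_i915_private *i915)
{
	/* Masks everything, marks irqs disabled and waits for in-flight handlers. */
	intel_runtime_pm_disable_interrupts(i915);
}

static void __maybe_unused example_resume_irqs(struct drm_i915_private *i915)
{
	/* Marks irqs enabled, then resets and re-runs the postinstall hooks. */
	intel_runtime_pm_enable_interrupts(i915);
}
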
4157bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4158{
4159 /*
4160 * We only use drm_irq_uninstall() at unload and VT switch, so
4161 * this is the only thing we need to check.
4162 */
4163 return dev_priv->runtime_pm.irqs_enabled;
4164}
4165
4166void intel_synchronize_irq(struct drm_i915_private *i915)
4167{
4168 synchronize_irq(i915->drm.pdev->irq);
4169}