/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kernel.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if the connector status changes, triggers sending of a
 * hotplug uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of
 * the number of interrupts per hotplug pin over a period of time, and if the
 * number of interrupts exceeds a certain threshold, the interrupt is disabled
 * for a while before being re-enabled. The intention is to mitigate issues
 * arising from broken hardware triggering massive amounts of interrupts and
 * grinding the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a DisplayPort sink is connected; hence, on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will have proper logic since
 * it will use i915_hotplug_work_func(), where this logic is handled.
 */
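
/*
 * Purely illustrative, not driver code: a minimal sketch of the userspace
 * side of the last step above, reacting to the DRM hotplug uevent with
 * libudev. The libudev calls are the real library API; rescan_outputs() is
 * a hypothetical placeholder for whatever the compositor does to re-probe
 * connectors and apply a new mode.
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
 *	struct pollfd pfd;
 *
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *	pfd.fd = udev_monitor_get_fd(mon);
 *	pfd.events = POLLIN;
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		struct udev_device *dev = udev_monitor_receive_device(mon);
 *
 *		if (!dev)
 *			continue;
 *		if (udev_device_get_property_value(dev, "HOTPLUG"))
 *			rescan_outputs();	// hypothetical: re-probe + modeset
 *		udev_device_unref(dev);
 *	}
 */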
/**
 * intel_hpd_pin_default - return the default pin associated with a certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get the associated pin for
 *
 * It is only valid and used by digital port encoders.
 *
 * Return: the pin that is associated with @port, or HPD_NONE if no pin is
 * hard associated with that @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);

	/*
	 * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
	 * based on the DDI rather than the PHY (i.e., the last two outputs
	 * should be HPD_PORT_{D,E} rather than {C,D}). Note that this differs
	 * from the behavior of both TGL+TGP and RKL+CMP.
	 */
	if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
		return HPD_PORT_A + port - PORT_A;

	switch (phy) {
	case PHY_F:
		return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
	case PHY_A ... PHY_E:
	case PHY_G ... PHY_I:
		return HPD_PORT_A + phy - PHY_A;
	default:
		MISSING_CASE(phy);
		return HPD_NONE;
	}
}
#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000
static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so we need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}
/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 towards this threshold,
 * and short IRQs count as +1. If this threshold is exceeded, it's considered
 * an IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return: true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
		return false;

	/* Reset the stats when we are outside the current detection window. */
	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}
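
/*
 * Worked example of the arithmetic above, assuming the default threshold
 * (HPD_STORM_DEFAULT_THRESHOLD, typically 50): five long IRQs within one
 * HPD_STORM_DETECT_PERIOD add 5 * 10 = 50 to the count, which does not yet
 * exceed the threshold; a sixth long IRQ (count 60 > 50) marks the pin
 * HPD_MARK_DISABLED. With short storm detection enabled, 51 short IRQs in
 * the same window have the same effect.
 */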
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	}

	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;

	connector->base.status =
		drm_helper_probe_detect(&connector->base, NULL, false);

	if (old_status == connector->base.status)
		return INTEL_HOTPLUG_UNCHANGED;

	drm_dbg_kms(&to_i915(dev)->drm,
		    "[CONNECTOR:%d:%s] status updated from %s to %s\n",
		    connector->base.base.id,
		    connector->base.name,
		    drm_get_connector_status_name(old_status),
		    drm_get_connector_status_name(connector->base.status));

	return INTEL_HOTPLUG_CHANGED;
}
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->hotplug.long_port_mask;
	dev_priv->hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->hotplug.short_port_mask;
	dev_priv->hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
	}
}
/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	spin_lock_irq(&i915->irq_lock);
	i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
	spin_unlock_irq(&i915->irq_lock);

	queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.hotplug_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;

	mutex_lock(&dev->mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hotplug.event_bits;
	dev_priv->hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->hotplug.retry_bits;
	dev_priv->hotplug.retry_bits = 0;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}
/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only the one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!has_hpd_pulse)
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected && dev_priv->display_irqs_enabled)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
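
/*
 * Illustrative only: a platform specific HPD handler is expected to decode
 * its hardware status bits into the two masks and then hand them to
 * intel_hpd_irq_handler(). The register read and decode helper below are
 * hypothetical placeholders for the real code in i915_irq.c.
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *	u32 hotplug_status = read_platform_hpd_status(dev_priv);	// hypothetical
 *
 *	decode_hpd_status(hotplug_status, &pin_mask, &long_mask);	// hypothetical
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */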
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug
 * and poll requests can run concurrently with other code, so locking rules
 * must be obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	for_each_hpd_pin(i) {
		dev_priv->hotplug.stats[i].count = 0;
		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
	}

	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
	schedule_work(&dev_priv->hotplug.poll_init_work);

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display_irqs_enabled)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}
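
/*
 * Rough ordering sketch for driver load/resume based on the description
 * above; the actual call sites live in the load and resume paths elsewhere
 * in the driver and may differ in detail:
 *
 *	intel_irq_init_hw(i915);	// interrupts enabled first
 *	intel_hpd_init(i915);		// then hotplug support
 */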
static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.poll_init_work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev->mode_config.mutex);

	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin != HPD_NONE &&
		    dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(dev);
}
/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	schedule_work(&dev_priv->hotplug.poll_init_work);
}
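
/*
 * Typical pairing, sketched from the descriptions above (the real call sites
 * are in the runtime PM / power well code and may differ in detail):
 *
 *	// entering a state where HPD interrupts are unavailable
 *	intel_hpd_poll_init(i915);	// fall back to connector polling
 *
 *	// HPD interrupts become available again
 *	intel_hpd_init(i915);		// restore interrupt driven hotplug
 */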
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
	INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);
}
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;
	dev_priv->hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}