2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * DOC: Panel Self Refresh (PSR/SRD)
27 * Since Haswell Display controller supports Panel Self-Refresh on display
28 * panels which have a remote frame buffer (RFB) implemented according to PSR
29 * spec in eDP1.3. PSR feature allows the display to go to lower standby states
30 * when system is idle but display is on as it eliminates display refresh
31 * request to DDR memory completely as long as the frame buffer for that
32 * display is unchanged.
34 * Panel Self Refresh must be supported by both Hardware (source) and
37 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
38 * to power down the link and memory controller. For DSI panels the same idea
39 * is called "manual mode".
41 * The implementation uses the hardware-based PSR support which automatically
42 * enters/exits self-refresh mode. The hardware takes care of sending the
43 * required DP aux message and could even retrain the link (that part isn't
44 * enabled yet though). The hardware also keeps track of any frontbuffer
45 * changes to know when to exit self-refresh mode again. Unfortunately that
46 * part doesn't work too well, hence why the i915 PSR support uses the
47 * software frontbuffer tracking to make sure it doesn't miss a screen
48 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
49 * get called by the frontbuffer tracking code. Note that because of locking
50 * issues the self-refresh re-enable code is done from a work queue, which
51 * must be correctly synchronized/cancelled when shutting down the pipe.
56 #include "intel_drv.h"
59 static bool psr_global_enabled(u32 debug
)
61 switch (debug
& I915_PSR_DEBUG_MODE_MASK
) {
62 case I915_PSR_DEBUG_DEFAULT
:
63 return i915_modparams
.enable_psr
;
64 case I915_PSR_DEBUG_DISABLE
:
71 static bool intel_psr2_enabled(struct drm_i915_private
*dev_priv
,
72 const struct intel_crtc_state
*crtc_state
)
74 /* Disable PSR2 by default for all platforms */
75 if (i915_modparams
.enable_psr
== -1)
78 /* Cannot enable DSC and PSR2 simultaneously */
79 WARN_ON(crtc_state
->dsc_params
.compression_enable
&&
80 crtc_state
->has_psr2
);
82 switch (dev_priv
->psr
.debug
& I915_PSR_DEBUG_MODE_MASK
) {
83 case I915_PSR_DEBUG_FORCE_PSR1
:
86 return crtc_state
->has_psr2
;
90 static int edp_psr_shift(enum transcoder cpu_transcoder
)
92 switch (cpu_transcoder
) {
94 return EDP_PSR_TRANSCODER_A_SHIFT
;
96 return EDP_PSR_TRANSCODER_B_SHIFT
;
98 return EDP_PSR_TRANSCODER_C_SHIFT
;
100 MISSING_CASE(cpu_transcoder
);
103 return EDP_PSR_TRANSCODER_EDP_SHIFT
;
107 void intel_psr_irq_control(struct drm_i915_private
*dev_priv
, u32 debug
)
109 u32 debug_mask
, mask
;
110 enum transcoder cpu_transcoder
;
111 u32 transcoders
= BIT(TRANSCODER_EDP
);
113 if (INTEL_GEN(dev_priv
) >= 8)
114 transcoders
|= BIT(TRANSCODER_A
) |
120 for_each_cpu_transcoder_masked(dev_priv
, cpu_transcoder
, transcoders
) {
121 int shift
= edp_psr_shift(cpu_transcoder
);
123 mask
|= EDP_PSR_ERROR(shift
);
124 debug_mask
|= EDP_PSR_POST_EXIT(shift
) |
125 EDP_PSR_PRE_ENTRY(shift
);
128 if (debug
& I915_PSR_DEBUG_IRQ
)
131 I915_WRITE(EDP_PSR_IMR
, ~mask
);
134 static void psr_event_print(u32 val
, bool psr2_enabled
)
136 DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val
);
137 if (val
& PSR_EVENT_PSR2_WD_TIMER_EXPIRE
)
138 DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
139 if ((val
& PSR_EVENT_PSR2_DISABLED
) && psr2_enabled
)
140 DRM_DEBUG_KMS("\tPSR2 disabled\n");
141 if (val
& PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN
)
142 DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
143 if (val
& PSR_EVENT_SU_CRC_FIFO_UNDERRUN
)
144 DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
145 if (val
& PSR_EVENT_GRAPHICS_RESET
)
146 DRM_DEBUG_KMS("\tGraphics reset\n");
147 if (val
& PSR_EVENT_PCH_INTERRUPT
)
148 DRM_DEBUG_KMS("\tPCH interrupt\n");
149 if (val
& PSR_EVENT_MEMORY_UP
)
150 DRM_DEBUG_KMS("\tMemory up\n");
151 if (val
& PSR_EVENT_FRONT_BUFFER_MODIFY
)
152 DRM_DEBUG_KMS("\tFront buffer modification\n");
153 if (val
& PSR_EVENT_WD_TIMER_EXPIRE
)
154 DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
155 if (val
& PSR_EVENT_PIPE_REGISTERS_UPDATE
)
156 DRM_DEBUG_KMS("\tPIPE registers updated\n");
157 if (val
& PSR_EVENT_REGISTER_UPDATE
)
158 DRM_DEBUG_KMS("\tRegister updated\n");
159 if (val
& PSR_EVENT_HDCP_ENABLE
)
160 DRM_DEBUG_KMS("\tHDCP enabled\n");
161 if (val
& PSR_EVENT_KVMR_SESSION_ENABLE
)
162 DRM_DEBUG_KMS("\tKVMR session enabled\n");
163 if (val
& PSR_EVENT_VBI_ENABLE
)
164 DRM_DEBUG_KMS("\tVBI enabled\n");
165 if (val
& PSR_EVENT_LPSP_MODE_EXIT
)
166 DRM_DEBUG_KMS("\tLPSP mode exited\n");
167 if ((val
& PSR_EVENT_PSR_DISABLE
) && !psr2_enabled
)
168 DRM_DEBUG_KMS("\tPSR disabled\n");
171 void intel_psr_irq_handler(struct drm_i915_private
*dev_priv
, u32 psr_iir
)
173 u32 transcoders
= BIT(TRANSCODER_EDP
);
174 enum transcoder cpu_transcoder
;
175 ktime_t time_ns
= ktime_get();
178 if (INTEL_GEN(dev_priv
) >= 8)
179 transcoders
|= BIT(TRANSCODER_A
) |
183 for_each_cpu_transcoder_masked(dev_priv
, cpu_transcoder
, transcoders
) {
184 int shift
= edp_psr_shift(cpu_transcoder
);
186 if (psr_iir
& EDP_PSR_ERROR(shift
)) {
187 DRM_WARN("[transcoder %s] PSR aux error\n",
188 transcoder_name(cpu_transcoder
));
190 dev_priv
->psr
.irq_aux_error
= true;
193 * If this interruption is not masked it will keep
194 * interrupting so fast that it prevents the scheduled
196 * Also after a PSR error, we don't want to arm PSR
197 * again so we don't care about unmask the interruption
198 * or unset irq_aux_error.
200 mask
|= EDP_PSR_ERROR(shift
);
203 if (psr_iir
& EDP_PSR_PRE_ENTRY(shift
)) {
204 dev_priv
->psr
.last_entry_attempt
= time_ns
;
205 DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
206 transcoder_name(cpu_transcoder
));
209 if (psr_iir
& EDP_PSR_POST_EXIT(shift
)) {
210 dev_priv
->psr
.last_exit
= time_ns
;
211 DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
212 transcoder_name(cpu_transcoder
));
214 if (INTEL_GEN(dev_priv
) >= 9) {
215 u32 val
= I915_READ(PSR_EVENT(cpu_transcoder
));
216 bool psr2_enabled
= dev_priv
->psr
.psr2_enabled
;
218 I915_WRITE(PSR_EVENT(cpu_transcoder
), val
);
219 psr_event_print(val
, psr2_enabled
);
225 mask
|= I915_READ(EDP_PSR_IMR
);
226 I915_WRITE(EDP_PSR_IMR
, mask
);
228 schedule_work(&dev_priv
->psr
.work
);
232 static bool intel_dp_get_colorimetry_status(struct intel_dp
*intel_dp
)
236 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_DPRX_FEATURE_ENUMERATION_LIST
,
239 return dprx
& DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED
;
242 static bool intel_dp_get_alpm_status(struct intel_dp
*intel_dp
)
244 uint8_t alpm_caps
= 0;
246 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_RECEIVER_ALPM_CAP
,
249 return alpm_caps
& DP_ALPM_CAP
;
252 static u8
intel_dp_get_sink_sync_latency(struct intel_dp
*intel_dp
)
254 u8 val
= 8; /* assume the worst if we can't read the value */
256 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
257 DP_SYNCHRONIZATION_LATENCY_IN_SINK
, &val
) == 1)
258 val
&= DP_MAX_RESYNC_FRAME_COUNT_MASK
;
260 DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
264 void intel_psr_init_dpcd(struct intel_dp
*intel_dp
)
266 struct drm_i915_private
*dev_priv
=
267 to_i915(dp_to_dig_port(intel_dp
)->base
.base
.dev
);
269 drm_dp_dpcd_read(&intel_dp
->aux
, DP_PSR_SUPPORT
, intel_dp
->psr_dpcd
,
270 sizeof(intel_dp
->psr_dpcd
));
272 if (!intel_dp
->psr_dpcd
[0])
274 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
275 intel_dp
->psr_dpcd
[0]);
277 if (!(intel_dp
->edp_dpcd
[1] & DP_EDP_SET_POWER_CAP
)) {
278 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
281 dev_priv
->psr
.sink_support
= true;
282 dev_priv
->psr
.sink_sync_latency
=
283 intel_dp_get_sink_sync_latency(intel_dp
);
285 WARN_ON(dev_priv
->psr
.dp
);
286 dev_priv
->psr
.dp
= intel_dp
;
288 if (INTEL_GEN(dev_priv
) >= 9 &&
289 (intel_dp
->psr_dpcd
[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED
)) {
290 bool y_req
= intel_dp
->psr_dpcd
[1] &
291 DP_PSR2_SU_Y_COORDINATE_REQUIRED
;
292 bool alpm
= intel_dp_get_alpm_status(intel_dp
);
295 * All panels that supports PSR version 03h (PSR2 +
296 * Y-coordinate) can handle Y-coordinates in VSC but we are
297 * only sure that it is going to be used when required by the
298 * panel. This way panel is capable to do selective update
299 * without a aux frame sync.
301 * To support PSR version 02h and PSR version 03h without
302 * Y-coordinate requirement panels we would need to enable
305 dev_priv
->psr
.sink_psr2_support
= y_req
&& alpm
;
306 DRM_DEBUG_KMS("PSR2 %ssupported\n",
307 dev_priv
->psr
.sink_psr2_support
? "" : "not ");
309 if (dev_priv
->psr
.sink_psr2_support
) {
310 dev_priv
->psr
.colorimetry_support
=
311 intel_dp_get_colorimetry_status(intel_dp
);
316 static void intel_psr_setup_vsc(struct intel_dp
*intel_dp
,
317 const struct intel_crtc_state
*crtc_state
)
319 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
320 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
321 struct edp_vsc_psr psr_vsc
;
323 if (dev_priv
->psr
.psr2_enabled
) {
324 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
325 memset(&psr_vsc
, 0, sizeof(psr_vsc
));
326 psr_vsc
.sdp_header
.HB0
= 0;
327 psr_vsc
.sdp_header
.HB1
= 0x7;
328 if (dev_priv
->psr
.colorimetry_support
) {
329 psr_vsc
.sdp_header
.HB2
= 0x5;
330 psr_vsc
.sdp_header
.HB3
= 0x13;
332 psr_vsc
.sdp_header
.HB2
= 0x4;
333 psr_vsc
.sdp_header
.HB3
= 0xe;
336 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
337 memset(&psr_vsc
, 0, sizeof(psr_vsc
));
338 psr_vsc
.sdp_header
.HB0
= 0;
339 psr_vsc
.sdp_header
.HB1
= 0x7;
340 psr_vsc
.sdp_header
.HB2
= 0x2;
341 psr_vsc
.sdp_header
.HB3
= 0x8;
344 intel_dig_port
->write_infoframe(&intel_dig_port
->base
,
346 DP_SDP_VSC
, &psr_vsc
, sizeof(psr_vsc
));
349 static void hsw_psr_setup_aux(struct intel_dp
*intel_dp
)
351 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
352 u32 aux_clock_divider
, aux_ctl
;
354 static const uint8_t aux_msg
[] = {
355 [0] = DP_AUX_NATIVE_WRITE
<< 4,
356 [1] = DP_SET_POWER
>> 8,
357 [2] = DP_SET_POWER
& 0xff,
359 [4] = DP_SET_POWER_D0
,
361 u32 psr_aux_mask
= EDP_PSR_AUX_CTL_TIME_OUT_MASK
|
362 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK
|
363 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK
|
364 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK
;
366 BUILD_BUG_ON(sizeof(aux_msg
) > 20);
367 for (i
= 0; i
< sizeof(aux_msg
); i
+= 4)
368 I915_WRITE(EDP_PSR_AUX_DATA(i
>> 2),
369 intel_dp_pack_aux(&aux_msg
[i
], sizeof(aux_msg
) - i
));
371 aux_clock_divider
= intel_dp
->get_aux_clock_divider(intel_dp
, 0);
373 /* Start with bits set for DDI_AUX_CTL register */
374 aux_ctl
= intel_dp
->get_aux_send_ctl(intel_dp
, sizeof(aux_msg
),
377 /* Select only valid bits for SRD_AUX_CTL */
378 aux_ctl
&= psr_aux_mask
;
379 I915_WRITE(EDP_PSR_AUX_CTL
, aux_ctl
);
382 static void intel_psr_enable_sink(struct intel_dp
*intel_dp
)
384 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
385 u8 dpcd_val
= DP_PSR_ENABLE
;
387 /* Enable ALPM at sink for psr2 */
388 if (dev_priv
->psr
.psr2_enabled
) {
389 drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_RECEIVER_ALPM_CONFIG
,
391 dpcd_val
|= DP_PSR_ENABLE_PSR2
;
394 if (dev_priv
->psr
.link_standby
)
395 dpcd_val
|= DP_PSR_MAIN_LINK_ACTIVE
;
396 if (!dev_priv
->psr
.psr2_enabled
&& INTEL_GEN(dev_priv
) >= 8)
397 dpcd_val
|= DP_PSR_CRC_VERIFICATION
;
398 drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_PSR_EN_CFG
, dpcd_val
);
400 drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
, DP_SET_POWER_D0
);
403 static void hsw_activate_psr1(struct intel_dp
*intel_dp
)
405 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
406 u32 max_sleep_time
= 0x1f;
407 u32 val
= EDP_PSR_ENABLE
;
409 /* Let's use 6 as the minimum to cover all known cases including the
410 * off-by-one issue that HW has in some cases.
412 int idle_frames
= max(6, dev_priv
->vbt
.psr
.idle_frames
);
414 /* sink_sync_latency of 8 means source has to wait for more than 8
415 * frames, we'll go with 9 frames for now
417 idle_frames
= max(idle_frames
, dev_priv
->psr
.sink_sync_latency
+ 1);
418 val
|= idle_frames
<< EDP_PSR_IDLE_FRAME_SHIFT
;
420 val
|= max_sleep_time
<< EDP_PSR_MAX_SLEEP_TIME_SHIFT
;
421 if (IS_HASWELL(dev_priv
))
422 val
|= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES
;
424 if (dev_priv
->psr
.link_standby
)
425 val
|= EDP_PSR_LINK_STANDBY
;
427 if (dev_priv
->vbt
.psr
.tp1_wakeup_time_us
== 0)
428 val
|= EDP_PSR_TP1_TIME_0us
;
429 else if (dev_priv
->vbt
.psr
.tp1_wakeup_time_us
<= 100)
430 val
|= EDP_PSR_TP1_TIME_100us
;
431 else if (dev_priv
->vbt
.psr
.tp1_wakeup_time_us
<= 500)
432 val
|= EDP_PSR_TP1_TIME_500us
;
434 val
|= EDP_PSR_TP1_TIME_2500us
;
436 if (dev_priv
->vbt
.psr
.tp2_tp3_wakeup_time_us
== 0)
437 val
|= EDP_PSR_TP2_TP3_TIME_0us
;
438 else if (dev_priv
->vbt
.psr
.tp2_tp3_wakeup_time_us
<= 100)
439 val
|= EDP_PSR_TP2_TP3_TIME_100us
;
440 else if (dev_priv
->vbt
.psr
.tp2_tp3_wakeup_time_us
<= 500)
441 val
|= EDP_PSR_TP2_TP3_TIME_500us
;
443 val
|= EDP_PSR_TP2_TP3_TIME_2500us
;
445 if (intel_dp_source_supports_hbr2(intel_dp
) &&
446 drm_dp_tps3_supported(intel_dp
->dpcd
))
447 val
|= EDP_PSR_TP1_TP3_SEL
;
449 val
|= EDP_PSR_TP1_TP2_SEL
;
451 if (INTEL_GEN(dev_priv
) >= 8)
452 val
|= EDP_PSR_CRC_ENABLE
;
454 val
|= I915_READ(EDP_PSR_CTL
) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK
;
455 I915_WRITE(EDP_PSR_CTL
, val
);
458 static void hsw_activate_psr2(struct intel_dp
*intel_dp
)
460 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
463 /* Let's use 6 as the minimum to cover all known cases including the
464 * off-by-one issue that HW has in some cases.
466 int idle_frames
= max(6, dev_priv
->vbt
.psr
.idle_frames
);
468 idle_frames
= max(idle_frames
, dev_priv
->psr
.sink_sync_latency
+ 1);
469 val
= idle_frames
<< EDP_PSR2_IDLE_FRAME_SHIFT
;
471 /* FIXME: selective update is probably totally broken because it doesn't
472 * mesh at all with our frontbuffer tracking. And the hw alone isn't
474 val
|= EDP_PSR2_ENABLE
| EDP_SU_TRACK_ENABLE
;
475 if (INTEL_GEN(dev_priv
) >= 10 || IS_GEMINILAKE(dev_priv
))
476 val
|= EDP_Y_COORDINATE_ENABLE
;
478 val
|= EDP_PSR2_FRAME_BEFORE_SU(dev_priv
->psr
.sink_sync_latency
+ 1);
480 if (dev_priv
->vbt
.psr
.tp2_tp3_wakeup_time_us
>= 0 &&
481 dev_priv
->vbt
.psr
.tp2_tp3_wakeup_time_us
<= 50)
482 val
|= EDP_PSR2_TP2_TIME_50us
;
483 else if (dev_priv
->vbt
.psr
.tp2_tp3_wakeup_time_us
<= 100)
484 val
|= EDP_PSR2_TP2_TIME_100us
;
485 else if (dev_priv
->vbt
.psr
.tp2_tp3_wakeup_time_us
<= 500)
486 val
|= EDP_PSR2_TP2_TIME_500us
;
488 val
|= EDP_PSR2_TP2_TIME_2500us
;
490 I915_WRITE(EDP_PSR2_CTL
, val
);
493 static bool intel_psr2_config_valid(struct intel_dp
*intel_dp
,
494 struct intel_crtc_state
*crtc_state
)
496 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
497 int crtc_hdisplay
= crtc_state
->base
.adjusted_mode
.crtc_hdisplay
;
498 int crtc_vdisplay
= crtc_state
->base
.adjusted_mode
.crtc_vdisplay
;
499 int psr_max_h
= 0, psr_max_v
= 0;
502 * FIXME psr2_support is messed up. It's both computed
503 * dynamically during PSR enable, and extracted from sink
504 * caps during eDP detection.
506 if (!dev_priv
->psr
.sink_psr2_support
)
510 * DSC and PSR2 cannot be enabled simultaneously. If a requested
511 * resolution requires DSC to be enabled, priority is given to DSC
514 if (crtc_state
->dsc_params
.compression_enable
) {
515 DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
519 if (INTEL_GEN(dev_priv
) >= 10 || IS_GEMINILAKE(dev_priv
)) {
522 } else if (IS_GEN9(dev_priv
)) {
527 if (crtc_hdisplay
> psr_max_h
|| crtc_vdisplay
> psr_max_v
) {
528 DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
529 crtc_hdisplay
, crtc_vdisplay
,
530 psr_max_h
, psr_max_v
);
537 void intel_psr_compute_config(struct intel_dp
*intel_dp
,
538 struct intel_crtc_state
*crtc_state
)
540 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
541 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
542 const struct drm_display_mode
*adjusted_mode
=
543 &crtc_state
->base
.adjusted_mode
;
546 if (!CAN_PSR(dev_priv
))
549 if (intel_dp
!= dev_priv
->psr
.dp
)
553 * HSW spec explicitly says PSR is tied to port A.
554 * BDW+ platforms with DDI implementation of PSR have different
555 * PSR registers per transcoder and we only implement transcoder EDP
556 * ones. Since by Display design transcoder EDP is tied to port A
557 * we can safely escape based on the port A.
559 if (dig_port
->base
.port
!= PORT_A
) {
560 DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
564 if (dev_priv
->psr
.sink_not_reliable
) {
565 DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
569 if (IS_HASWELL(dev_priv
) &&
570 adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
) {
571 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
575 psr_setup_time
= drm_dp_psr_setup_time(intel_dp
->psr_dpcd
);
576 if (psr_setup_time
< 0) {
577 DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
578 intel_dp
->psr_dpcd
[1]);
582 if (intel_usecs_to_scanlines(adjusted_mode
, psr_setup_time
) >
583 adjusted_mode
->crtc_vtotal
- adjusted_mode
->crtc_vdisplay
- 1) {
584 DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
589 crtc_state
->has_psr
= true;
590 crtc_state
->has_psr2
= intel_psr2_config_valid(intel_dp
, crtc_state
);
593 static void intel_psr_activate(struct intel_dp
*intel_dp
)
595 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
597 if (INTEL_GEN(dev_priv
) >= 9)
598 WARN_ON(I915_READ(EDP_PSR2_CTL
) & EDP_PSR2_ENABLE
);
599 WARN_ON(I915_READ(EDP_PSR_CTL
) & EDP_PSR_ENABLE
);
600 WARN_ON(dev_priv
->psr
.active
);
601 lockdep_assert_held(&dev_priv
->psr
.lock
);
603 /* psr1 and psr2 are mutually exclusive.*/
604 if (dev_priv
->psr
.psr2_enabled
)
605 hsw_activate_psr2(intel_dp
);
607 hsw_activate_psr1(intel_dp
);
609 dev_priv
->psr
.active
= true;
612 static i915_reg_t
gen9_chicken_trans_reg(struct drm_i915_private
*dev_priv
,
613 enum transcoder cpu_transcoder
)
615 static const i915_reg_t regs
[] = {
616 [TRANSCODER_A
] = CHICKEN_TRANS_A
,
617 [TRANSCODER_B
] = CHICKEN_TRANS_B
,
618 [TRANSCODER_C
] = CHICKEN_TRANS_C
,
619 [TRANSCODER_EDP
] = CHICKEN_TRANS_EDP
,
622 WARN_ON(INTEL_GEN(dev_priv
) < 9);
624 if (WARN_ON(cpu_transcoder
>= ARRAY_SIZE(regs
) ||
625 !regs
[cpu_transcoder
].reg
))
626 cpu_transcoder
= TRANSCODER_A
;
628 return regs
[cpu_transcoder
];
631 static void intel_psr_enable_source(struct intel_dp
*intel_dp
,
632 const struct intel_crtc_state
*crtc_state
)
634 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
635 enum transcoder cpu_transcoder
= crtc_state
->cpu_transcoder
;
638 /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
639 * use hardcoded values PSR AUX transactions
641 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
642 hsw_psr_setup_aux(intel_dp
);
644 if (dev_priv
->psr
.psr2_enabled
) {
645 i915_reg_t reg
= gen9_chicken_trans_reg(dev_priv
,
647 u32 chicken
= I915_READ(reg
);
649 if (IS_GEN9(dev_priv
) && !IS_GEMINILAKE(dev_priv
))
650 chicken
|= (PSR2_VSC_ENABLE_PROG_HEADER
651 | PSR2_ADD_VERTICAL_LINE_COUNT
);
654 chicken
&= ~VSC_DATA_SEL_SOFTWARE_CONTROL
;
655 I915_WRITE(reg
, chicken
);
659 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
660 * mask LPSP to avoid dependency on other drivers that might block
661 * runtime_pm besides preventing other hw tracking issues now we
662 * can rely on frontbuffer tracking.
664 mask
= EDP_PSR_DEBUG_MASK_MEMUP
|
665 EDP_PSR_DEBUG_MASK_HPD
|
666 EDP_PSR_DEBUG_MASK_LPSP
|
667 EDP_PSR_DEBUG_MASK_MAX_SLEEP
;
669 if (INTEL_GEN(dev_priv
) < 11)
670 mask
|= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE
;
672 I915_WRITE(EDP_PSR_DEBUG
, mask
);
675 static void intel_psr_enable_locked(struct drm_i915_private
*dev_priv
,
676 const struct intel_crtc_state
*crtc_state
)
678 struct intel_dp
*intel_dp
= dev_priv
->psr
.dp
;
680 if (dev_priv
->psr
.enabled
)
683 DRM_DEBUG_KMS("Enabling PSR%s\n",
684 dev_priv
->psr
.psr2_enabled
? "2" : "1");
685 intel_psr_setup_vsc(intel_dp
, crtc_state
);
686 intel_psr_enable_sink(intel_dp
);
687 intel_psr_enable_source(intel_dp
, crtc_state
);
688 dev_priv
->psr
.enabled
= true;
690 intel_psr_activate(intel_dp
);
694 * intel_psr_enable - Enable PSR
695 * @intel_dp: Intel DP
696 * @crtc_state: new CRTC state
698 * This function can only be called after the pipe is fully trained and enabled.
700 void intel_psr_enable(struct intel_dp
*intel_dp
,
701 const struct intel_crtc_state
*crtc_state
)
703 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
705 if (!crtc_state
->has_psr
)
708 if (WARN_ON(!CAN_PSR(dev_priv
)))
711 WARN_ON(dev_priv
->drrs
.dp
);
713 mutex_lock(&dev_priv
->psr
.lock
);
714 if (dev_priv
->psr
.prepared
) {
715 DRM_DEBUG_KMS("PSR already in use\n");
719 dev_priv
->psr
.psr2_enabled
= intel_psr2_enabled(dev_priv
, crtc_state
);
720 dev_priv
->psr
.busy_frontbuffer_bits
= 0;
721 dev_priv
->psr
.prepared
= true;
722 dev_priv
->psr
.pipe
= to_intel_crtc(crtc_state
->base
.crtc
)->pipe
;
724 if (psr_global_enabled(dev_priv
->psr
.debug
))
725 intel_psr_enable_locked(dev_priv
, crtc_state
);
727 DRM_DEBUG_KMS("PSR disabled by flag\n");
730 mutex_unlock(&dev_priv
->psr
.lock
);
733 static void intel_psr_exit(struct drm_i915_private
*dev_priv
)
737 if (!dev_priv
->psr
.active
) {
738 if (INTEL_GEN(dev_priv
) >= 9)
739 WARN_ON(I915_READ(EDP_PSR2_CTL
) & EDP_PSR2_ENABLE
);
740 WARN_ON(I915_READ(EDP_PSR_CTL
) & EDP_PSR_ENABLE
);
744 if (dev_priv
->psr
.psr2_enabled
) {
745 val
= I915_READ(EDP_PSR2_CTL
);
746 WARN_ON(!(val
& EDP_PSR2_ENABLE
));
747 I915_WRITE(EDP_PSR2_CTL
, val
& ~EDP_PSR2_ENABLE
);
749 val
= I915_READ(EDP_PSR_CTL
);
750 WARN_ON(!(val
& EDP_PSR_ENABLE
));
751 I915_WRITE(EDP_PSR_CTL
, val
& ~EDP_PSR_ENABLE
);
753 dev_priv
->psr
.active
= false;
756 static void intel_psr_disable_locked(struct intel_dp
*intel_dp
)
758 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
759 i915_reg_t psr_status
;
762 lockdep_assert_held(&dev_priv
->psr
.lock
);
764 if (!dev_priv
->psr
.enabled
)
767 DRM_DEBUG_KMS("Disabling PSR%s\n",
768 dev_priv
->psr
.psr2_enabled
? "2" : "1");
770 intel_psr_exit(dev_priv
);
772 if (dev_priv
->psr
.psr2_enabled
) {
773 psr_status
= EDP_PSR2_STATUS
;
774 psr_status_mask
= EDP_PSR2_STATUS_STATE_MASK
;
776 psr_status
= EDP_PSR_STATUS
;
777 psr_status_mask
= EDP_PSR_STATUS_STATE_MASK
;
780 /* Wait till PSR is idle */
781 if (intel_wait_for_register(dev_priv
, psr_status
, psr_status_mask
, 0,
783 DRM_ERROR("Timed out waiting PSR idle state\n");
785 /* Disable PSR on Sink */
786 drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_PSR_EN_CFG
, 0);
788 dev_priv
->psr
.enabled
= false;
792 * intel_psr_disable - Disable PSR
793 * @intel_dp: Intel DP
794 * @old_crtc_state: old CRTC state
796 * This function needs to be called before disabling pipe.
798 void intel_psr_disable(struct intel_dp
*intel_dp
,
799 const struct intel_crtc_state
*old_crtc_state
)
801 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
803 if (!old_crtc_state
->has_psr
)
806 if (WARN_ON(!CAN_PSR(dev_priv
)))
809 mutex_lock(&dev_priv
->psr
.lock
);
810 if (!dev_priv
->psr
.prepared
) {
811 mutex_unlock(&dev_priv
->psr
.lock
);
815 intel_psr_disable_locked(intel_dp
);
817 dev_priv
->psr
.prepared
= false;
818 mutex_unlock(&dev_priv
->psr
.lock
);
819 cancel_work_sync(&dev_priv
->psr
.work
);
823 * intel_psr_wait_for_idle - wait for PSR1 to idle
824 * @new_crtc_state: new CRTC state
825 * @out_value: PSR status in case of failure
827 * This function is expected to be called from pipe_update_start() where it is
828 * not expected to race with PSR enable or disable.
830 * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
832 int intel_psr_wait_for_idle(const struct intel_crtc_state
*new_crtc_state
,
835 struct intel_crtc
*crtc
= to_intel_crtc(new_crtc_state
->base
.crtc
);
836 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
838 if (!dev_priv
->psr
.enabled
|| !new_crtc_state
->has_psr
)
841 /* FIXME: Update this for PSR2 if we need to wait for idle */
842 if (READ_ONCE(dev_priv
->psr
.psr2_enabled
))
846 * From bspec: Panel Self Refresh (BDW+)
847 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
848 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
849 * defensive enough to cover everything.
852 return __intel_wait_for_register(dev_priv
, EDP_PSR_STATUS
,
853 EDP_PSR_STATUS_STATE_MASK
,
854 EDP_PSR_STATUS_STATE_IDLE
, 2, 50,
858 static bool __psr_wait_for_idle_locked(struct drm_i915_private
*dev_priv
)
864 if (!dev_priv
->psr
.enabled
)
867 if (dev_priv
->psr
.psr2_enabled
) {
868 reg
= EDP_PSR2_STATUS
;
869 mask
= EDP_PSR2_STATUS_STATE_MASK
;
871 reg
= EDP_PSR_STATUS
;
872 mask
= EDP_PSR_STATUS_STATE_MASK
;
875 mutex_unlock(&dev_priv
->psr
.lock
);
877 err
= intel_wait_for_register(dev_priv
, reg
, mask
, 0, 50);
879 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
881 /* After the unlocked wait, verify that PSR is still wanted! */
882 mutex_lock(&dev_priv
->psr
.lock
);
883 return err
== 0 && dev_priv
->psr
.enabled
;
886 static bool switching_psr(struct drm_i915_private
*dev_priv
,
887 struct intel_crtc_state
*crtc_state
,
890 /* Can't switch psr state anyway if PSR2 is not supported. */
891 if (!crtc_state
|| !crtc_state
->has_psr2
)
894 if (dev_priv
->psr
.psr2_enabled
&& mode
== I915_PSR_DEBUG_FORCE_PSR1
)
897 if (!dev_priv
->psr
.psr2_enabled
&& mode
!= I915_PSR_DEBUG_FORCE_PSR1
)
903 int intel_psr_set_debugfs_mode(struct drm_i915_private
*dev_priv
,
904 struct drm_modeset_acquire_ctx
*ctx
,
907 struct drm_device
*dev
= &dev_priv
->drm
;
908 struct drm_connector_state
*conn_state
;
909 struct intel_crtc_state
*crtc_state
= NULL
;
910 struct drm_crtc_commit
*commit
;
911 struct drm_crtc
*crtc
;
915 u32 mode
= val
& I915_PSR_DEBUG_MODE_MASK
;
917 if (val
& ~(I915_PSR_DEBUG_IRQ
| I915_PSR_DEBUG_MODE_MASK
) ||
918 mode
> I915_PSR_DEBUG_FORCE_PSR1
) {
919 DRM_DEBUG_KMS("Invalid debug mask %llx\n", val
);
923 ret
= drm_modeset_lock(&dev
->mode_config
.connection_mutex
, ctx
);
927 /* dev_priv->psr.dp should be set once and then never touched again. */
928 dp
= READ_ONCE(dev_priv
->psr
.dp
);
929 conn_state
= dp
->attached_connector
->base
.state
;
930 crtc
= conn_state
->crtc
;
932 ret
= drm_modeset_lock(&crtc
->mutex
, ctx
);
936 crtc_state
= to_intel_crtc_state(crtc
->state
);
937 commit
= crtc_state
->base
.commit
;
939 commit
= conn_state
->commit
;
942 ret
= wait_for_completion_interruptible(&commit
->hw_done
);
947 ret
= mutex_lock_interruptible(&dev_priv
->psr
.lock
);
951 enable
= psr_global_enabled(val
);
953 if (!enable
|| switching_psr(dev_priv
, crtc_state
, mode
))
954 intel_psr_disable_locked(dev_priv
->psr
.dp
);
956 dev_priv
->psr
.debug
= val
;
958 dev_priv
->psr
.psr2_enabled
= intel_psr2_enabled(dev_priv
, crtc_state
);
960 intel_psr_irq_control(dev_priv
, dev_priv
->psr
.debug
);
962 if (dev_priv
->psr
.prepared
&& enable
)
963 intel_psr_enable_locked(dev_priv
, crtc_state
);
965 mutex_unlock(&dev_priv
->psr
.lock
);
969 static void intel_psr_handle_irq(struct drm_i915_private
*dev_priv
)
971 struct i915_psr
*psr
= &dev_priv
->psr
;
973 intel_psr_disable_locked(psr
->dp
);
974 psr
->sink_not_reliable
= true;
975 /* let's make sure that sink is awaken */
976 drm_dp_dpcd_writeb(&psr
->dp
->aux
, DP_SET_POWER
, DP_SET_POWER_D0
);
979 static void intel_psr_work(struct work_struct
*work
)
981 struct drm_i915_private
*dev_priv
=
982 container_of(work
, typeof(*dev_priv
), psr
.work
);
984 mutex_lock(&dev_priv
->psr
.lock
);
986 if (!dev_priv
->psr
.enabled
)
989 if (READ_ONCE(dev_priv
->psr
.irq_aux_error
))
990 intel_psr_handle_irq(dev_priv
);
993 * We have to make sure PSR is ready for re-enable
994 * otherwise it keeps disabled until next full enable/disable cycle.
995 * PSR might take some time to get fully disabled
996 * and be ready for re-enable.
998 if (!__psr_wait_for_idle_locked(dev_priv
))
1002 * The delayed work can race with an invalidate hence we need to
1003 * recheck. Since psr_flush first clears this and then reschedules we
1004 * won't ever miss a flush when bailing out here.
1006 if (dev_priv
->psr
.busy_frontbuffer_bits
|| dev_priv
->psr
.active
)
1009 intel_psr_activate(dev_priv
->psr
.dp
);
1011 mutex_unlock(&dev_priv
->psr
.lock
);
1015 * intel_psr_invalidate - Invalidade PSR
1016 * @dev_priv: i915 device
1017 * @frontbuffer_bits: frontbuffer plane tracking bits
1018 * @origin: which operation caused the invalidate
1020 * Since the hardware frontbuffer tracking has gaps we need to integrate
1021 * with the software frontbuffer tracking. This function gets called every
1022 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
1023 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
1025 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
1027 void intel_psr_invalidate(struct drm_i915_private
*dev_priv
,
1028 unsigned frontbuffer_bits
, enum fb_op_origin origin
)
1030 if (!CAN_PSR(dev_priv
))
1033 if (origin
== ORIGIN_FLIP
)
1036 mutex_lock(&dev_priv
->psr
.lock
);
1037 if (!dev_priv
->psr
.enabled
) {
1038 mutex_unlock(&dev_priv
->psr
.lock
);
1042 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(dev_priv
->psr
.pipe
);
1043 dev_priv
->psr
.busy_frontbuffer_bits
|= frontbuffer_bits
;
1045 if (frontbuffer_bits
)
1046 intel_psr_exit(dev_priv
);
1048 mutex_unlock(&dev_priv
->psr
.lock
);
1052 * intel_psr_flush - Flush PSR
1053 * @dev_priv: i915 device
1054 * @frontbuffer_bits: frontbuffer plane tracking bits
1055 * @origin: which operation caused the flush
1057 * Since the hardware frontbuffer tracking has gaps we need to integrate
1058 * with the software frontbuffer tracking. This function gets called every
1059 * time frontbuffer rendering has completed and flushed out to memory. PSR
1060 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
1062 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1064 void intel_psr_flush(struct drm_i915_private
*dev_priv
,
1065 unsigned frontbuffer_bits
, enum fb_op_origin origin
)
1067 if (!CAN_PSR(dev_priv
))
1070 if (origin
== ORIGIN_FLIP
)
1073 mutex_lock(&dev_priv
->psr
.lock
);
1074 if (!dev_priv
->psr
.enabled
) {
1075 mutex_unlock(&dev_priv
->psr
.lock
);
1079 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(dev_priv
->psr
.pipe
);
1080 dev_priv
->psr
.busy_frontbuffer_bits
&= ~frontbuffer_bits
;
1082 /* By definition flush = invalidate + flush */
1083 if (frontbuffer_bits
) {
1085 * Display WA #0884: all
1086 * This documented WA for bxt can be safely applied
1087 * broadly so we can force HW tracking to exit PSR
1088 * instead of disabling and re-enabling.
1089 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1090 * but it makes more sense write to the current active
1093 I915_WRITE(CURSURFLIVE(dev_priv
->psr
.pipe
), 0);
1096 if (!dev_priv
->psr
.active
&& !dev_priv
->psr
.busy_frontbuffer_bits
)
1097 schedule_work(&dev_priv
->psr
.work
);
1098 mutex_unlock(&dev_priv
->psr
.lock
);
1102 * intel_psr_init - Init basic PSR work and mutex.
1103 * @dev_priv: i915 device private
1105 * This function is called only once at driver load to initialize basic
1108 void intel_psr_init(struct drm_i915_private
*dev_priv
)
1112 if (!HAS_PSR(dev_priv
))
1115 dev_priv
->psr_mmio_base
= IS_HASWELL(dev_priv
) ?
1116 HSW_EDP_PSR_BASE
: BDW_EDP_PSR_BASE
;
1118 if (!dev_priv
->psr
.sink_support
)
1121 if (i915_modparams
.enable_psr
== -1)
1122 if (INTEL_GEN(dev_priv
) < 9 || !dev_priv
->vbt
.psr
.enable
)
1123 i915_modparams
.enable_psr
= 0;
1126 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1127 * will still keep the error set even after the reset done in the
1128 * irq_preinstall and irq_uninstall hooks.
1129 * And enabling in this situation cause the screen to freeze in the
1130 * first time that PSR HW tries to activate so lets keep PSR disabled
1131 * to avoid any rendering problems.
1133 val
= I915_READ(EDP_PSR_IIR
);
1134 val
&= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP
));
1136 DRM_DEBUG_KMS("PSR interruption error set\n");
1137 dev_priv
->psr
.sink_not_reliable
= true;
1141 /* Set link_standby x link_off defaults */
1142 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
1143 /* HSW and BDW require workarounds that we don't implement. */
1144 dev_priv
->psr
.link_standby
= false;
1146 /* For new platforms let's respect VBT back again */
1147 dev_priv
->psr
.link_standby
= dev_priv
->vbt
.psr
.full_link
;
1149 INIT_WORK(&dev_priv
->psr
.work
, intel_psr_work
);
1150 mutex_init(&dev_priv
->psr
.lock
);
1153 void intel_psr_short_pulse(struct intel_dp
*intel_dp
)
1155 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
1156 struct i915_psr
*psr
= &dev_priv
->psr
;
1158 const u8 errors
= DP_PSR_RFB_STORAGE_ERROR
|
1159 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR
|
1160 DP_PSR_LINK_CRC_ERROR
;
1162 if (!CAN_PSR(dev_priv
) || !intel_dp_is_edp(intel_dp
))
1165 mutex_lock(&psr
->lock
);
1167 if (!psr
->enabled
|| psr
->dp
!= intel_dp
)
1170 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_PSR_STATUS
, &val
) != 1) {
1171 DRM_ERROR("PSR_STATUS dpcd read failed\n");
1175 if ((val
& DP_PSR_SINK_STATE_MASK
) == DP_PSR_SINK_INTERNAL_ERROR
) {
1176 DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
1177 intel_psr_disable_locked(intel_dp
);
1178 psr
->sink_not_reliable
= true;
1181 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_PSR_ERROR_STATUS
, &val
) != 1) {
1182 DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
1186 if (val
& DP_PSR_RFB_STORAGE_ERROR
)
1187 DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
1188 if (val
& DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR
)
1189 DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
1190 if (val
& DP_PSR_LINK_CRC_ERROR
)
1191 DRM_ERROR("PSR Link CRC error, disabling PSR\n");
1194 DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
1197 intel_psr_disable_locked(intel_dp
);
1198 psr
->sink_not_reliable
= true;
1200 /* clear status register */
1201 drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_PSR_ERROR_STATUS
, val
);
1203 mutex_unlock(&psr
->lock
);
1206 bool intel_psr_enabled(struct intel_dp
*intel_dp
)
1208 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
1211 if (!CAN_PSR(dev_priv
) || !intel_dp_is_edp(intel_dp
))
1214 mutex_lock(&dev_priv
->psr
.lock
);
1215 ret
= (dev_priv
->psr
.dp
== intel_dp
&& dev_priv
->psr
.enabled
);
1216 mutex_unlock(&dev_priv
->psr
.lock
);