1 /* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 * Sean Paul <seanpaul@chromium.org>
 */
9 #include <drm/drm_hdcp.h>
10 #include <linux/i2c.h>
11 #include <linux/random.h>
13 #include "intel_drv.h"
16 #define KEY_LOAD_TRIES 5
17 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
/*
 * intel_hdcp_is_ksv_valid - validate a receiver KSV per the HDCP 1.4 spec.
 * Sums the set bits of each KSV byte with hweight8(); a compliant KSV has
 * exactly 20 ones out of 40 bits.
 * NOTE(review): the declarations of the loop index / ones counter and the
 * final "== 20" comparison are elided in this chunk — confirm against the
 * full source.
 */
20 bool intel_hdcp_is_ksv_valid(u8
*ksv
)
23 /* KSV has 20 1's and 20 0's */
24 for (i
= 0; i
< DRM_HDCP_KSV_LEN
; i
++)
25 ones
+= hweight8(ksv
[i
]);
/*
 * intel_hdcp_read_valid_bksv - read the sink's Bksv via the shim and
 * re-read (up to 2 tries) until it passes intel_hdcp_is_ksv_valid().
 * NOTE(review): the success/failure return paths are elided in this chunk.
 */
33 int intel_hdcp_read_valid_bksv(struct intel_digital_port
*intel_dig_port
,
34 const struct intel_hdcp_shim
*shim
, u8
*bksv
)
36 int ret
, i
, tries
= 2;
38 /* HDCP spec states that we must retry the bksv if it is invalid */
39 for (i
= 0; i
< tries
; i
++) {
40 ret
= shim
->read_bksv(intel_dig_port
, bksv
);
43 if (intel_hdcp_is_ksv_valid(bksv
))
47 DRM_DEBUG_KMS("Bksv is invalid\n");
/*
 * intel_hdcp_capable - query whether both the platform and the sink can do
 * HDCP 1.4. Prefers the shim's explicit hdcp_capable() hook when present;
 * otherwise falls back to attempting a valid-Bksv read.
 * NOTE(review): the declarations of 'capable'/'bksv' and the return
 * statements are elided in this chunk.
 */
54 /* Is HDCP1.4 capable on Platform and Sink */
55 bool intel_hdcp_capable(struct intel_connector
*connector
)
57 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
58 const struct intel_hdcp_shim
*shim
= connector
->hdcp
.shim
;
65 if (shim
->hdcp_capable
) {
66 shim
->hdcp_capable(intel_dig_port
, &capable
);
68 if (!intel_hdcp_read_valid_bksv(intel_dig_port
, shim
, bksv
))
/*
 * intel_hdcp_poll_ksv_fifo - wait for the repeater's KSV list to become
 * ready, polling shim->read_ksv_ready() every 1000us for up to 5 seconds
 * (the spec's maximum). The wait also terminates early on a read error.
 * NOTE(review): 'ret'/'read_ret'/'ksv_ready' declarations and the final
 * return are elided in this chunk.
 */
75 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port
*intel_dig_port
,
76 const struct intel_hdcp_shim
*shim
)
81 /* Poll for ksv list ready (spec says max time allowed is 5s) */
82 ret
= __wait_for(read_ret
= shim
->read_ksv_ready(intel_dig_port
,
84 read_ret
|| ksv_ready
, 5 * 1000 * 1000, 1000,
/*
 * hdcp_key_loadable - report whether the HDCP key can be loaded right now.
 * The key load requires power well #1 (PG1) to be enabled, so this walks
 * the power wells under power_domains->lock looking for the matching id.
 * On HSW/BDW the relevant well id is HSW_DISP_PW_GLOBAL (hardware loads
 * the key itself on resume there); the BXT+ id assignment is elided in
 * this chunk.
 */
96 static bool hdcp_key_loadable(struct drm_i915_private
*dev_priv
)
98 struct i915_power_domains
*power_domains
= &dev_priv
->power_domains
;
99 struct i915_power_well
*power_well
;
100 enum i915_power_well_id id
;
101 bool enabled
= false;
104 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
105 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
107 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
108 id
= HSW_DISP_PW_GLOBAL
;
112 mutex_lock(&power_domains
->lock
);
114 /* PG1 (power well #1) needs to be enabled */
115 for_each_power_well(dev_priv
, power_well
) {
116 if (power_well
->desc
->id
== id
) {
117 enabled
= power_well
->desc
->ops
->is_enabled(dev_priv
,
122 mutex_unlock(&power_domains
->lock
);
125 * Another req for hdcp key loadability is enabled state of pll for
126 * cdclk. Without active crtc we wont land here. So we are assuming that
127 * cdclk is already on.
/*
 * intel_hdcp_clear_keys - wipe the loaded HDCP key and reset the key
 * status register. Triggers the clear via HDCP_KEY_CONF, then writes the
 * done/status/fuse bits to HDCP_KEY_STATUS — presumably write-to-clear
 * semantics; confirm against the hardware spec.
 */
133 static void intel_hdcp_clear_keys(struct drm_i915_private
*dev_priv
)
135 I915_WRITE(HDCP_KEY_CONF
, HDCP_CLEAR_KEYS_TRIGGER
);
136 I915_WRITE(HDCP_KEY_STATUS
, HDCP_KEY_LOAD_DONE
| HDCP_KEY_LOAD_STATUS
|
137 HDCP_FUSE_IN_PROGRESS
| HDCP_FUSE_ERROR
| HDCP_FUSE_DONE
);
/*
 * intel_hdcp_load_keys - load the HDCP 1.4 key into display HW.
 * Fast path: key already loaded (LOAD_DONE + LOAD_STATUS set). On HSW/BDW
 * the HW loads the key itself, so an unloaded key is an error. GEN9_BC
 * triggers the load through the GT pcode mailbox (under pcu_lock); other
 * BXT+ platforms write HDCP_KEY_LOAD_TRIGGER directly. Afterwards the
 * code waits (500us) for HDCP_KEY_LOAD_DONE and then triggers the Aksv
 * send to the PCH.
 * NOTE(review): several error-return lines and the 'val'/'ret'
 * declarations are elided in this chunk.
 */
140 static int intel_hdcp_load_keys(struct drm_i915_private
*dev_priv
)
145 val
= I915_READ(HDCP_KEY_STATUS
);
146 if ((val
& HDCP_KEY_LOAD_DONE
) && (val
& HDCP_KEY_LOAD_STATUS
))
150 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
151 * out of reset. So if Key is not already loaded, its an error state.
153 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
154 if (!(I915_READ(HDCP_KEY_STATUS
) & HDCP_KEY_LOAD_DONE
))
158 * Initiate loading the HDCP key from fuses.
160 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
161 * platforms except BXT and GLK, differ in the key load trigger process
162 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
164 if (IS_GEN9_BC(dev_priv
)) {
165 mutex_lock(&dev_priv
->pcu_lock
);
166 ret
= sandybridge_pcode_write(dev_priv
,
167 SKL_PCODE_LOAD_HDCP_KEYS
, 1);
168 mutex_unlock(&dev_priv
->pcu_lock
);
170 DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
175 I915_WRITE(HDCP_KEY_CONF
, HDCP_KEY_LOAD_TRIGGER
);
178 /* Wait for the keys to load (500us) */
179 ret
= __intel_wait_for_register(dev_priv
, HDCP_KEY_STATUS
,
180 HDCP_KEY_LOAD_DONE
, HDCP_KEY_LOAD_DONE
,
184 else if (!(val
& HDCP_KEY_LOAD_STATUS
))
187 /* Send Aksv over to PCH display for use in authentication */
188 I915_WRITE(HDCP_KEY_CONF
, HDCP_AKSV_SEND_TRIGGER
);
/*
 * intel_write_sha_text - push one 32-bit word of the SHA-1 input stream
 * into HDCP_SHA_TEXT and wait (1ms) for the engine to signal
 * HDCP_SHA1_READY before the caller writes the next word. Logs and fails
 * on timeout.
 * NOTE(review): the return statements (updated index / -ETIMEDOUT) are
 * elided in this chunk.
 */
193 /* Returns updated SHA-1 index */
194 static int intel_write_sha_text(struct drm_i915_private
*dev_priv
, u32 sha_text
)
196 I915_WRITE(HDCP_SHA_TEXT
, sha_text
);
197 if (intel_wait_for_register(dev_priv
, HDCP_REP_CTL
,
198 HDCP_SHA1_READY
, HDCP_SHA1_READY
, 1)) {
199 DRM_ERROR("Timed out waiting for SHA1 ready\n");
/*
 * intel_hdcp_get_repeater_ctl - map the digital port to the
 * HDCP_REP_CTL bits (REP_PRESENT | SHA1_M0) for that DDI. Unknown ports
 * are logged as an error.
 * NOTE(review): the switch/case framing around the per-port returns is
 * elided in this chunk.
 */
206 u32
intel_hdcp_get_repeater_ctl(struct intel_digital_port
*intel_dig_port
)
208 enum port port
= intel_dig_port
->base
.port
;
211 return HDCP_DDIA_REP_PRESENT
| HDCP_DDIA_SHA1_M0
;
213 return HDCP_DDIB_REP_PRESENT
| HDCP_DDIB_SHA1_M0
;
215 return HDCP_DDIC_REP_PRESENT
| HDCP_DDIC_SHA1_M0
;
217 return HDCP_DDID_REP_PRESENT
| HDCP_DDID_SHA1_M0
;
219 return HDCP_DDIE_REP_PRESENT
| HDCP_DDIE_SHA1_M0
;
223 DRM_ERROR("Unknown port %d\n", port
);
/*
 * intel_hdcp_validate_v_prime - hardware-assisted SHA-1 verification of
 * the repeater's V' (HDCP 1.4 second part of authentication).
 *
 * Writes the received V' parts into HDCP_SHA_V_PRIME, then streams the
 * concatenation of all downstream KSVs, BSTATUS and M0 through
 * HDCP_SHA_TEXT in 32-bit words, tracking partial-word leftovers from the
 * 40-bit KSVs. The four sha_leftovers cases (0..3) choose different
 * HDCP_SHA1_TEXT_* modes so the byte stream stays aligned. Finally the
 * stream is zero-padded to a 64-byte boundary, the bit-length word is
 * written, the hash is completed and HDCP_SHA1_V_MATCH is checked.
 * NOTE(review): error-return lines after each intel_write_sha_text()
 * call and the loop closings are elided in this chunk.
 */
228 int intel_hdcp_validate_v_prime(struct intel_digital_port
*intel_dig_port
,
229 const struct intel_hdcp_shim
*shim
,
230 u8
*ksv_fifo
, u8 num_downstream
, u8
*bstatus
)
232 struct drm_i915_private
*dev_priv
;
233 u32 vprime
, sha_text
, sha_leftovers
, rep_ctl
;
234 int ret
, i
, j
, sha_idx
;
236 dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
238 /* Process V' values from the receiver */
239 for (i
= 0; i
< DRM_HDCP_V_PRIME_NUM_PARTS
; i
++) {
240 ret
= shim
->read_v_prime_part(intel_dig_port
, i
, &vprime
);
243 I915_WRITE(HDCP_SHA_V_PRIME(i
), vprime
);
247 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
248 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
249 * stream is written via the HDCP_SHA_TEXT register in 32-bit
250 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
251 * index will keep track of our progress through the 64 bytes as well as
252 * helping us work the 40-bit KSVs through our 32-bit register.
254 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
259 rep_ctl
= intel_hdcp_get_repeater_ctl(intel_dig_port
);
260 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
261 for (i
= 0; i
< num_downstream
; i
++) {
262 unsigned int sha_empty
;
263 u8
*ksv
= &ksv_fifo
[i
* DRM_HDCP_KSV_LEN
];
265 /* Fill up the empty slots in sha_text and write it out */
266 sha_empty
= sizeof(sha_text
) - sha_leftovers
;
267 for (j
= 0; j
< sha_empty
; j
++)
268 sha_text
|= ksv
[j
] << ((sizeof(sha_text
) - j
- 1) * 8);
270 ret
= intel_write_sha_text(dev_priv
, sha_text
);
274 /* Programming guide writes this every 64 bytes */
275 sha_idx
+= sizeof(sha_text
);
277 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
279 /* Store the leftover bytes from the ksv in sha_text */
280 sha_leftovers
= DRM_HDCP_KSV_LEN
- sha_empty
;
282 for (j
= 0; j
< sha_leftovers
; j
++)
283 sha_text
|= ksv
[sha_empty
+ j
] <<
284 ((sizeof(sha_text
) - j
- 1) * 8);
287 * If we still have room in sha_text for more data, continue.
288 * Otherwise, write it out immediately.
290 if (sizeof(sha_text
) > sha_leftovers
)
293 ret
= intel_write_sha_text(dev_priv
, sha_text
);
298 sha_idx
+= sizeof(sha_text
);
302 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
303 * bytes are leftover from the last ksv, we might be able to fit them
304 * all in sha_text (first 2 cases), or we might need to split them up
305 * into 2 writes (last 2 cases).
307 if (sha_leftovers
== 0) {
308 /* Write 16 bits of text, 16 bits of M0 */
309 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_16
);
310 ret
= intel_write_sha_text(dev_priv
,
311 bstatus
[0] << 8 | bstatus
[1]);
314 sha_idx
+= sizeof(sha_text
);
316 /* Write 32 bits of M0 */
317 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
318 ret
= intel_write_sha_text(dev_priv
, 0);
321 sha_idx
+= sizeof(sha_text
);
323 /* Write 16 bits of M0 */
324 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_16
);
325 ret
= intel_write_sha_text(dev_priv
, 0);
328 sha_idx
+= sizeof(sha_text
);
330 } else if (sha_leftovers
== 1) {
331 /* Write 24 bits of text, 8 bits of M0 */
332 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_24
);
333 sha_text
|= bstatus
[0] << 16 | bstatus
[1] << 8;
334 /* Only 24-bits of data, must be in the LSB */
335 sha_text
= (sha_text
& 0xffffff00) >> 8;
336 ret
= intel_write_sha_text(dev_priv
, sha_text
);
339 sha_idx
+= sizeof(sha_text
);
341 /* Write 32 bits of M0 */
342 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
343 ret
= intel_write_sha_text(dev_priv
, 0);
346 sha_idx
+= sizeof(sha_text
);
348 /* Write 24 bits of M0 */
349 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_8
);
350 ret
= intel_write_sha_text(dev_priv
, 0);
353 sha_idx
+= sizeof(sha_text
);
355 } else if (sha_leftovers
== 2) {
356 /* Write 32 bits of text */
357 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
358 sha_text
|= bstatus
[0] << 24 | bstatus
[1] << 16;
359 ret
= intel_write_sha_text(dev_priv
, sha_text
);
362 sha_idx
+= sizeof(sha_text
);
364 /* Write 64 bits of M0 */
365 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
366 for (i
= 0; i
< 2; i
++) {
367 ret
= intel_write_sha_text(dev_priv
, 0);
370 sha_idx
+= sizeof(sha_text
);
372 } else if (sha_leftovers
== 3) {
373 /* Write 32 bits of text */
374 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
375 sha_text
|= bstatus
[0] << 24;
376 ret
= intel_write_sha_text(dev_priv
, sha_text
);
379 sha_idx
+= sizeof(sha_text
);
381 /* Write 8 bits of text, 24 bits of M0 */
382 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_8
);
383 ret
= intel_write_sha_text(dev_priv
, bstatus
[1]);
386 sha_idx
+= sizeof(sha_text
);
388 /* Write 32 bits of M0 */
389 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
390 ret
= intel_write_sha_text(dev_priv
, 0);
393 sha_idx
+= sizeof(sha_text
);
395 /* Write 8 bits of M0 */
396 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_24
);
397 ret
= intel_write_sha_text(dev_priv
, 0);
400 sha_idx
+= sizeof(sha_text
);
402 DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
407 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
408 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
409 while ((sha_idx
% 64) < (64 - sizeof(sha_text
))) {
410 ret
= intel_write_sha_text(dev_priv
, 0);
413 sha_idx
+= sizeof(sha_text
);
417 * Last write gets the length of the concatenation in bits. That is:
418 * - 5 bytes per device
419 * - 10 bytes for BINFO/BSTATUS(2), M0(8)
421 sha_text
= (num_downstream
* 5 + 10) * 8;
422 ret
= intel_write_sha_text(dev_priv
, sha_text
);
426 /* Tell the HW we're done with the hash and wait for it to ACK */
427 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_COMPLETE_HASH
);
428 if (intel_wait_for_register(dev_priv
, HDCP_REP_CTL
,
430 HDCP_SHA1_COMPLETE
, 1)) {
431 DRM_ERROR("Timed out waiting for SHA1 complete\n");
434 if (!(I915_READ(HDCP_REP_CTL
) & HDCP_SHA1_V_MATCH
)) {
435 DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
/*
 * intel_hdcp_auth_downstream - HDCP 1.4 second part of authentication for
 * repeaters: waits for the KSV FIFO to become ready, reads BSTATUS,
 * rejects topologies that exceed the spec's device/cascade limits or
 * report zero downstream devices, reads the KSV list into a kcalloc'd
 * buffer, and validates V' (retrying up to 3 times as the DP spec
 * requires).
 * NOTE(review): error-return/cleanup lines (including the kfree of
 * ksv_fifo) are elided in this chunk.
 */
442 /* Implements Part 2 of the HDCP authorization procedure */
444 int intel_hdcp_auth_downstream(struct intel_digital_port
*intel_dig_port
,
445 const struct intel_hdcp_shim
*shim
)
447 u8 bstatus
[2], num_downstream
, *ksv_fifo
;
448 int ret
, i
, tries
= 3;
450 ret
= intel_hdcp_poll_ksv_fifo(intel_dig_port
, shim
);
452 DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret
);
456 ret
= shim
->read_bstatus(intel_dig_port
, bstatus
);
460 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus
[0]) ||
461 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus
[1])) {
462 DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
467 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
468 * the HDCP encryption. That implies that repeater can't have its own
469 * display. As there is no consumption of encrypted content in the
470 * repeater with 0 downstream devices, we are failing the
473 num_downstream
= DRM_HDCP_NUM_DOWNSTREAM(bstatus
[0]);
474 if (num_downstream
== 0)
477 ksv_fifo
= kcalloc(DRM_HDCP_KSV_LEN
, num_downstream
, GFP_KERNEL
);
481 ret
= shim
->read_ksv_fifo(intel_dig_port
, num_downstream
, ksv_fifo
);
486 * When V prime mismatches, DP Spec mandates re-read of
487 * V prime atleast twice.
489 for (i
= 0; i
< tries
; i
++) {
490 ret
= intel_hdcp_validate_v_prime(intel_dig_port
, shim
,
491 ksv_fifo
, num_downstream
,
498 DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret
);
502 DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
/*
 * intel_hdcp_auth - HDCP 1.4 first part of authentication.
 *
 * Flow visible here: optionally probe the sink via shim->hdcp_capable(),
 * seed An with two random u32s and capture it, send An/Aksv through the
 * shim, read and validate Bksv, program the repeater-present bits, toggle
 * HDCP signalling on, enable auth+encryption, wait for R0 ready, then
 * compare Ri' (re-reading up to 2 times per the DP spec) and wait for the
 * encryption-enabled status. Repeaters continue with
 * intel_hdcp_auth_downstream().
 * NOTE(review): the 'u8 shim[...]' lines below are members of elided
 * anonymous reg/shim unions named an/bksv/ri (see the an.reg/an.shim
 * accesses); the union framing, 'port' declaration and most error-return
 * lines are not visible in this chunk.
 */
510 /* Implements Part 1 of the HDCP authorization procedure */
511 static int intel_hdcp_auth(struct intel_digital_port
*intel_dig_port
,
512 const struct intel_hdcp_shim
*shim
)
514 struct drm_i915_private
*dev_priv
;
516 unsigned long r0_prime_gen_start
;
517 int ret
, i
, tries
= 2;
520 u8 shim
[DRM_HDCP_AN_LEN
];
524 u8 shim
[DRM_HDCP_KSV_LEN
];
528 u8 shim
[DRM_HDCP_RI_LEN
];
530 bool repeater_present
, hdcp_capable
;
532 dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
534 port
= intel_dig_port
->base
.port
;
537 * Detects whether the display is HDCP capable. Although we check for
538 * valid Bksv below, the HDCP over DP spec requires that we check
539 * whether the display supports HDCP before we write An. For HDMI
540 * displays, this is not necessary.
542 if (shim
->hdcp_capable
) {
543 ret
= shim
->hdcp_capable(intel_dig_port
, &hdcp_capable
);
547 DRM_DEBUG_KMS("Panel is not HDCP capable\n");
552 /* Initialize An with 2 random values and acquire it */
553 for (i
= 0; i
< 2; i
++)
554 I915_WRITE(PORT_HDCP_ANINIT(port
), get_random_u32());
555 I915_WRITE(PORT_HDCP_CONF(port
), HDCP_CONF_CAPTURE_AN
);
557 /* Wait for An to be acquired */
558 if (intel_wait_for_register(dev_priv
, PORT_HDCP_STATUS(port
),
559 HDCP_STATUS_AN_READY
,
560 HDCP_STATUS_AN_READY
, 1)) {
561 DRM_ERROR("Timed out waiting for An\n");
565 an
.reg
[0] = I915_READ(PORT_HDCP_ANLO(port
));
566 an
.reg
[1] = I915_READ(PORT_HDCP_ANHI(port
));
567 ret
= shim
->write_an_aksv(intel_dig_port
, an
.shim
);
571 r0_prime_gen_start
= jiffies
;
573 memset(&bksv
, 0, sizeof(bksv
));
575 ret
= intel_hdcp_read_valid_bksv(intel_dig_port
, shim
, bksv
.shim
);
579 I915_WRITE(PORT_HDCP_BKSVLO(port
), bksv
.reg
[0]);
580 I915_WRITE(PORT_HDCP_BKSVHI(port
), bksv
.reg
[1]);
582 ret
= shim
->repeater_present(intel_dig_port
, &repeater_present
);
585 if (repeater_present
)
586 I915_WRITE(HDCP_REP_CTL
,
587 intel_hdcp_get_repeater_ctl(intel_dig_port
));
589 ret
= shim
->toggle_signalling(intel_dig_port
, true);
593 I915_WRITE(PORT_HDCP_CONF(port
), HDCP_CONF_AUTH_AND_ENC
);
595 /* Wait for R0 ready */
596 if (wait_for(I915_READ(PORT_HDCP_STATUS(port
)) &
597 (HDCP_STATUS_R0_READY
| HDCP_STATUS_ENC
), 1)) {
598 DRM_ERROR("Timed out waiting for R0 ready\n");
603 * Wait for R0' to become available. The spec says 100ms from Aksv, but
604 * some monitors can take longer than this. We'll set the timeout at
605 * 300ms just to be sure.
607 * On DP, there's an R0_READY bit available but no such bit
608 * exists on HDMI. Since the upper-bound is the same, we'll just do
609 * the stupid thing instead of polling on one and not the other.
611 wait_remaining_ms_from_jiffies(r0_prime_gen_start
, 300);
616 * DP HDCP Spec mandates the two more reattempt to read R0, incase
619 for (i
= 0; i
< tries
; i
++) {
621 ret
= shim
->read_ri_prime(intel_dig_port
, ri
.shim
);
624 I915_WRITE(PORT_HDCP_RPRIME(port
), ri
.reg
);
626 /* Wait for Ri prime match */
627 if (!wait_for(I915_READ(PORT_HDCP_STATUS(port
)) &
628 (HDCP_STATUS_RI_MATCH
| HDCP_STATUS_ENC
), 1))
633 DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
634 I915_READ(PORT_HDCP_STATUS(port
)));
638 /* Wait for encryption confirmation */
639 if (intel_wait_for_register(dev_priv
, PORT_HDCP_STATUS(port
),
640 HDCP_STATUS_ENC
, HDCP_STATUS_ENC
,
641 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS
)) {
642 DRM_ERROR("Timed out waiting for encryption\n");
647 * XXX: If we have MST-connected devices, we need to enable encryption
651 if (repeater_present
)
652 return intel_hdcp_auth_downstream(intel_dig_port
, shim
);
654 DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
/*
 * _intel_hdcp_disable - tear down HDCP on the connector's port: clear
 * PORT_HDCP_CONF, wait (up to ENCRYPT_STATUS_CHANGE_TIMEOUT_MS) for the
 * status register to read back zero, then toggle HDCP signalling off via
 * the shim. Callers must hold hdcp->mutex — presumably, given the locked
 * call sites visible below; confirm against the full source.
 */
658 static int _intel_hdcp_disable(struct intel_connector
*connector
)
660 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
661 struct drm_i915_private
*dev_priv
= connector
->base
.dev
->dev_private
;
662 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
663 enum port port
= intel_dig_port
->base
.port
;
666 DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
667 connector
->base
.name
, connector
->base
.base
.id
);
669 I915_WRITE(PORT_HDCP_CONF(port
), 0);
670 if (intel_wait_for_register(dev_priv
, PORT_HDCP_STATUS(port
), ~0, 0,
671 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS
)) {
672 DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
676 ret
= hdcp
->shim
->toggle_signalling(intel_dig_port
, false);
678 DRM_ERROR("Failed to disable HDCP signalling\n");
682 DRM_DEBUG_KMS("HDCP is disabled\n");
/*
 * _intel_hdcp_enable - bring up HDCP on the connector: verify the key is
 * loadable, load it (clearing and retrying up to KEY_LOAD_TRIES times),
 * then run intel_hdcp_auth() up to 3 times, disabling encryption/
 * signalling between failed attempts as the spec expects re-auth.
 * NOTE(review): success-return lines are elided in this chunk.
 */
686 static int _intel_hdcp_enable(struct intel_connector
*connector
)
688 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
689 struct drm_i915_private
*dev_priv
= connector
->base
.dev
->dev_private
;
690 int i
, ret
, tries
= 3;
692 DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
693 connector
->base
.name
, connector
->base
.base
.id
);
695 if (!hdcp_key_loadable(dev_priv
)) {
696 DRM_ERROR("HDCP key Load is not possible\n");
700 for (i
= 0; i
< KEY_LOAD_TRIES
; i
++) {
701 ret
= intel_hdcp_load_keys(dev_priv
);
704 intel_hdcp_clear_keys(dev_priv
);
707 DRM_ERROR("Could not load HDCP keys, (%d)\n", ret
);
711 /* Incase of authentication failures, HDCP spec expects reauth. */
712 for (i
= 0; i
< tries
; i
++) {
713 ret
= intel_hdcp_auth(conn_to_dig_port(connector
), hdcp
->shim
);
717 DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret
);
719 /* Ensuring HDCP encryption and signalling are stopped. */
720 _intel_hdcp_disable(connector
);
723 DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries
, ret
);
/*
 * intel_hdcp_to_connector - recover the intel_connector that embeds the
 * given intel_hdcp (the hdcp member), via container_of().
 */
728 struct intel_connector
*intel_hdcp_to_connector(struct intel_hdcp
*hdcp
)
730 return container_of(hdcp
, struct intel_connector
, hdcp
);
/*
 * intel_hdcp_check_work - periodic link-integrity worker: runs
 * intel_hdcp_check_link() and, while it reports success (returns 0),
 * re-arms itself every DRM_HDCP_CHECK_PERIOD_MS.
 */
733 static void intel_hdcp_check_work(struct work_struct
*work
)
735 struct intel_hdcp
*hdcp
= container_of(to_delayed_work(work
),
738 struct intel_connector
*connector
= intel_hdcp_to_connector(hdcp
);
740 if (!intel_hdcp_check_link(connector
))
741 schedule_delayed_work(&hdcp
->check_work
,
742 DRM_HDCP_CHECK_PERIOD_MS
);
/*
 * intel_hdcp_prop_work - worker that mirrors hdcp->value into the
 * connector's content_protection state, under the modeset
 * connection_mutex and hdcp->mutex. UNDESIRED transitions are owned by
 * the DRM core, so those are skipped here.
 */
745 static void intel_hdcp_prop_work(struct work_struct
*work
)
747 struct intel_hdcp
*hdcp
= container_of(work
, struct intel_hdcp
,
749 struct intel_connector
*connector
= intel_hdcp_to_connector(hdcp
);
750 struct drm_device
*dev
= connector
->base
.dev
;
751 struct drm_connector_state
*state
;
753 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
754 mutex_lock(&hdcp
->mutex
);
757 * This worker is only used to flip between ENABLED/DESIRED. Either of
758 * those to UNDESIRED is handled by core. If value == UNDESIRED,
759 * we're running just after hdcp has been disabled, so just exit
761 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
) {
762 state
= connector
->base
.state
;
763 state
->content_protection
= hdcp
->value
;
766 mutex_unlock(&hdcp
->mutex
);
767 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/*
 * is_hdcp_supported - HDCP is supported only on Gen9+ and only on ports
 * A-D (PORT E has no HDCP hardware; PORT F is disabled).
 */
770 bool is_hdcp_supported(struct drm_i915_private
*dev_priv
, enum port port
)
772 /* PORT E doesn't have HDCP, and PORT F is disabled */
773 return INTEL_GEN(dev_priv
) >= 9 && port
< PORT_E
;
/*
 * intel_hdcp_init - one-time per-connector HDCP setup: attach the DRM
 * content-protection property and initialize the hdcp mutex and the
 * check/prop workers.
 * NOTE(review): storing 'shim' into hdcp and the return statements are
 * elided in this chunk.
 */
776 int intel_hdcp_init(struct intel_connector
*connector
,
777 const struct intel_hdcp_shim
*shim
)
779 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
782 ret
= drm_connector_attach_content_protection_property(
788 mutex_init(&hdcp
->mutex
);
789 INIT_DELAYED_WORK(&hdcp
->check_work
, intel_hdcp_check_work
);
790 INIT_WORK(&hdcp
->prop_work
, intel_hdcp_prop_work
);
/*
 * intel_hdcp_enable - public entry point: under hdcp->mutex, run
 * _intel_hdcp_enable(); on success mark the property ENABLED (via the
 * prop worker) and arm the periodic link check.
 */
794 int intel_hdcp_enable(struct intel_connector
*connector
)
796 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
802 mutex_lock(&hdcp
->mutex
);
804 ret
= _intel_hdcp_enable(connector
);
808 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_ENABLED
;
809 schedule_work(&hdcp
->prop_work
);
810 schedule_delayed_work(&hdcp
->check_work
,
811 DRM_HDCP_CHECK_PERIOD_MS
);
813 mutex_unlock(&hdcp
->mutex
);
/*
 * intel_hdcp_disable - public entry point: under hdcp->mutex, flip the
 * tracked value to UNDESIRED and tear down HDCP if it wasn't already
 * undesired; then (outside the lock) cancel the periodic link check.
 */
817 int intel_hdcp_disable(struct intel_connector
*connector
)
819 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
825 mutex_lock(&hdcp
->mutex
);
827 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
) {
828 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
;
829 ret
= _intel_hdcp_disable(connector
);
832 mutex_unlock(&hdcp
->mutex
);
833 cancel_delayed_work_sync(&hdcp
->check_work
);
/*
 * intel_hdcp_atomic_check - atomic-check hook for the content-protection
 * property. If the connector is being disabled while ENABLED, downgrade
 * to DESIRED so HDCP re-enables when the connector comes back. A no-op
 * transition (or DESIRED->ENABLED, which only the driver may set) needs
 * no modeset; any other change forces crtc_state->mode_changed so HDCP is
 * re-negotiated.
 */
837 void intel_hdcp_atomic_check(struct drm_connector
*connector
,
838 struct drm_connector_state
*old_state
,
839 struct drm_connector_state
*new_state
)
841 u64 old_cp
= old_state
->content_protection
;
842 u64 new_cp
= new_state
->content_protection
;
843 struct drm_crtc_state
*crtc_state
;
845 if (!new_state
->crtc
) {
847 * If the connector is being disabled with CP enabled, mark it
848 * desired so it's re-enabled when the connector is brought back
850 if (old_cp
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
851 new_state
->content_protection
=
852 DRM_MODE_CONTENT_PROTECTION_DESIRED
;
857 * Nothing to do if the state didn't change, or HDCP was activated since
860 if (old_cp
== new_cp
||
861 (old_cp
== DRM_MODE_CONTENT_PROTECTION_DESIRED
&&
862 new_cp
== DRM_MODE_CONTENT_PROTECTION_ENABLED
))
865 crtc_state
= drm_atomic_get_new_crtc_state(new_state
->state
,
867 crtc_state
->mode_changed
= true;
870 /* Implements Part 3 of the HDCP authorization procedure */
871 int intel_hdcp_check_link(struct intel_connector
*connector
)
873 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
874 struct drm_i915_private
*dev_priv
= connector
->base
.dev
->dev_private
;
875 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
876 enum port port
= intel_dig_port
->base
.port
;
882 mutex_lock(&hdcp
->mutex
);
884 if (hdcp
->value
== DRM_MODE_CONTENT_PROTECTION_UNDESIRED
)
887 if (!(I915_READ(PORT_HDCP_STATUS(port
)) & HDCP_STATUS_ENC
)) {
888 DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
889 connector
->base
.name
, connector
->base
.base
.id
,
890 I915_READ(PORT_HDCP_STATUS(port
)));
892 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
893 schedule_work(&hdcp
->prop_work
);
897 if (hdcp
->shim
->check_link(intel_dig_port
)) {
898 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
) {
899 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_ENABLED
;
900 schedule_work(&hdcp
->prop_work
);
905 DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
906 connector
->base
.name
, connector
->base
.base
.id
);
908 ret
= _intel_hdcp_disable(connector
);
910 DRM_ERROR("Failed to disable hdcp (%d)\n", ret
);
911 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
912 schedule_work(&hdcp
->prop_work
);
916 ret
= _intel_hdcp_enable(connector
);
918 DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret
);
919 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
920 schedule_work(&hdcp
->prop_work
);
925 mutex_unlock(&hdcp
->mutex
);