]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - drivers/gpu/drm/i915/intel_hdcp.c
Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm...
[thirdparty/kernel/stable.git] / drivers / gpu / drm / i915 / intel_hdcp.c
CommitLineData
ee5e5e7a
SP
1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright (C) 2017 Google, Inc.
4 *
5 * Authors:
6 * Sean Paul <seanpaul@chromium.org>
7 */
8
ee5e5e7a
SP
9#include <drm/drm_hdcp.h>
10#include <linux/i2c.h>
11#include <linux/random.h>
12
13#include "intel_drv.h"
14#include "i915_reg.h"
15
16#define KEY_LOAD_TRIES 5
7e90e8d0 17#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
ee5e5e7a 18
f106d100
R
19static
20bool intel_hdcp_is_ksv_valid(u8 *ksv)
21{
22 int i, ones = 0;
23 /* KSV has 20 1's and 20 0's */
24 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
25 ones += hweight8(ksv[i]);
26 if (ones != 20)
27 return false;
28
29 return true;
30}
31
32static
33int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
34 const struct intel_hdcp_shim *shim, u8 *bksv)
35{
36 int ret, i, tries = 2;
37
38 /* HDCP spec states that we must retry the bksv if it is invalid */
39 for (i = 0; i < tries; i++) {
40 ret = shim->read_bksv(intel_dig_port, bksv);
41 if (ret)
42 return ret;
43 if (intel_hdcp_is_ksv_valid(bksv))
44 break;
45 }
46 if (i == tries) {
3aae21fc 47 DRM_DEBUG_KMS("Bksv is invalid\n");
f106d100
R
48 return -ENODEV;
49 }
50
51 return 0;
52}
53
bdc93fe0
R
54/* Is HDCP1.4 capable on Platform and Sink */
55bool intel_hdcp_capable(struct intel_connector *connector)
56{
57 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
d3dacc70 58 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bdc93fe0
R
59 bool capable = false;
60 u8 bksv[5];
61
62 if (!shim)
63 return capable;
64
65 if (shim->hdcp_capable) {
66 shim->hdcp_capable(intel_dig_port, &capable);
67 } else {
68 if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
69 capable = true;
70 }
71
72 return capable;
73}
74
ee5e5e7a
SP
/*
 * Poll the sink (via the shim's read_ksv_ready hook) until it reports that
 * its KSV FIFO is ready.  Returns 0 on ready, a shim transport error, or
 * -ETIMEDOUT if the repeater never signalled readiness within the 5 second
 * budget the HDCP spec allows.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret; /* transport error from the shim read */
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
95
6308a315
R
/*
 * Check whether the HDCP key can currently be loaded: the platform's
 * relevant display power well (global well on HSW/BDW, PW#1 otherwise)
 * must be enabled.  Taken under the power-domains lock so the power-well
 * state cannot change while we read it.
 */
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	mutex_lock(&power_domains->lock);

	/* PG1 (power well #1) needs to be enabled */
	for_each_power_well(dev_priv, power_well) {
		if (power_well->desc->id == id) {
			enabled = power_well->desc->ops->is_enabled(dev_priv,
								    power_well);
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}
132
ee5e5e7a
SP
/*
 * Discard any loaded HDCP keys and write back the fuse/load status bits
 * (presumably write-1-to-clear semantics — TODO confirm against bspec)
 * so a subsequent load attempt starts from a clean state.
 */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
139
/*
 * Load the HDCP1.4 keys into display hardware.  On GEN9_BC the load is
 * triggered through the GT driver mailbox (pcode); other SW-load platforms
 * use HDCP_KEY_CONF directly.  On HSW/BDW hardware loads the keys itself
 * at display reset, so a missing key there is a hard error.  On success,
 * also triggers sending Aksv to the PCH for use during authentication.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Already loaded and validated: nothing to do. */
	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		mutex_unlock(&dev_priv->pcu_lock);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
				  ret);
			return ret;
		}
	} else {
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO; /* load finished but keys failed validation */

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
192
/*
 * Feed one 32-bit word of the SHA-1 input stream to the hardware via
 * HDCP_SHA_TEXT and wait for it to be consumed.
 *
 * Returns 0 on success or -ETIMEDOUT if HDCP_SHA1_READY never asserts.
 * (The old "Returns updated SHA-1 index" comment was stale — callers
 * advance sha_idx themselves.)
 */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	I915_WRITE(HDCP_SHA_TEXT, sha_text);
	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
				    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}
204
/*
 * Map a DDI port to its HDCP_REP_CTL "repeater present" and SHA1-M0
 * select bits.
 *
 * NOTE(review): the return type is u32 but the unknown-port path yields
 * -EINVAL, which ORed into HDCP_REP_CTL would set nearly every bit.
 * Callers appear to only pass DDI A-E — confirm before relying on the
 * error path.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
{
	enum port port = intel_dig_port->base.port;
	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		break;
	}
	DRM_ERROR("Unknown port %d\n", port);
	return -EINVAL;
}
226
/*
 * Validate the repeater's V' signature: write V' into the hardware, then
 * stream the concatenated downstream KSV list, BSTATUS/BINFO and M0
 * through the hardware SHA-1 engine, and check that the computed hash
 * matches V'.  Returns 0 on match, -ENXIO on mismatch, -ETIMEDOUT /
 * shim errors on transport failure, -EINVAL on internal bookkeeping
 * errors.
 */
static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct drm_i915_private *dev_priv;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
		if (ret)
			return ret;
		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++)
			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text, 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		/* sha_leftovers is always in [0,3]; reaching here is a bug. */
		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
			      sha_leftovers);
		return -EINVAL;
	}

	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
				    HDCP_SHA1_COMPLETE,
				    HDCP_SHA1_COMPLETE, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
441
/*
 * Implements Part 2 of the HDCP authorization procedure: wait for the
 * repeater's KSV FIFO, sanity-check the topology via BSTATUS, read the
 * downstream KSV list, and validate V' (retrying up to three times as
 * the DP spec requires on mismatch).
 */
static
int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
			       const struct intel_hdcp_shim *shim)
{
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
	if (ret) {
		DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(intel_dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0)
		return -EINVAL;

	/* kcalloc checks the KSV_LEN * num_downstream multiply for overflow */
	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo)
		return -ENOMEM;

	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret holds the last validation error here */
		DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
		      num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
509
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, read and program a valid Bksv, enable signalling and
 * encryption, then verify R0 == R0'.  If the sink is a repeater, chains
 * into Part 2 (intel_hdcp_auth_downstream).
 */
static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
			   const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv;
	enum port port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let us move 64/40/16-bit values between the 32-bit HW
	 * registers (reg views) and the byte-oriented shim hooks (shim
	 * views). */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	port = intel_dig_port->base.port;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* Timestamp Aksv write: the R0' wait below is measured from here. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
			      I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(intel_dig_port, shim);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}
657
ee5e5e7a
SP
/*
 * Turn off HDCP on a connector's port: clear the port config register,
 * wait for the whole status register to drain to zero, then ask the shim
 * to stop HDCP signalling.  Caller is expected to hold hdcp->mutex.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
		      connector->base.name, connector->base.base.id);

	I915_WRITE(PORT_HDCP_CONF(port), 0);
	/* ~0 mask, 0 value: wait for every status bit to clear */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		DRM_ERROR("Failed to disable HDCP signalling\n");
		return ret;
	}

	DRM_DEBUG_KMS("HDCP is disabled\n");
	return 0;
}
685
/*
 * Enable HDCP on a connector: load the HDCP keys (retrying with a key
 * clear in between, up to KEY_LOAD_TRIES), then run the authentication
 * sequence up to three times as the HDCP spec expects re-auth on failure.
 * Caller is expected to hold hdcp->mutex.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	int i, ret, tries = 3;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
		      connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		DRM_ERROR("HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		/* Failed load: wipe key state before the next attempt */
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
		if (!ret)
			return 0;

		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
726
d3dacc70
R
/* Recover the connector that embeds the given intel_hdcp state. */
static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
732
ee5e5e7a
SP
/*
 * Periodic link-integrity worker: re-check the HDCP link and, while the
 * check keeps succeeding (returns 0), re-arm itself for the next period.
 * A failed check stops the polling; re-enabling HDCP restarts it.
 */
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);

	if (!intel_hdcp_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP_CHECK_PERIOD_MS);
}
744
/*
 * Worker that propagates hdcp->value into the connector's
 * content_protection state.  Takes the connection_mutex before
 * hdcp->mutex — state updates require the modeset lock, and this
 * ordering avoids deadlock with paths that hold it already.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_device *dev = connector->base.dev;
	struct drm_connector_state *state;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state = connector->base.state;
		state->content_protection = hdcp->value;
	}

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
769
fdddd08c
R
/* HDCP1.4 support check: GEN9+ platforms, DDI ports A through D only. */
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
	/* PORT E doesn't have HDCP, and PORT F is disabled */
	return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}
775
ee5e5e7a 776int intel_hdcp_init(struct intel_connector *connector,
d3dacc70 777 const struct intel_hdcp_shim *shim)
ee5e5e7a 778{
d3dacc70 779 struct intel_hdcp *hdcp = &connector->hdcp;
ee5e5e7a
SP
780 int ret;
781
782 ret = drm_connector_attach_content_protection_property(
783 &connector->base);
784 if (ret)
785 return ret;
786
d3dacc70
R
787 hdcp->shim = shim;
788 mutex_init(&hdcp->mutex);
789 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
790 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
ee5e5e7a
SP
791 return 0;
792}
793
/*
 * Public entry point to enable HDCP on a connector.  On success, marks
 * content protection ENABLED (propagated asynchronously via prop_work)
 * and starts the periodic link-integrity check.  Returns -ENOENT if the
 * connector has no HDCP shim.
 */
int intel_hdcp_enable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	ret = _intel_hdcp_enable(connector);
	if (ret)
		goto out;

	hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
	schedule_work(&hdcp->prop_work);
	schedule_delayed_work(&hdcp->check_work,
			      DRM_HDCP_CHECK_PERIOD_MS);
out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
816
/*
 * Public entry point to disable HDCP on a connector.  Marks the state
 * UNDESIRED and tears down encryption if it was active; already-UNDESIRED
 * connectors return 0 without touching hardware.  The link-check worker
 * is cancelled after dropping hdcp->mutex, since check_work takes that
 * mutex itself.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
836
/*
 * Atomic-check hook for the content_protection property: demote ENABLED
 * to DESIRED when the connector is being disabled (so HDCP re-enables on
 * the next modeset), and flag a mode change on the CRTC for any real
 * transition so the enable/disable paths run during commit.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	crtc_state->mode_changed = true;
}
869
/*
 * Implements Part 3 of the HDCP authorization procedure: verify the link
 * is still encrypted and the shim-level link check passes; on failure,
 * tear down and re-run authentication.  Returns 0 when the link is good
 * (or HDCP is undesired), nonzero when recovery failed — the caller
 * (check_work) stops polling in that case, after the state has been
 * flipped to DESIRED and propagated via prop_work.
 */
int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	/* Hardware no longer reports encryption: unrecoverable here. */
	if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
		DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
			  connector->base.name, connector->base.base.id,
			  I915_READ(PORT_HDCP_STATUS(port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	/* Shim-level check failed (nonzero): retry full authentication. */
	if (hdcp->shim->check_link(intel_dig_port)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
		      connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}