vt: vt_ioctl: fix VT_DISALLOCATE freeing in-use virtual console
[linux/fpc-iii.git] / drivers / gpu / drm / i915 / intel_hdcp.c
blob0cc6a861bcf83ece9f4a317dd9581dd9741ecf81
1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Copyright (C) 2017 Google, Inc.
5 * Authors:
6 * Sean Paul <seanpaul@chromium.org>
7 */
9 #include <drm/drmP.h>
10 #include <drm/drm_hdcp.h>
11 #include <linux/i2c.h>
12 #include <linux/random.h>
14 #include "intel_drv.h"
15 #include "i915_reg.h"
/* Number of attempts to (re)load the HDCP keys before giving up */
#define KEY_LOAD_TRIES	5
/*
 * Wait for the sink's KSV FIFO to become ready.
 *
 * Repeatedly calls shim->read_ksv_ready() until the read fails, the sink
 * reports ready, or the 5 second budget expires (the comment below cites the
 * spec's 5s upper bound for the repeater to assemble the KSV list).
 *
 * Returns 0 on success, the shim's error code if the ready read itself
 * failed, or -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* A non-zero read result takes precedence over "not ready" */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
/*
 * Report whether the HDCP keys can currently be loaded.
 *
 * Checks that the relevant display power well (PW "global" on HSW/BDW,
 * PW#1 on BXT+) is enabled, under the power-domains lock.
 */
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	mutex_lock(&power_domains->lock);

	/* PG1 (power well #1) needs to be enabled */
	for_each_power_well(dev_priv, power_well) {
		if (power_well->id == id) {
			enabled = power_well->ops->is_enabled(dev_priv,
							      power_well);
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}
/*
 * Clear any previously loaded HDCP keys and reset the key status register so
 * a fresh load can be retried (see the retry loop in _intel_hdcp_enable()).
 */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	/* Clear all the sticky status/fuse bits in one write */
	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
/*
 * Load the HDCP 1.4 keys into display hardware.
 *
 * On HSW/BDW the hardware loads the keys itself on display resume, so this
 * only verifies they are present. On SKL/KBL the load is triggered through
 * the pcode mailbox; on other BXT+ platforms directly via HDCP_KEY_CONF.
 * Finally the Aksv send trigger is written so the key can be used during
 * authentication.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if keys are already loaded with good status */
	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
	 * differ in the key load trigger process from other platforms.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		mutex_unlock(&dev_priv->pcu_lock);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
				  ret);
			return ret;
		}
	} else {
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/*
	 * Wait for the keys to load (500us).
	 * NOTE(review): the timeout arguments here are (10, 1) — presumably
	 * 10us fast-poll / 1ms slow timeout; confirm against the
	 * __intel_wait_for_register() signature, since the 500us in the
	 * comment is not visible in the parameters.
	 */
	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
/*
 * Write one 32-bit word of SHA text into HW and wait for it to be consumed.
 *
 * Returns 0 on success or -ETIMEDOUT if HW never re-asserts SHA1_READY.
 * (The previous "Returns updated SHA-1 index" comment did not match the
 * code — callers track sha_idx themselves.)
 */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	I915_WRITE(HDCP_SHA_TEXT, sha_text);
	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
				    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 * Map a DDI port to its HDCP_REP_CTL "repeater present" and "SHA1 M0 select"
 * bits.
 *
 * NOTE(review): on an unknown port this returns -EINVAL through a u32 return
 * type, and the visible callers (intel_hdcp_auth, intel_hdcp_validate_v_prime)
 * write the value into HDCP_REP_CTL without checking for error — confirm
 * every port reaching here is A-E, or add error handling at the call sites.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
{
	enum port port = intel_dig_port->base.port;

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		break;
	}
	DRM_ERROR("Unknown port %d\n", port);
	return -EINVAL;
}
170 static
171 bool intel_hdcp_is_ksv_valid(u8 *ksv)
173 int i, ones = 0;
174 /* KSV has 20 1's and 20 0's */
175 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
176 ones += hweight8(ksv[i]);
177 if (ones != 20)
178 return false;
179 return true;
/*
 * Validate the repeater's V' against a HW-computed SHA-1 over the KSV list.
 *
 * First loads the five V' words read from the receiver into
 * HDCP_SHA_V_PRIME(), then streams the concatenation of all downstream KSVs,
 * BSTATUS/BINFO and M0 through HDCP_SHA_TEXT (see the big comment below for
 * the exact byte-packing rules), pads to the 64-byte SHA block boundary,
 * writes the bit length, and finally asks HW to complete the hash and report
 * whether it matches V'.
 *
 * Returns 0 when HW reports SHA1_V_MATCH, a negative errno otherwise.
 */
static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct drm_i915_private *dev_priv;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
		if (ret)
			return ret;
		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	/* NOTE(review): return value not checked for the -EINVAL case */
	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++)
			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text, 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		/* NOTE(review): %d with a u32 — %u would match the type */
		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
			      sha_leftovers);
		return -EINVAL;
	}

	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
				    HDCP_SHA1_COMPLETE,
				    HDCP_SHA1_COMPLETE, 1)) {
		DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
/*
 * Implements Part 2 of the HDCP authorization procedure: repeater/downstream
 * authentication. Waits for the KSV FIFO, validates BSTATUS topology limits,
 * reads the downstream KSV list and verifies V' (retrying up to 3 times, per
 * the comment below on the DP spec's re-read requirement).
 *
 * Returns 0 on success; on any failure after the KSV FIFO allocation the
 * buffer is released via the goto-cleanup path.
 */
static
int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
			       const struct intel_hdcp_shim *shim)
{
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
	if (ret) {
		DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(intel_dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		DRM_ERROR("Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0)
		return -EINVAL;

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo)
		return -ENOMEM;

	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret still holds the last validation error here */
		DRM_ERROR("V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
		      num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
/*
 * Implements Part 1 of the HDCP authorization procedure.
 *
 * Sequence (order mandated by the protocol and the HW):
 *  1. optional sink capability check (DP requires it before An is written)
 *  2. generate An in HW from two random seeds, send An/Aksv via the shim
 *  3. read and validate Bksv (retried once on an invalid KSV)
 *  4. program repeater bits, enable signalling, start auth + encryption
 *  5. wait for R0, exchange Ri' and wait for the Ri match + encryption bits
 *  6. if a repeater is present, continue with Part 2 downstream auth
 *
 * Returns 0 on success, a negative errno on any failure.
 */
static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
			   const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv;
	enum port port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* unions give the register views byte-wise access for the shims */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	port = intel_dig_port->base.port;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_ERROR("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* Aksv write starts the sink's R0' generation window (see below) */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(intel_dig_port, bksv.shim);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv.shim))
			break;
	}
	if (i == tries) {
		DRM_ERROR("HDCP failed, Bksv is invalid\n");
		return -ENODEV;
	}

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
			  I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(intel_dig_port, shim);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}
621 static
622 struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
624 return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
/*
 * Tear down HDCP on a connector: clear the port's HDCP config, wait for the
 * status register to fully drain, then disable HDCP signalling via the shim.
 * Caller holds hdcp_mutex (see intel_hdcp_disable/intel_hdcp_check_link).
 *
 * Returns 0 on success, -ETIMEDOUT or the shim's error otherwise.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
		      connector->base.name, connector->base.base.id);

	I915_WRITE(PORT_HDCP_CONF(port), 0);
	/* Wait for *all* status bits to clear before touching signalling */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
				    20)) {
		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		DRM_ERROR("Failed to disable HDCP signalling\n");
		return ret;
	}

	DRM_DEBUG_KMS("HDCP is disabled\n");
	return 0;
}
/*
 * Bring up HDCP on a connector: load the keys (retried KEY_LOAD_TRIES times,
 * clearing them between attempts), then run the authentication procedure up
 * to 3 times, disabling encryption/signalling between failed attempts.
 * Caller holds hdcp_mutex.
 *
 * Returns 0 on success, the last error otherwise.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	int i, ret, tries = 3;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
		      connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		DRM_ERROR("HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		/* Clear the partially-loaded keys before retrying */
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(conn_to_dig_port(connector),
				      connector->hdcp_shim);
		if (!ret)
			return 0;

		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
695 static void intel_hdcp_check_work(struct work_struct *work)
697 struct intel_connector *connector = container_of(to_delayed_work(work),
698 struct intel_connector,
699 hdcp_check_work);
700 if (!intel_hdcp_check_link(connector))
701 schedule_delayed_work(&connector->hdcp_check_work,
702 DRM_HDCP_CHECK_PERIOD_MS);
/*
 * Worker that propagates the internally tracked hdcp_value into the
 * connector's content_protection state. Takes connection_mutex before
 * hdcp_mutex — keep that lock ordering if this is ever changed.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_connector *connector = container_of(work,
							 struct intel_connector,
							 hdcp_prop_work);
	struct drm_device *dev = connector->base.dev;
	struct drm_connector_state *state;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&connector->hdcp_mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state = connector->base.state;
		state->content_protection = connector->hdcp_value;
	}

	mutex_unlock(&connector->hdcp_mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
730 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
732 /* PORT E doesn't have HDCP, and PORT F is disabled */
733 return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
734 !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
/*
 * One-time per-connector HDCP setup: attach the content-protection property
 * and initialize the shim pointer, mutex and the two workers. The property
 * is attached first so a failure leaves the connector untouched.
 *
 * Returns 0 on success or the property-attach error.
 */
int intel_hdcp_init(struct intel_connector *connector,
		    const struct intel_hdcp_shim *hdcp_shim)
{
	int ret;

	ret = drm_connector_attach_content_protection_property(
			&connector->base);
	if (ret)
		return ret;

	connector->hdcp_shim = hdcp_shim;
	mutex_init(&connector->hdcp_mutex);
	INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
	INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
	return 0;
}
/*
 * Public entry point to enable HDCP on a connector. On success, records
 * ENABLED, kicks the property-update worker and arms the periodic link
 * check. All state changes happen under hdcp_mutex.
 *
 * Returns -ENOENT if the connector has no HDCP shim, otherwise the result
 * of _intel_hdcp_enable().
 */
int intel_hdcp_enable(struct intel_connector *connector)
{
	int ret;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	ret = _intel_hdcp_enable(connector);
	if (ret)
		goto out;

	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
	schedule_work(&connector->hdcp_prop_work);
	/*
	 * NOTE(review): DRM_HDCP_CHECK_PERIOD_MS is passed straight to
	 * schedule_delayed_work(), which takes jiffies — confirm the macro's
	 * units against its definition in drm_hdcp.h.
	 */
	schedule_delayed_work(&connector->hdcp_check_work,
			      DRM_HDCP_CHECK_PERIOD_MS);
out:
	mutex_unlock(&connector->hdcp_mutex);
	return ret;
}
/*
 * Public entry point to disable HDCP on a connector. Marks the state
 * UNDESIRED (so the prop/check workers become no-ops) before tearing down
 * encryption, then cancels the periodic link check outside the mutex to
 * avoid deadlocking against a running intel_hdcp_check_work().
 *
 * Returns -ENOENT without a shim, else the _intel_hdcp_disable() result
 * (0 if HDCP was already undesired).
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	int ret = 0;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&connector->hdcp_mutex);
	cancel_delayed_work_sync(&connector->hdcp_check_work);
	return ret;
}
795 void intel_hdcp_atomic_check(struct drm_connector *connector,
796 struct drm_connector_state *old_state,
797 struct drm_connector_state *new_state)
799 uint64_t old_cp = old_state->content_protection;
800 uint64_t new_cp = new_state->content_protection;
801 struct drm_crtc_state *crtc_state;
803 if (!new_state->crtc) {
805 * If the connector is being disabled with CP enabled, mark it
806 * desired so it's re-enabled when the connector is brought back
808 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
809 new_state->content_protection =
810 DRM_MODE_CONTENT_PROTECTION_DESIRED;
811 return;
815 * Nothing to do if the state didn't change, or HDCP was activated since
816 * the last commit
818 if (old_cp == new_cp ||
819 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
820 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
821 return;
823 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
824 new_state->crtc);
825 crtc_state->mode_changed = true;
/*
 * Implements Part 3 of the HDCP authorization procedure: periodic link
 * integrity check. Verifies encryption is still active in HW and that the
 * shim's link check passes; on failure it attempts a full disable/re-enable
 * cycle, demoting the property to DESIRED (and notifying userspace via the
 * prop worker) if recovery fails.
 *
 * Returns 0 when the link is healthy or HDCP is undesired, a negative errno
 * otherwise.
 */
int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret = 0;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	/* HW no longer reports the link as encrypted: demote to DESIRED */
	if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
		DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
			  connector->base.name, connector->base.base.id,
			  I915_READ(PORT_HDCP_STATUS(port)));
		ret = -ENXIO;
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	/* Non-zero from the shim's check_link means the link is still good */
	if (connector->hdcp_shim->check_link(intel_dig_port)) {
		if (connector->hdcp_value !=
		    DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			connector->hdcp_value =
				DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&connector->hdcp_prop_work);
		}
		goto out;
	}

	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
		      connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

out:
	mutex_unlock(&connector->hdcp_mutex);
	return ret;
}