/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 * Ramalingam C <ramalingam.c@intel.com>
 */
#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>

#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>

#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
#include "intel_connector.h"

#define KEY_LOAD_TRIES	5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
#define HDCP2_LC_RETRY_CNT			3
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
	int i, ones = 0;

	/* KSV has 20 1's and 20 0's */
	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
		ones += hweight8(ksv[i]);
	if (ones != 20)
		return false;

	return true;
}

int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
			       const struct intel_hdcp_shim *shim, u8 *bksv)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	int ret, i, tries = 2;

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(dig_port, bksv);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv))
			break;
	}
	if (i == tries) {
		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
		return -ENODEV;
	}

	return 0;
}
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	bool capable = false;
	u8 bksv[5];

	if (!shim)
		return capable;

	if (shim->hdcp_capable) {
		shim->hdcp_capable(dig_port, &capable);
	} else {
		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
			capable = true;
	}

	return capable;
}

/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* MEI interface is solid */
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return false;
	}
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	/* Sink's capability for HDCP2.2 */
	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);

	return capable;
}
static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(dev_priv,
			     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
	       HDCP_STATUS_ENC;
}

static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
			       enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(dev_priv,
			     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	       LINK_ENCRYPTION_STATUS;
}
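/*
 * The two helpers above read the per-transcoder/port HDCP status registers and
 * report whether HDCP1.4 or HDCP2.2 encryption is currently active on the
 * link; the link-check paths further down use them to detect hardware that has
 * dropped encryption while the property still reports ENABLED.
 */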
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}

static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id id;
	intel_wakeref_t wakeref;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	/* PG1 (power well #1) needs to be enabled */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		enabled = intel_display_power_well_is_enabled(dev_priv, id);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}

static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	intel_de_write(dev_priv, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
		       HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR |
		       HDCP_FUSE_DONE);
}
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}

/* Returns updated SHA-1 index */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
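/*
 * The REP_PRESENT | SHA1_M0 bits returned above are OR'ed into HDCP_REP_CTL
 * when repeater authentication starts and cleared again in
 * _intel_hdcp_disable(); on gen12+ they are selected per transcoder, on older
 * platforms per DDI.
 */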
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
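	/*
	 * Each KSV is DRM_HDCP_KSV_LEN (5) bytes while HDCP_SHA_TEXT takes
	 * sizeof(sha_text) (4) bytes per write, so sha_leftovers cycles
	 * 1, 2, 3, 0, 1, ... as successive KSVs are packed into the register;
	 * the if/else ladder below handles the four possible alignments once
	 * BINFO/BSTATUS and M0 are appended.
	 */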
	sha_idx = 0;
	sha_leftovers = 0;
	sha_text = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}
	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;
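	/*
	 * For example, with two downstream devices the length written here is
	 * (2 * 5 + 10) * 8 = 160 bits: two 5-byte KSVs plus 2 bytes of
	 * BINFO/BSTATUS and the 8-byte M0.
	 */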
	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
/* Implements Part 2 of the HDCP authorization procedure */
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	r0_prime_gen_start = jiffies;
	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
	tries = 3;

	/*
	 * The DP HDCP spec mandates two more attempts to read R0', in case
	 * of an R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
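/*
 * Summary of Part 1 as implemented above: acquire An from the hardware, send
 * An/Aksv to the sink, read and validate Bksv, enable authentication on the
 * transcoder, then compare R0/R0' before declaring the link encrypted. Part 2
 * (repeater/downstream authentication) is handled by
 * intel_hdcp_auth_downstream() when the sink reports a repeater.
 */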
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	/*
	 * If there are other connectors on this port using HDCP, don't disable
	 * it. Instead, toggle the HDCP signalling off on that particular
	 * connector/pipe and exit.
	 */
	if (dig_port->num_hdcp_streams > 0) {
		ret = hdcp->shim->toggle_signalling(dig_port,
						    cpu_transcoder, false);
		if (ret)
			DRM_ERROR("Failed to disable HDCP signalling\n");
		return ret;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}

static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* In case of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}

static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
	}
}
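/*
 * num_hdcp_streams counts how many connectors on this digital port currently
 * have HDCP enabled; _intel_hdcp_disable() only tears down port-level
 * encryption once that count drops to zero, so shared ports keep encryption
 * alive for the remaining streams.
 */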
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
	return INTEL_INFO(dev_priv)->display.has_hdcp &&
	       (INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
}
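/*
 * On pre-gen12 platforms HDCP is only exposed on ports A through D (hence the
 * port < PORT_E check); gen12+ moved the HDCP registers to the transcoders,
 * so the port restriction no longer applies as long as the platform
 * advertises has_hdcp.
 */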
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
		       struct hdcp2_ake_init *ake_data)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
							 rx_cert, paired,
							 ek_pub_km, msg_sz);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_verify_hprime(struct intel_connector *connector,
			       struct hdcp2_ake_send_hprime *rx_hprime)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_store_pairing_info(struct intel_connector *connector,
			 struct hdcp2_ake_send_pairing_info *pairing_info)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
		      struct hdcp2_lc_init *lc_init)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_lprime(struct intel_connector *connector,
		    struct hdcp2_lc_send_lprime *rx_lprime)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_prepare_skey(struct intel_connector *connector,
			      struct hdcp2_ske_send_eks *ske_data)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
				      struct hdcp2_rep_send_receiverid_list
								*rep_topology,
				      struct hdcp2_rep_send_ack *rep_send_ack)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
							 rep_topology,
							 rep_send_ack);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Verify rep topology failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_mprime(struct intel_connector *connector,
		    struct hdcp2_rep_stream_ready *stream_ready)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_authenticate_port(struct intel_connector *connector)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_close_mei_session(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->close_hdcp_session(comp->mei_dev,
					    &connector->hdcp.port_data);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
					  sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}

static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp2_ske_send_eks send_eks;
	int ret;

	ret = hdcp2_prepare_skey(connector, &send_eks);
	if (ret < 0)
		return ret;

	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
					sizeof(send_eks));
	if (ret < 0)
		return ret;

	return 0;
}
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	/* K no of streams is fixed as 1. Stored as big-endian. */
	msgs.stream_manage.k = cpu_to_be16(1);

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	msgs.stream_manage.streams[0].stream_id = 0;
	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;

	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		return ret;

	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

	return ret;
}
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
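/*
 * Encryption is requested by setting CTL_LINK_ENCRYPTION_REQ once the link
 * reports authenticated, and is only considered enabled once
 * LINK_ENCRYPTION_STATUS is observed within ENCRYPT_STATUS_CHANGE_TIMEOUT_MS
 * (50 ms, per the define at the top of the file).
 */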
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Let's restart the auth in case of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}
			hdcp->port_data.streams[0].stream_type =
				hdcp->content_type;
			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	return ret;
}
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}

static int _intel_hdcp2_disable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;

	return ret;
}
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);

	if (drm_connector_is_unregistered(&connector->base))
		return;

	if (!intel_hdcp2_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP_CHECK_PERIOD_MS);
}
static int i915_hdcp_component_bind(struct device *i915_kdev,
				    struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
	dev_priv->hdcp_master->mei_dev = mei_kdev;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return 0;
}

static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = NULL;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
}

static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
	switch (port) {
	case PORT_A:
		return MEI_DDI_A;
	case PORT_B ... PORT_F:
		return (enum mei_fw_ddi)port;
	default:
		return MEI_DDI_INVALID_PORT;
	}
}

static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A ... TRANSCODER_D:
		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
		return MEI_INVALID_TRANSCODER;
	}
}
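/*
 * The ME firmware enumerates transcoders with an offset of 0x10, so
 * TRANSCODER_A..TRANSCODER_D map directly onto the corresponding mei_fw_tc
 * values; eDP and DSI transcoders have no HDCP support and map to
 * MEI_INVALID_TRANSCODER.
 */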
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     enum port port,
				     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp_port_data *data = &hdcp->port_data;

	if (INTEL_GEN(dev_priv) < 12)
		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->fw_ddi = MEI_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here fw_tc
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->fw_tc = MEI_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	data->k = 1;
	data->streams = kcalloc(data->k,
				sizeof(struct hdcp2_streamid_type),
				GFP_KERNEL);
	if (!data->streams) {
		drm_err(&dev_priv->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}

static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
{
	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
		return false;

	return (INTEL_GEN(dev_priv) >= 10 ||
		IS_GEMINILAKE(dev_priv) ||
		IS_KABYLAKE(dev_priv) ||
		IS_COFFEELAKE(dev_priv) ||
		IS_COMETLAKE(dev_priv));
}
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!is_hdcp2_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);

	dev_priv->hdcp_comp_added = true;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
				  I915_COMPONENT_HDCP);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
			    ret);
		mutex_lock(&dev_priv->hdcp_comp_mutex);
		dev_priv->hdcp_comp_added = false;
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}
}

static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
			     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	ret = initialize_hdcp_port_data(connector, port, shim);
	if (ret) {
		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
		return;
	}

	hdcp->hdcp2_supported = true;
}
int intel_hdcp_init(struct intel_connector *connector,
		    enum port port,
		    const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	if (!shim)
		return -EINVAL;

	if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
		intel_hdcp2_init(connector, port, shim);

	ret =
	drm_connector_attach_content_protection_property(&connector->base,
							  hdcp->hdcp2_supported);
	if (ret) {
		hdcp->hdcp2_supported = false;
		kfree(hdcp->port_data.streams);
		return ret;
	}

	hdcp->shim = shim;
	mutex_init(&hdcp->mutex);
	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
	init_waitqueue_head(&hdcp->cp_irq_queue);

	return 0;
}
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;
	hdcp->cpu_transcoder = cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 12)
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state->cpu_transcoder,
				  (u8)conn_state->hdcp_content_type);
}
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}

	dev_priv->hdcp_comp_added = false;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
}

void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	kfree(hdcp->port_data.streams);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	     new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
		    new_state->hdcp_content_type)
			return;
	}

	crtc_state->mode_changed = true;
}
/* Handles the CP_IRQ raised from the DP HDCP sink */
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	atomic_inc(&connector->hdcp.cp_irq_count);
	wake_up_all(&connector->hdcp.cp_irq_queue);

	schedule_delayed_work(&hdcp->check_work, 0);
}