/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 * Ramalingam C <ramalingam.c@intel.com>
 */
#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>

#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>

#include "intel_connector.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
/* Number of attempts at loading the HDCP1.4 keys before giving up */
#define KEY_LOAD_TRIES	5
/* Timeout (ms) for the HW encryption status bit to change state */
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
/* Number of HDCP2.2 locality-check retries permitted by the spec */
#define HDCP2_LC_RETRY_CNT			3
30 bool intel_hdcp_is_ksv_valid(u8
*ksv
)
33 /* KSV has 20 1's and 20 0's */
34 for (i
= 0; i
< DRM_HDCP_KSV_LEN
; i
++)
35 ones
+= hweight8(ksv
[i
]);
43 int intel_hdcp_read_valid_bksv(struct intel_digital_port
*intel_dig_port
,
44 const struct intel_hdcp_shim
*shim
, u8
*bksv
)
46 int ret
, i
, tries
= 2;
48 /* HDCP spec states that we must retry the bksv if it is invalid */
49 for (i
= 0; i
< tries
; i
++) {
50 ret
= shim
->read_bksv(intel_dig_port
, bksv
);
53 if (intel_hdcp_is_ksv_valid(bksv
))
57 DRM_DEBUG_KMS("Bksv is invalid\n");
64 /* Is HDCP1.4 capable on Platform and Sink */
65 bool intel_hdcp_capable(struct intel_connector
*connector
)
67 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
68 const struct intel_hdcp_shim
*shim
= connector
->hdcp
.shim
;
75 if (shim
->hdcp_capable
) {
76 shim
->hdcp_capable(intel_dig_port
, &capable
);
78 if (!intel_hdcp_read_valid_bksv(intel_dig_port
, shim
, bksv
))
85 /* Is HDCP2.2 capable on Platform and Sink */
86 bool intel_hdcp2_capable(struct intel_connector
*connector
)
88 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
89 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
90 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
93 /* I915 support for HDCP2.2 */
94 if (!hdcp
->hdcp2_supported
)
97 /* MEI interface is solid */
98 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
99 if (!dev_priv
->hdcp_comp_added
|| !dev_priv
->hdcp_master
) {
100 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
103 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
105 /* Sink's capability for HDCP2.2 */
106 hdcp
->shim
->hdcp_2_2_capable(intel_dig_port
, &capable
);
112 bool intel_hdcp_in_use(struct drm_i915_private
*dev_priv
,
113 enum transcoder cpu_transcoder
, enum port port
)
115 return I915_READ(HDCP_STATUS(dev_priv
, cpu_transcoder
, port
)) &
120 bool intel_hdcp2_in_use(struct drm_i915_private
*dev_priv
,
121 enum transcoder cpu_transcoder
, enum port port
)
123 return I915_READ(HDCP2_STATUS(dev_priv
, cpu_transcoder
, port
)) &
124 LINK_ENCRYPTION_STATUS
;
127 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port
*intel_dig_port
,
128 const struct intel_hdcp_shim
*shim
)
133 /* Poll for ksv list ready (spec says max time allowed is 5s) */
134 ret
= __wait_for(read_ret
= shim
->read_ksv_ready(intel_dig_port
,
136 read_ret
|| ksv_ready
, 5 * 1000 * 1000, 1000,
148 static bool hdcp_key_loadable(struct drm_i915_private
*dev_priv
)
150 struct i915_power_domains
*power_domains
= &dev_priv
->power_domains
;
151 struct i915_power_well
*power_well
;
152 enum i915_power_well_id id
;
153 bool enabled
= false;
156 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
157 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
159 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
160 id
= HSW_DISP_PW_GLOBAL
;
164 mutex_lock(&power_domains
->lock
);
166 /* PG1 (power well #1) needs to be enabled */
167 for_each_power_well(dev_priv
, power_well
) {
168 if (power_well
->desc
->id
== id
) {
169 enabled
= power_well
->desc
->ops
->is_enabled(dev_priv
,
174 mutex_unlock(&power_domains
->lock
);
177 * Another req for hdcp key loadability is enabled state of pll for
178 * cdclk. Without active crtc we wont land here. So we are assuming that
179 * cdclk is already on.
185 static void intel_hdcp_clear_keys(struct drm_i915_private
*dev_priv
)
187 I915_WRITE(HDCP_KEY_CONF
, HDCP_CLEAR_KEYS_TRIGGER
);
188 I915_WRITE(HDCP_KEY_STATUS
, HDCP_KEY_LOAD_DONE
| HDCP_KEY_LOAD_STATUS
|
189 HDCP_FUSE_IN_PROGRESS
| HDCP_FUSE_ERROR
| HDCP_FUSE_DONE
);
192 static int intel_hdcp_load_keys(struct drm_i915_private
*dev_priv
)
197 val
= I915_READ(HDCP_KEY_STATUS
);
198 if ((val
& HDCP_KEY_LOAD_DONE
) && (val
& HDCP_KEY_LOAD_STATUS
))
202 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
203 * out of reset. So if Key is not already loaded, its an error state.
205 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
206 if (!(I915_READ(HDCP_KEY_STATUS
) & HDCP_KEY_LOAD_DONE
))
210 * Initiate loading the HDCP key from fuses.
212 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
213 * platforms except BXT and GLK, differ in the key load trigger process
214 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
216 if (IS_GEN9_BC(dev_priv
)) {
217 ret
= sandybridge_pcode_write(dev_priv
,
218 SKL_PCODE_LOAD_HDCP_KEYS
, 1);
220 DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
225 I915_WRITE(HDCP_KEY_CONF
, HDCP_KEY_LOAD_TRIGGER
);
228 /* Wait for the keys to load (500us) */
229 ret
= __intel_wait_for_register(&dev_priv
->uncore
, HDCP_KEY_STATUS
,
230 HDCP_KEY_LOAD_DONE
, HDCP_KEY_LOAD_DONE
,
234 else if (!(val
& HDCP_KEY_LOAD_STATUS
))
237 /* Send Aksv over to PCH display for use in authentication */
238 I915_WRITE(HDCP_KEY_CONF
, HDCP_AKSV_SEND_TRIGGER
);
243 /* Returns updated SHA-1 index */
244 static int intel_write_sha_text(struct drm_i915_private
*dev_priv
, u32 sha_text
)
246 I915_WRITE(HDCP_SHA_TEXT
, sha_text
);
247 if (intel_de_wait_for_set(dev_priv
, HDCP_REP_CTL
, HDCP_SHA1_READY
, 1)) {
248 DRM_ERROR("Timed out waiting for SHA1 ready\n");
255 u32
intel_hdcp_get_repeater_ctl(struct drm_i915_private
*dev_priv
,
256 enum transcoder cpu_transcoder
, enum port port
)
258 if (INTEL_GEN(dev_priv
) >= 12) {
259 switch (cpu_transcoder
) {
261 return HDCP_TRANSA_REP_PRESENT
|
264 return HDCP_TRANSB_REP_PRESENT
|
267 return HDCP_TRANSC_REP_PRESENT
|
270 return HDCP_TRANSD_REP_PRESENT
|
273 DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder
);
280 return HDCP_DDIA_REP_PRESENT
| HDCP_DDIA_SHA1_M0
;
282 return HDCP_DDIB_REP_PRESENT
| HDCP_DDIB_SHA1_M0
;
284 return HDCP_DDIC_REP_PRESENT
| HDCP_DDIC_SHA1_M0
;
286 return HDCP_DDID_REP_PRESENT
| HDCP_DDID_SHA1_M0
;
288 return HDCP_DDIE_REP_PRESENT
| HDCP_DDIE_SHA1_M0
;
290 DRM_ERROR("Unknown port %d\n", port
);
296 int intel_hdcp_validate_v_prime(struct intel_connector
*connector
,
297 const struct intel_hdcp_shim
*shim
,
298 u8
*ksv_fifo
, u8 num_downstream
, u8
*bstatus
)
300 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
301 struct drm_i915_private
*dev_priv
;
302 enum transcoder cpu_transcoder
= connector
->hdcp
.cpu_transcoder
;
303 enum port port
= intel_dig_port
->base
.port
;
304 u32 vprime
, sha_text
, sha_leftovers
, rep_ctl
;
305 int ret
, i
, j
, sha_idx
;
307 dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
309 /* Process V' values from the receiver */
310 for (i
= 0; i
< DRM_HDCP_V_PRIME_NUM_PARTS
; i
++) {
311 ret
= shim
->read_v_prime_part(intel_dig_port
, i
, &vprime
);
314 I915_WRITE(HDCP_SHA_V_PRIME(i
), vprime
);
318 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
319 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
320 * stream is written via the HDCP_SHA_TEXT register in 32-bit
321 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
322 * index will keep track of our progress through the 64 bytes as well as
323 * helping us work the 40-bit KSVs through our 32-bit register.
325 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
330 rep_ctl
= intel_hdcp_get_repeater_ctl(dev_priv
, cpu_transcoder
, port
);
331 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
332 for (i
= 0; i
< num_downstream
; i
++) {
333 unsigned int sha_empty
;
334 u8
*ksv
= &ksv_fifo
[i
* DRM_HDCP_KSV_LEN
];
336 /* Fill up the empty slots in sha_text and write it out */
337 sha_empty
= sizeof(sha_text
) - sha_leftovers
;
338 for (j
= 0; j
< sha_empty
; j
++)
339 sha_text
|= ksv
[j
] << ((sizeof(sha_text
) - j
- 1) * 8);
341 ret
= intel_write_sha_text(dev_priv
, sha_text
);
345 /* Programming guide writes this every 64 bytes */
346 sha_idx
+= sizeof(sha_text
);
348 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
350 /* Store the leftover bytes from the ksv in sha_text */
351 sha_leftovers
= DRM_HDCP_KSV_LEN
- sha_empty
;
353 for (j
= 0; j
< sha_leftovers
; j
++)
354 sha_text
|= ksv
[sha_empty
+ j
] <<
355 ((sizeof(sha_text
) - j
- 1) * 8);
358 * If we still have room in sha_text for more data, continue.
359 * Otherwise, write it out immediately.
361 if (sizeof(sha_text
) > sha_leftovers
)
364 ret
= intel_write_sha_text(dev_priv
, sha_text
);
369 sha_idx
+= sizeof(sha_text
);
373 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
374 * bytes are leftover from the last ksv, we might be able to fit them
375 * all in sha_text (first 2 cases), or we might need to split them up
376 * into 2 writes (last 2 cases).
378 if (sha_leftovers
== 0) {
379 /* Write 16 bits of text, 16 bits of M0 */
380 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_16
);
381 ret
= intel_write_sha_text(dev_priv
,
382 bstatus
[0] << 8 | bstatus
[1]);
385 sha_idx
+= sizeof(sha_text
);
387 /* Write 32 bits of M0 */
388 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
389 ret
= intel_write_sha_text(dev_priv
, 0);
392 sha_idx
+= sizeof(sha_text
);
394 /* Write 16 bits of M0 */
395 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_16
);
396 ret
= intel_write_sha_text(dev_priv
, 0);
399 sha_idx
+= sizeof(sha_text
);
401 } else if (sha_leftovers
== 1) {
402 /* Write 24 bits of text, 8 bits of M0 */
403 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_24
);
404 sha_text
|= bstatus
[0] << 16 | bstatus
[1] << 8;
405 /* Only 24-bits of data, must be in the LSB */
406 sha_text
= (sha_text
& 0xffffff00) >> 8;
407 ret
= intel_write_sha_text(dev_priv
, sha_text
);
410 sha_idx
+= sizeof(sha_text
);
412 /* Write 32 bits of M0 */
413 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
414 ret
= intel_write_sha_text(dev_priv
, 0);
417 sha_idx
+= sizeof(sha_text
);
419 /* Write 24 bits of M0 */
420 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_8
);
421 ret
= intel_write_sha_text(dev_priv
, 0);
424 sha_idx
+= sizeof(sha_text
);
426 } else if (sha_leftovers
== 2) {
427 /* Write 32 bits of text */
428 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
429 sha_text
|= bstatus
[0] << 24 | bstatus
[1] << 16;
430 ret
= intel_write_sha_text(dev_priv
, sha_text
);
433 sha_idx
+= sizeof(sha_text
);
435 /* Write 64 bits of M0 */
436 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
437 for (i
= 0; i
< 2; i
++) {
438 ret
= intel_write_sha_text(dev_priv
, 0);
441 sha_idx
+= sizeof(sha_text
);
443 } else if (sha_leftovers
== 3) {
444 /* Write 32 bits of text */
445 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
446 sha_text
|= bstatus
[0] << 24;
447 ret
= intel_write_sha_text(dev_priv
, sha_text
);
450 sha_idx
+= sizeof(sha_text
);
452 /* Write 8 bits of text, 24 bits of M0 */
453 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_8
);
454 ret
= intel_write_sha_text(dev_priv
, bstatus
[1]);
457 sha_idx
+= sizeof(sha_text
);
459 /* Write 32 bits of M0 */
460 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_0
);
461 ret
= intel_write_sha_text(dev_priv
, 0);
464 sha_idx
+= sizeof(sha_text
);
466 /* Write 8 bits of M0 */
467 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_24
);
468 ret
= intel_write_sha_text(dev_priv
, 0);
471 sha_idx
+= sizeof(sha_text
);
473 DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
478 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_TEXT_32
);
479 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
480 while ((sha_idx
% 64) < (64 - sizeof(sha_text
))) {
481 ret
= intel_write_sha_text(dev_priv
, 0);
484 sha_idx
+= sizeof(sha_text
);
488 * Last write gets the length of the concatenation in bits. That is:
489 * - 5 bytes per device
490 * - 10 bytes for BINFO/BSTATUS(2), M0(8)
492 sha_text
= (num_downstream
* 5 + 10) * 8;
493 ret
= intel_write_sha_text(dev_priv
, sha_text
);
497 /* Tell the HW we're done with the hash and wait for it to ACK */
498 I915_WRITE(HDCP_REP_CTL
, rep_ctl
| HDCP_SHA1_COMPLETE_HASH
);
499 if (intel_de_wait_for_set(dev_priv
, HDCP_REP_CTL
,
500 HDCP_SHA1_COMPLETE
, 1)) {
501 DRM_ERROR("Timed out waiting for SHA1 complete\n");
504 if (!(I915_READ(HDCP_REP_CTL
) & HDCP_SHA1_V_MATCH
)) {
505 DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
512 /* Implements Part 2 of the HDCP authorization procedure */
514 int intel_hdcp_auth_downstream(struct intel_connector
*connector
)
516 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
517 const struct intel_hdcp_shim
*shim
= connector
->hdcp
.shim
;
518 struct drm_device
*dev
= connector
->base
.dev
;
519 u8 bstatus
[2], num_downstream
, *ksv_fifo
;
520 int ret
, i
, tries
= 3;
522 ret
= intel_hdcp_poll_ksv_fifo(intel_dig_port
, shim
);
524 DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret
);
528 ret
= shim
->read_bstatus(intel_dig_port
, bstatus
);
532 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus
[0]) ||
533 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus
[1])) {
534 DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
539 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
540 * the HDCP encryption. That implies that repeater can't have its own
541 * display. As there is no consumption of encrypted content in the
542 * repeater with 0 downstream devices, we are failing the
545 num_downstream
= DRM_HDCP_NUM_DOWNSTREAM(bstatus
[0]);
546 if (num_downstream
== 0) {
547 DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
551 ksv_fifo
= kcalloc(DRM_HDCP_KSV_LEN
, num_downstream
, GFP_KERNEL
);
553 DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
557 ret
= shim
->read_ksv_fifo(intel_dig_port
, num_downstream
, ksv_fifo
);
561 if (drm_hdcp_check_ksvs_revoked(dev
, ksv_fifo
, num_downstream
)) {
562 DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
568 * When V prime mismatches, DP Spec mandates re-read of
569 * V prime atleast twice.
571 for (i
= 0; i
< tries
; i
++) {
572 ret
= intel_hdcp_validate_v_prime(connector
, shim
,
573 ksv_fifo
, num_downstream
,
580 DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret
);
584 DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
592 /* Implements Part 1 of the HDCP authorization procedure */
593 static int intel_hdcp_auth(struct intel_connector
*connector
)
595 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
596 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
597 struct drm_device
*dev
= connector
->base
.dev
;
598 const struct intel_hdcp_shim
*shim
= hdcp
->shim
;
599 struct drm_i915_private
*dev_priv
;
600 enum transcoder cpu_transcoder
= connector
->hdcp
.cpu_transcoder
;
602 unsigned long r0_prime_gen_start
;
603 int ret
, i
, tries
= 2;
606 u8 shim
[DRM_HDCP_AN_LEN
];
610 u8 shim
[DRM_HDCP_KSV_LEN
];
614 u8 shim
[DRM_HDCP_RI_LEN
];
616 bool repeater_present
, hdcp_capable
;
618 dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
620 port
= intel_dig_port
->base
.port
;
623 * Detects whether the display is HDCP capable. Although we check for
624 * valid Bksv below, the HDCP over DP spec requires that we check
625 * whether the display supports HDCP before we write An. For HDMI
626 * displays, this is not necessary.
628 if (shim
->hdcp_capable
) {
629 ret
= shim
->hdcp_capable(intel_dig_port
, &hdcp_capable
);
633 DRM_DEBUG_KMS("Panel is not HDCP capable\n");
638 /* Initialize An with 2 random values and acquire it */
639 for (i
= 0; i
< 2; i
++)
640 I915_WRITE(HDCP_ANINIT(dev_priv
, cpu_transcoder
, port
),
642 I915_WRITE(HDCP_CONF(dev_priv
, cpu_transcoder
, port
),
643 HDCP_CONF_CAPTURE_AN
);
645 /* Wait for An to be acquired */
646 if (intel_de_wait_for_set(dev_priv
,
647 HDCP_STATUS(dev_priv
, cpu_transcoder
, port
),
648 HDCP_STATUS_AN_READY
, 1)) {
649 DRM_ERROR("Timed out waiting for An\n");
653 an
.reg
[0] = I915_READ(HDCP_ANLO(dev_priv
, cpu_transcoder
, port
));
654 an
.reg
[1] = I915_READ(HDCP_ANHI(dev_priv
, cpu_transcoder
, port
));
655 ret
= shim
->write_an_aksv(intel_dig_port
, an
.shim
);
659 r0_prime_gen_start
= jiffies
;
661 memset(&bksv
, 0, sizeof(bksv
));
663 ret
= intel_hdcp_read_valid_bksv(intel_dig_port
, shim
, bksv
.shim
);
667 if (drm_hdcp_check_ksvs_revoked(dev
, bksv
.shim
, 1)) {
668 DRM_ERROR("BKSV is revoked\n");
672 I915_WRITE(HDCP_BKSVLO(dev_priv
, cpu_transcoder
, port
), bksv
.reg
[0]);
673 I915_WRITE(HDCP_BKSVHI(dev_priv
, cpu_transcoder
, port
), bksv
.reg
[1]);
675 ret
= shim
->repeater_present(intel_dig_port
, &repeater_present
);
678 if (repeater_present
)
679 I915_WRITE(HDCP_REP_CTL
,
680 intel_hdcp_get_repeater_ctl(dev_priv
, cpu_transcoder
,
683 ret
= shim
->toggle_signalling(intel_dig_port
, true);
687 I915_WRITE(HDCP_CONF(dev_priv
, cpu_transcoder
, port
),
688 HDCP_CONF_AUTH_AND_ENC
);
690 /* Wait for R0 ready */
691 if (wait_for(I915_READ(HDCP_STATUS(dev_priv
, cpu_transcoder
, port
)) &
692 (HDCP_STATUS_R0_READY
| HDCP_STATUS_ENC
), 1)) {
693 DRM_ERROR("Timed out waiting for R0 ready\n");
698 * Wait for R0' to become available. The spec says 100ms from Aksv, but
699 * some monitors can take longer than this. We'll set the timeout at
700 * 300ms just to be sure.
702 * On DP, there's an R0_READY bit available but no such bit
703 * exists on HDMI. Since the upper-bound is the same, we'll just do
704 * the stupid thing instead of polling on one and not the other.
706 wait_remaining_ms_from_jiffies(r0_prime_gen_start
, 300);
711 * DP HDCP Spec mandates the two more reattempt to read R0, incase
714 for (i
= 0; i
< tries
; i
++) {
716 ret
= shim
->read_ri_prime(intel_dig_port
, ri
.shim
);
719 I915_WRITE(HDCP_RPRIME(dev_priv
, cpu_transcoder
, port
), ri
.reg
);
721 /* Wait for Ri prime match */
722 if (!wait_for(I915_READ(HDCP_STATUS(dev_priv
, cpu_transcoder
,
724 (HDCP_STATUS_RI_MATCH
| HDCP_STATUS_ENC
), 1))
729 DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
730 I915_READ(HDCP_STATUS(dev_priv
, cpu_transcoder
,
735 /* Wait for encryption confirmation */
736 if (intel_de_wait_for_set(dev_priv
,
737 HDCP_STATUS(dev_priv
, cpu_transcoder
, port
),
739 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS
)) {
740 DRM_ERROR("Timed out waiting for encryption\n");
745 * XXX: If we have MST-connected devices, we need to enable encryption
749 if (repeater_present
)
750 return intel_hdcp_auth_downstream(connector
);
752 DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
756 static int _intel_hdcp_disable(struct intel_connector
*connector
)
758 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
759 struct drm_i915_private
*dev_priv
= connector
->base
.dev
->dev_private
;
760 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
761 enum port port
= intel_dig_port
->base
.port
;
762 enum transcoder cpu_transcoder
= hdcp
->cpu_transcoder
;
765 DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
766 connector
->base
.name
, connector
->base
.base
.id
);
768 hdcp
->hdcp_encrypted
= false;
769 I915_WRITE(HDCP_CONF(dev_priv
, cpu_transcoder
, port
), 0);
770 if (intel_de_wait_for_clear(dev_priv
,
771 HDCP_STATUS(dev_priv
, cpu_transcoder
, port
),
772 ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS
)) {
773 DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
777 ret
= hdcp
->shim
->toggle_signalling(intel_dig_port
, false);
779 DRM_ERROR("Failed to disable HDCP signalling\n");
783 DRM_DEBUG_KMS("HDCP is disabled\n");
787 static int _intel_hdcp_enable(struct intel_connector
*connector
)
789 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
790 struct drm_i915_private
*dev_priv
= connector
->base
.dev
->dev_private
;
791 int i
, ret
, tries
= 3;
793 DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
794 connector
->base
.name
, connector
->base
.base
.id
);
796 if (!hdcp_key_loadable(dev_priv
)) {
797 DRM_ERROR("HDCP key Load is not possible\n");
801 for (i
= 0; i
< KEY_LOAD_TRIES
; i
++) {
802 ret
= intel_hdcp_load_keys(dev_priv
);
805 intel_hdcp_clear_keys(dev_priv
);
808 DRM_ERROR("Could not load HDCP keys, (%d)\n", ret
);
812 /* Incase of authentication failures, HDCP spec expects reauth. */
813 for (i
= 0; i
< tries
; i
++) {
814 ret
= intel_hdcp_auth(connector
);
816 hdcp
->hdcp_encrypted
= true;
820 DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret
);
822 /* Ensuring HDCP encryption and signalling are stopped. */
823 _intel_hdcp_disable(connector
);
826 DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries
, ret
);
831 struct intel_connector
*intel_hdcp_to_connector(struct intel_hdcp
*hdcp
)
833 return container_of(hdcp
, struct intel_connector
, hdcp
);
836 /* Implements Part 3 of the HDCP authorization procedure */
837 static int intel_hdcp_check_link(struct intel_connector
*connector
)
839 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
840 struct drm_i915_private
*dev_priv
= connector
->base
.dev
->dev_private
;
841 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
842 enum port port
= intel_dig_port
->base
.port
;
843 enum transcoder cpu_transcoder
;
846 mutex_lock(&hdcp
->mutex
);
847 cpu_transcoder
= hdcp
->cpu_transcoder
;
849 /* Check_link valid only when HDCP1.4 is enabled */
850 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_ENABLED
||
851 !hdcp
->hdcp_encrypted
) {
856 if (WARN_ON(!intel_hdcp_in_use(dev_priv
, cpu_transcoder
, port
))) {
857 DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
858 connector
->base
.name
, connector
->base
.base
.id
,
859 I915_READ(HDCP_STATUS(dev_priv
, cpu_transcoder
,
862 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
863 schedule_work(&hdcp
->prop_work
);
867 if (hdcp
->shim
->check_link(intel_dig_port
)) {
868 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
) {
869 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_ENABLED
;
870 schedule_work(&hdcp
->prop_work
);
875 DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
876 connector
->base
.name
, connector
->base
.base
.id
);
878 ret
= _intel_hdcp_disable(connector
);
880 DRM_ERROR("Failed to disable hdcp (%d)\n", ret
);
881 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
882 schedule_work(&hdcp
->prop_work
);
886 ret
= _intel_hdcp_enable(connector
);
888 DRM_ERROR("Failed to enable hdcp (%d)\n", ret
);
889 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
890 schedule_work(&hdcp
->prop_work
);
895 mutex_unlock(&hdcp
->mutex
);
899 static void intel_hdcp_prop_work(struct work_struct
*work
)
901 struct intel_hdcp
*hdcp
= container_of(work
, struct intel_hdcp
,
903 struct intel_connector
*connector
= intel_hdcp_to_connector(hdcp
);
904 struct drm_device
*dev
= connector
->base
.dev
;
906 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
907 mutex_lock(&hdcp
->mutex
);
910 * This worker is only used to flip between ENABLED/DESIRED. Either of
911 * those to UNDESIRED is handled by core. If value == UNDESIRED,
912 * we're running just after hdcp has been disabled, so just exit
914 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
)
915 drm_hdcp_update_content_protection(&connector
->base
,
918 mutex_unlock(&hdcp
->mutex
);
919 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
922 bool is_hdcp_supported(struct drm_i915_private
*dev_priv
, enum port port
)
924 /* PORT E doesn't have HDCP, and PORT F is disabled */
925 return INTEL_INFO(dev_priv
)->display
.has_hdcp
&& port
< PORT_E
;
929 hdcp2_prepare_ake_init(struct intel_connector
*connector
,
930 struct hdcp2_ake_init
*ake_data
)
932 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
933 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
934 struct i915_hdcp_comp_master
*comp
;
937 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
938 comp
= dev_priv
->hdcp_master
;
940 if (!comp
|| !comp
->ops
) {
941 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
945 ret
= comp
->ops
->initiate_hdcp2_session(comp
->mei_dev
, data
, ake_data
);
947 DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret
);
948 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
954 hdcp2_verify_rx_cert_prepare_km(struct intel_connector
*connector
,
955 struct hdcp2_ake_send_cert
*rx_cert
,
957 struct hdcp2_ake_no_stored_km
*ek_pub_km
,
960 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
961 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
962 struct i915_hdcp_comp_master
*comp
;
965 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
966 comp
= dev_priv
->hdcp_master
;
968 if (!comp
|| !comp
->ops
) {
969 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
973 ret
= comp
->ops
->verify_receiver_cert_prepare_km(comp
->mei_dev
, data
,
977 DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret
);
978 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
983 static int hdcp2_verify_hprime(struct intel_connector
*connector
,
984 struct hdcp2_ake_send_hprime
*rx_hprime
)
986 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
987 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
988 struct i915_hdcp_comp_master
*comp
;
991 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
992 comp
= dev_priv
->hdcp_master
;
994 if (!comp
|| !comp
->ops
) {
995 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
999 ret
= comp
->ops
->verify_hprime(comp
->mei_dev
, data
, rx_hprime
);
1001 DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret
);
1002 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1008 hdcp2_store_pairing_info(struct intel_connector
*connector
,
1009 struct hdcp2_ake_send_pairing_info
*pairing_info
)
1011 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
1012 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1013 struct i915_hdcp_comp_master
*comp
;
1016 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1017 comp
= dev_priv
->hdcp_master
;
1019 if (!comp
|| !comp
->ops
) {
1020 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1024 ret
= comp
->ops
->store_pairing_info(comp
->mei_dev
, data
, pairing_info
);
1026 DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret
);
1027 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1033 hdcp2_prepare_lc_init(struct intel_connector
*connector
,
1034 struct hdcp2_lc_init
*lc_init
)
1036 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
1037 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1038 struct i915_hdcp_comp_master
*comp
;
1041 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1042 comp
= dev_priv
->hdcp_master
;
1044 if (!comp
|| !comp
->ops
) {
1045 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1049 ret
= comp
->ops
->initiate_locality_check(comp
->mei_dev
, data
, lc_init
);
1051 DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret
);
1052 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1058 hdcp2_verify_lprime(struct intel_connector
*connector
,
1059 struct hdcp2_lc_send_lprime
*rx_lprime
)
1061 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
1062 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1063 struct i915_hdcp_comp_master
*comp
;
1066 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1067 comp
= dev_priv
->hdcp_master
;
1069 if (!comp
|| !comp
->ops
) {
1070 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1074 ret
= comp
->ops
->verify_lprime(comp
->mei_dev
, data
, rx_lprime
);
1076 DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret
);
1077 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1082 static int hdcp2_prepare_skey(struct intel_connector
*connector
,
1083 struct hdcp2_ske_send_eks
*ske_data
)
1085 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
1086 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1087 struct i915_hdcp_comp_master
*comp
;
1090 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1091 comp
= dev_priv
->hdcp_master
;
1093 if (!comp
|| !comp
->ops
) {
1094 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1098 ret
= comp
->ops
->get_session_key(comp
->mei_dev
, data
, ske_data
);
1100 DRM_DEBUG_KMS("Get session key failed. %d\n", ret
);
1101 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1107 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector
*connector
,
1108 struct hdcp2_rep_send_receiverid_list
1110 struct hdcp2_rep_send_ack
*rep_send_ack
)
1112 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
1113 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1114 struct i915_hdcp_comp_master
*comp
;
1117 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1118 comp
= dev_priv
->hdcp_master
;
1120 if (!comp
|| !comp
->ops
) {
1121 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1125 ret
= comp
->ops
->repeater_check_flow_prepare_ack(comp
->mei_dev
, data
,
1129 DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret
);
1130 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1136 hdcp2_verify_mprime(struct intel_connector
*connector
,
1137 struct hdcp2_rep_stream_ready
*stream_ready
)
1139 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
1140 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1141 struct i915_hdcp_comp_master
*comp
;
1144 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1145 comp
= dev_priv
->hdcp_master
;
1147 if (!comp
|| !comp
->ops
) {
1148 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1152 ret
= comp
->ops
->verify_mprime(comp
->mei_dev
, data
, stream_ready
);
1154 DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret
);
1155 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1160 static int hdcp2_authenticate_port(struct intel_connector
*connector
)
1162 struct hdcp_port_data
*data
= &connector
->hdcp
.port_data
;
1163 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1164 struct i915_hdcp_comp_master
*comp
;
1167 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1168 comp
= dev_priv
->hdcp_master
;
1170 if (!comp
|| !comp
->ops
) {
1171 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1175 ret
= comp
->ops
->enable_hdcp_authentication(comp
->mei_dev
, data
);
1177 DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret
);
1178 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1183 static int hdcp2_close_mei_session(struct intel_connector
*connector
)
1185 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1186 struct i915_hdcp_comp_master
*comp
;
1189 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1190 comp
= dev_priv
->hdcp_master
;
1192 if (!comp
|| !comp
->ops
) {
1193 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1197 ret
= comp
->ops
->close_hdcp_session(comp
->mei_dev
,
1198 &connector
->hdcp
.port_data
);
1199 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
/* Deauthentication is simply closing the MEI session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1209 /* Authentication flow starts from here */
1210 static int hdcp2_authentication_key_exchange(struct intel_connector
*connector
)
1212 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1213 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1214 struct drm_device
*dev
= connector
->base
.dev
;
1216 struct hdcp2_ake_init ake_init
;
1217 struct hdcp2_ake_send_cert send_cert
;
1218 struct hdcp2_ake_no_stored_km no_stored_km
;
1219 struct hdcp2_ake_send_hprime send_hprime
;
1220 struct hdcp2_ake_send_pairing_info pairing_info
;
1222 const struct intel_hdcp_shim
*shim
= hdcp
->shim
;
1226 /* Init for seq_num */
1227 hdcp
->seq_num_v
= 0;
1228 hdcp
->seq_num_m
= 0;
1230 ret
= hdcp2_prepare_ake_init(connector
, &msgs
.ake_init
);
1234 ret
= shim
->write_2_2_msg(intel_dig_port
, &msgs
.ake_init
,
1235 sizeof(msgs
.ake_init
));
1239 ret
= shim
->read_2_2_msg(intel_dig_port
, HDCP_2_2_AKE_SEND_CERT
,
1240 &msgs
.send_cert
, sizeof(msgs
.send_cert
));
1244 if (msgs
.send_cert
.rx_caps
[0] != HDCP_2_2_RX_CAPS_VERSION_VAL
) {
1245 DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
1249 hdcp
->is_repeater
= HDCP_2_2_RX_REPEATER(msgs
.send_cert
.rx_caps
[2]);
1251 if (drm_hdcp_check_ksvs_revoked(dev
, msgs
.send_cert
.cert_rx
.receiver_id
,
1253 DRM_ERROR("Receiver ID is revoked\n");
1258 * Here msgs.no_stored_km will hold msgs corresponding to the km
1261 ret
= hdcp2_verify_rx_cert_prepare_km(connector
, &msgs
.send_cert
,
1263 &msgs
.no_stored_km
, &size
);
1267 ret
= shim
->write_2_2_msg(intel_dig_port
, &msgs
.no_stored_km
, size
);
1271 ret
= shim
->read_2_2_msg(intel_dig_port
, HDCP_2_2_AKE_SEND_HPRIME
,
1272 &msgs
.send_hprime
, sizeof(msgs
.send_hprime
));
1276 ret
= hdcp2_verify_hprime(connector
, &msgs
.send_hprime
);
1280 if (!hdcp
->is_paired
) {
1281 /* Pairing is required */
1282 ret
= shim
->read_2_2_msg(intel_dig_port
,
1283 HDCP_2_2_AKE_SEND_PAIRING_INFO
,
1285 sizeof(msgs
.pairing_info
));
1289 ret
= hdcp2_store_pairing_info(connector
, &msgs
.pairing_info
);
1292 hdcp
->is_paired
= true;
1298 static int hdcp2_locality_check(struct intel_connector
*connector
)
1300 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1301 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1303 struct hdcp2_lc_init lc_init
;
1304 struct hdcp2_lc_send_lprime send_lprime
;
1306 const struct intel_hdcp_shim
*shim
= hdcp
->shim
;
1307 int tries
= HDCP2_LC_RETRY_CNT
, ret
, i
;
1309 for (i
= 0; i
< tries
; i
++) {
1310 ret
= hdcp2_prepare_lc_init(connector
, &msgs
.lc_init
);
1314 ret
= shim
->write_2_2_msg(intel_dig_port
, &msgs
.lc_init
,
1315 sizeof(msgs
.lc_init
));
1319 ret
= shim
->read_2_2_msg(intel_dig_port
,
1320 HDCP_2_2_LC_SEND_LPRIME
,
1322 sizeof(msgs
.send_lprime
));
1326 ret
= hdcp2_verify_lprime(connector
, &msgs
.send_lprime
);
1334 static int hdcp2_session_key_exchange(struct intel_connector
*connector
)
1336 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1337 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1338 struct hdcp2_ske_send_eks send_eks
;
1341 ret
= hdcp2_prepare_skey(connector
, &send_eks
);
1345 ret
= hdcp
->shim
->write_2_2_msg(intel_dig_port
, &send_eks
,
1354 int hdcp2_propagate_stream_management_info(struct intel_connector
*connector
)
1356 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1357 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1359 struct hdcp2_rep_stream_manage stream_manage
;
1360 struct hdcp2_rep_stream_ready stream_ready
;
1362 const struct intel_hdcp_shim
*shim
= hdcp
->shim
;
1365 /* Prepare RepeaterAuth_Stream_Manage msg */
1366 msgs
.stream_manage
.msg_id
= HDCP_2_2_REP_STREAM_MANAGE
;
1367 drm_hdcp_cpu_to_be24(msgs
.stream_manage
.seq_num_m
, hdcp
->seq_num_m
);
1369 /* K no of streams is fixed as 1. Stored as big-endian. */
1370 msgs
.stream_manage
.k
= cpu_to_be16(1);
1372 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1373 msgs
.stream_manage
.streams
[0].stream_id
= 0;
1374 msgs
.stream_manage
.streams
[0].stream_type
= hdcp
->content_type
;
1376 /* Send it to Repeater */
1377 ret
= shim
->write_2_2_msg(intel_dig_port
, &msgs
.stream_manage
,
1378 sizeof(msgs
.stream_manage
));
1382 ret
= shim
->read_2_2_msg(intel_dig_port
, HDCP_2_2_REP_STREAM_READY
,
1383 &msgs
.stream_ready
, sizeof(msgs
.stream_ready
));
1387 hdcp
->port_data
.seq_num_m
= hdcp
->seq_num_m
;
1388 hdcp
->port_data
.streams
[0].stream_type
= hdcp
->content_type
;
1390 ret
= hdcp2_verify_mprime(connector
, &msgs
.stream_ready
);
1396 if (hdcp
->seq_num_m
> HDCP_2_2_SEQ_NUM_MAX
) {
1397 DRM_DEBUG_KMS("seq_num_m roll over.\n");
1405 int hdcp2_authenticate_repeater_topology(struct intel_connector
*connector
)
1407 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1408 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1409 struct drm_device
*dev
= connector
->base
.dev
;
1411 struct hdcp2_rep_send_receiverid_list recvid_list
;
1412 struct hdcp2_rep_send_ack rep_ack
;
1414 const struct intel_hdcp_shim
*shim
= hdcp
->shim
;
1415 u32 seq_num_v
, device_cnt
;
1419 ret
= shim
->read_2_2_msg(intel_dig_port
, HDCP_2_2_REP_SEND_RECVID_LIST
,
1420 &msgs
.recvid_list
, sizeof(msgs
.recvid_list
));
1424 rx_info
= msgs
.recvid_list
.rx_info
;
1426 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info
[1]) ||
1427 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info
[1])) {
1428 DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
1432 /* Converting and Storing the seq_num_v to local variable as DWORD */
1434 drm_hdcp_be24_to_cpu((const u8
*)msgs
.recvid_list
.seq_num_v
);
1436 if (seq_num_v
< hdcp
->seq_num_v
) {
1437 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1438 DRM_DEBUG_KMS("Seq_num_v roll over.\n");
1442 device_cnt
= (HDCP_2_2_DEV_COUNT_HI(rx_info
[0]) << 4 |
1443 HDCP_2_2_DEV_COUNT_LO(rx_info
[1]));
1444 if (drm_hdcp_check_ksvs_revoked(dev
, msgs
.recvid_list
.receiver_ids
,
1446 DRM_ERROR("Revoked receiver ID(s) is in list\n");
1450 ret
= hdcp2_verify_rep_topology_prepare_ack(connector
,
1456 hdcp
->seq_num_v
= seq_num_v
;
1457 ret
= shim
->write_2_2_msg(intel_dig_port
, &msgs
.rep_ack
,
1458 sizeof(msgs
.rep_ack
));
/* Repeater auth = topology verification followed by stream management. */
static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	int ret;

	ret = hdcp2_authenticate_repeater_topology(connector);
	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}
1476 static int hdcp2_authenticate_sink(struct intel_connector
*connector
)
1478 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1479 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1480 const struct intel_hdcp_shim
*shim
= hdcp
->shim
;
1483 ret
= hdcp2_authentication_key_exchange(connector
);
1485 DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret
);
1489 ret
= hdcp2_locality_check(connector
);
1491 DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret
);
1495 ret
= hdcp2_session_key_exchange(connector
);
1497 DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret
);
1501 if (shim
->config_stream_type
) {
1502 ret
= shim
->config_stream_type(intel_dig_port
,
1504 hdcp
->content_type
);
1509 if (hdcp
->is_repeater
) {
1510 ret
= hdcp2_authenticate_repeater(connector
);
1512 DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret
);
1517 hdcp
->port_data
.streams
[0].stream_type
= hdcp
->content_type
;
1518 ret
= hdcp2_authenticate_port(connector
);
1525 static int hdcp2_enable_encryption(struct intel_connector
*connector
)
1527 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1528 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1529 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1530 enum port port
= connector
->encoder
->port
;
1531 enum transcoder cpu_transcoder
= hdcp
->cpu_transcoder
;
1534 WARN_ON(I915_READ(HDCP2_STATUS(dev_priv
, cpu_transcoder
, port
)) &
1535 LINK_ENCRYPTION_STATUS
);
1536 if (hdcp
->shim
->toggle_signalling
) {
1537 ret
= hdcp
->shim
->toggle_signalling(intel_dig_port
, true);
1539 DRM_ERROR("Failed to enable HDCP signalling. %d\n",
1545 if (I915_READ(HDCP2_STATUS(dev_priv
, cpu_transcoder
, port
)) &
1547 /* Link is Authenticated. Now set for Encryption */
1548 I915_WRITE(HDCP2_CTL(dev_priv
, cpu_transcoder
, port
),
1549 I915_READ(HDCP2_CTL(dev_priv
, cpu_transcoder
,
1551 CTL_LINK_ENCRYPTION_REQ
);
1554 ret
= intel_de_wait_for_set(dev_priv
,
1555 HDCP2_STATUS(dev_priv
, cpu_transcoder
,
1557 LINK_ENCRYPTION_STATUS
,
1558 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS
);
1563 static int hdcp2_disable_encryption(struct intel_connector
*connector
)
1565 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1566 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1567 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1568 enum port port
= connector
->encoder
->port
;
1569 enum transcoder cpu_transcoder
= hdcp
->cpu_transcoder
;
1572 WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv
, cpu_transcoder
, port
)) &
1573 LINK_ENCRYPTION_STATUS
));
1575 I915_WRITE(HDCP2_CTL(dev_priv
, cpu_transcoder
, port
),
1576 I915_READ(HDCP2_CTL(dev_priv
, cpu_transcoder
, port
)) &
1577 ~CTL_LINK_ENCRYPTION_REQ
);
1579 ret
= intel_de_wait_for_clear(dev_priv
,
1580 HDCP2_STATUS(dev_priv
, cpu_transcoder
,
1582 LINK_ENCRYPTION_STATUS
,
1583 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS
);
1584 if (ret
== -ETIMEDOUT
)
1585 DRM_DEBUG_KMS("Disable Encryption Timedout");
1587 if (hdcp
->shim
->toggle_signalling
) {
1588 ret
= hdcp
->shim
->toggle_signalling(intel_dig_port
, false);
1590 DRM_ERROR("Failed to disable HDCP signalling. %d\n",
1599 static int hdcp2_authenticate_and_encrypt(struct intel_connector
*connector
)
1601 int ret
, i
, tries
= 3;
1603 for (i
= 0; i
< tries
; i
++) {
1604 ret
= hdcp2_authenticate_sink(connector
);
1608 /* Clearing the mei hdcp session */
1609 DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
1611 if (hdcp2_deauthenticate_port(connector
) < 0)
1612 DRM_DEBUG_KMS("Port deauth failed.\n");
1617 * Ensuring the required 200mSec min time interval between
1618 * Session Key Exchange and encryption.
1620 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN
);
1621 ret
= hdcp2_enable_encryption(connector
);
1623 DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret
);
1624 if (hdcp2_deauthenticate_port(connector
) < 0)
1625 DRM_DEBUG_KMS("Port deauth failed.\n");
1632 static int _intel_hdcp2_enable(struct intel_connector
*connector
)
1634 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1637 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1638 connector
->base
.name
, connector
->base
.base
.id
,
1639 hdcp
->content_type
);
1641 ret
= hdcp2_authenticate_and_encrypt(connector
);
1643 DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
1644 hdcp
->content_type
, ret
);
1648 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
1649 connector
->base
.name
, connector
->base
.base
.id
,
1650 hdcp
->content_type
);
1652 hdcp
->hdcp2_encrypted
= true;
1656 static int _intel_hdcp2_disable(struct intel_connector
*connector
)
1660 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
1661 connector
->base
.name
, connector
->base
.base
.id
);
1663 ret
= hdcp2_disable_encryption(connector
);
1665 if (hdcp2_deauthenticate_port(connector
) < 0)
1666 DRM_DEBUG_KMS("Port deauth failed.\n");
1668 connector
->hdcp
.hdcp2_encrypted
= false;
1673 /* Implements the Link Integrity Check for HDCP2.2 */
1674 static int intel_hdcp2_check_link(struct intel_connector
*connector
)
1676 struct intel_digital_port
*intel_dig_port
= conn_to_dig_port(connector
);
1677 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1678 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1679 enum port port
= connector
->encoder
->port
;
1680 enum transcoder cpu_transcoder
;
1683 mutex_lock(&hdcp
->mutex
);
1684 cpu_transcoder
= hdcp
->cpu_transcoder
;
1686 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
1687 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_ENABLED
||
1688 !hdcp
->hdcp2_encrypted
) {
1693 if (WARN_ON(!intel_hdcp2_in_use(dev_priv
, cpu_transcoder
, port
))) {
1694 DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
1695 I915_READ(HDCP2_STATUS(dev_priv
, cpu_transcoder
,
1698 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
1699 schedule_work(&hdcp
->prop_work
);
1703 ret
= hdcp
->shim
->check_2_2_link(intel_dig_port
);
1704 if (ret
== HDCP_LINK_PROTECTED
) {
1705 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
) {
1706 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_ENABLED
;
1707 schedule_work(&hdcp
->prop_work
);
1712 if (ret
== HDCP_TOPOLOGY_CHANGE
) {
1713 if (hdcp
->value
== DRM_MODE_CONTENT_PROTECTION_UNDESIRED
)
1716 DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
1717 ret
= hdcp2_authenticate_repeater_topology(connector
);
1719 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_ENABLED
;
1720 schedule_work(&hdcp
->prop_work
);
1723 DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
1724 connector
->base
.name
, connector
->base
.base
.id
,
1727 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
1728 connector
->base
.name
, connector
->base
.base
.id
);
1731 ret
= _intel_hdcp2_disable(connector
);
1733 DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
1734 connector
->base
.name
, connector
->base
.base
.id
, ret
);
1735 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
1736 schedule_work(&hdcp
->prop_work
);
1740 ret
= _intel_hdcp2_enable(connector
);
1742 DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
1743 connector
->base
.name
, connector
->base
.base
.id
,
1745 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
1746 schedule_work(&hdcp
->prop_work
);
1751 mutex_unlock(&hdcp
->mutex
);
1755 static void intel_hdcp_check_work(struct work_struct
*work
)
1757 struct intel_hdcp
*hdcp
= container_of(to_delayed_work(work
),
1760 struct intel_connector
*connector
= intel_hdcp_to_connector(hdcp
);
1762 if (!intel_hdcp2_check_link(connector
))
1763 schedule_delayed_work(&hdcp
->check_work
,
1764 DRM_HDCP2_CHECK_PERIOD_MS
);
1765 else if (!intel_hdcp_check_link(connector
))
1766 schedule_delayed_work(&hdcp
->check_work
,
1767 DRM_HDCP_CHECK_PERIOD_MS
);
1770 static int i915_hdcp_component_bind(struct device
*i915_kdev
,
1771 struct device
*mei_kdev
, void *data
)
1773 struct drm_i915_private
*dev_priv
= kdev_to_i915(i915_kdev
);
1775 DRM_DEBUG("I915 HDCP comp bind\n");
1776 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1777 dev_priv
->hdcp_master
= (struct i915_hdcp_comp_master
*)data
;
1778 dev_priv
->hdcp_master
->mei_dev
= mei_kdev
;
1779 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1784 static void i915_hdcp_component_unbind(struct device
*i915_kdev
,
1785 struct device
*mei_kdev
, void *data
)
1787 struct drm_i915_private
*dev_priv
= kdev_to_i915(i915_kdev
);
1789 DRM_DEBUG("I915 HDCP comp unbind\n");
1790 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1791 dev_priv
->hdcp_master
= NULL
;
1792 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1795 static const struct component_ops i915_hdcp_component_ops
= {
1796 .bind
= i915_hdcp_component_bind
,
1797 .unbind
= i915_hdcp_component_unbind
,
1801 enum mei_fw_ddi
intel_get_mei_fw_ddi_index(enum port port
)
1806 case PORT_B
... PORT_F
:
1807 return (enum mei_fw_ddi
)port
;
1809 return MEI_DDI_INVALID_PORT
;
1814 enum mei_fw_tc
intel_get_mei_fw_tc(enum transcoder cpu_transcoder
)
1816 switch (cpu_transcoder
) {
1817 case TRANSCODER_A
... TRANSCODER_D
:
1818 return (enum mei_fw_tc
)(cpu_transcoder
| 0x10);
1819 default: /* eDP, DSI TRANSCODERS are non HDCP capable */
1820 return MEI_INVALID_TRANSCODER
;
1824 static inline int initialize_hdcp_port_data(struct intel_connector
*connector
,
1825 const struct intel_hdcp_shim
*shim
)
1827 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1828 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1829 struct hdcp_port_data
*data
= &hdcp
->port_data
;
1831 if (INTEL_GEN(dev_priv
) < 12)
1833 intel_get_mei_fw_ddi_index(connector
->encoder
->port
);
1836 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
1837 * with zero(INVALID PORT index).
1839 data
->fw_ddi
= MEI_DDI_INVALID_PORT
;
1842 * As associated transcoder is set and modified at modeset, here fw_tc
1843 * is initialized to zero (invalid transcoder index). This will be
1844 * retained for <Gen12 forever.
1846 data
->fw_tc
= MEI_INVALID_TRANSCODER
;
1848 data
->port_type
= (u8
)HDCP_PORT_TYPE_INTEGRATED
;
1849 data
->protocol
= (u8
)shim
->protocol
;
1853 data
->streams
= kcalloc(data
->k
,
1854 sizeof(struct hdcp2_streamid_type
),
1856 if (!data
->streams
) {
1857 DRM_ERROR("Out of Memory\n");
1861 data
->streams
[0].stream_id
= 0;
1862 data
->streams
[0].stream_type
= hdcp
->content_type
;
1867 static bool is_hdcp2_supported(struct drm_i915_private
*dev_priv
)
1869 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP
))
1872 return (INTEL_GEN(dev_priv
) >= 10 || IS_GEMINILAKE(dev_priv
) ||
1873 IS_KABYLAKE(dev_priv
) || IS_COFFEELAKE(dev_priv
));
1876 void intel_hdcp_component_init(struct drm_i915_private
*dev_priv
)
1880 if (!is_hdcp2_supported(dev_priv
))
1883 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1884 WARN_ON(dev_priv
->hdcp_comp_added
);
1886 dev_priv
->hdcp_comp_added
= true;
1887 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1888 ret
= component_add_typed(dev_priv
->drm
.dev
, &i915_hdcp_component_ops
,
1889 I915_COMPONENT_HDCP
);
1891 DRM_DEBUG_KMS("Failed at component add(%d)\n", ret
);
1892 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
1893 dev_priv
->hdcp_comp_added
= false;
1894 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
1899 static void intel_hdcp2_init(struct intel_connector
*connector
,
1900 const struct intel_hdcp_shim
*shim
)
1902 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1905 ret
= initialize_hdcp_port_data(connector
, shim
);
1907 DRM_DEBUG_KMS("Mei hdcp data init failed\n");
1911 hdcp
->hdcp2_supported
= true;
1914 int intel_hdcp_init(struct intel_connector
*connector
,
1915 const struct intel_hdcp_shim
*shim
)
1917 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1918 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1924 if (is_hdcp2_supported(dev_priv
))
1925 intel_hdcp2_init(connector
, shim
);
1928 drm_connector_attach_content_protection_property(&connector
->base
,
1929 hdcp
->hdcp2_supported
);
1931 hdcp
->hdcp2_supported
= false;
1932 kfree(hdcp
->port_data
.streams
);
1937 mutex_init(&hdcp
->mutex
);
1938 INIT_DELAYED_WORK(&hdcp
->check_work
, intel_hdcp_check_work
);
1939 INIT_WORK(&hdcp
->prop_work
, intel_hdcp_prop_work
);
1940 init_waitqueue_head(&hdcp
->cp_irq_queue
);
1945 int intel_hdcp_enable(struct intel_connector
*connector
,
1946 enum transcoder cpu_transcoder
, u8 content_type
)
1948 struct drm_i915_private
*dev_priv
= to_i915(connector
->base
.dev
);
1949 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
1950 unsigned long check_link_interval
= DRM_HDCP_CHECK_PERIOD_MS
;
1956 mutex_lock(&hdcp
->mutex
);
1957 WARN_ON(hdcp
->value
== DRM_MODE_CONTENT_PROTECTION_ENABLED
);
1958 hdcp
->content_type
= content_type
;
1960 if (INTEL_GEN(dev_priv
) >= 12) {
1961 hdcp
->cpu_transcoder
= cpu_transcoder
;
1962 hdcp
->port_data
.fw_tc
= intel_get_mei_fw_tc(cpu_transcoder
);
1966 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
1967 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
1969 if (intel_hdcp2_capable(connector
)) {
1970 ret
= _intel_hdcp2_enable(connector
);
1972 check_link_interval
= DRM_HDCP2_CHECK_PERIOD_MS
;
1976 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
1979 if (ret
&& intel_hdcp_capable(connector
) &&
1980 hdcp
->content_type
!= DRM_MODE_HDCP_CONTENT_TYPE1
) {
1981 ret
= _intel_hdcp_enable(connector
);
1985 schedule_delayed_work(&hdcp
->check_work
, check_link_interval
);
1986 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_ENABLED
;
1987 schedule_work(&hdcp
->prop_work
);
1990 mutex_unlock(&hdcp
->mutex
);
1994 int intel_hdcp_disable(struct intel_connector
*connector
)
1996 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
2002 mutex_lock(&hdcp
->mutex
);
2004 if (hdcp
->value
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
) {
2005 hdcp
->value
= DRM_MODE_CONTENT_PROTECTION_UNDESIRED
;
2006 if (hdcp
->hdcp2_encrypted
)
2007 ret
= _intel_hdcp2_disable(connector
);
2008 else if (hdcp
->hdcp_encrypted
)
2009 ret
= _intel_hdcp_disable(connector
);
2012 mutex_unlock(&hdcp
->mutex
);
2013 cancel_delayed_work_sync(&hdcp
->check_work
);
2017 void intel_hdcp_component_fini(struct drm_i915_private
*dev_priv
)
2019 mutex_lock(&dev_priv
->hdcp_comp_mutex
);
2020 if (!dev_priv
->hdcp_comp_added
) {
2021 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
2025 dev_priv
->hdcp_comp_added
= false;
2026 mutex_unlock(&dev_priv
->hdcp_comp_mutex
);
2028 component_del(dev_priv
->drm
.dev
, &i915_hdcp_component_ops
);
2031 void intel_hdcp_cleanup(struct intel_connector
*connector
)
2033 if (!connector
->hdcp
.shim
)
2036 mutex_lock(&connector
->hdcp
.mutex
);
2037 kfree(connector
->hdcp
.port_data
.streams
);
2038 mutex_unlock(&connector
->hdcp
.mutex
);
2041 void intel_hdcp_atomic_check(struct drm_connector
*connector
,
2042 struct drm_connector_state
*old_state
,
2043 struct drm_connector_state
*new_state
)
2045 u64 old_cp
= old_state
->content_protection
;
2046 u64 new_cp
= new_state
->content_protection
;
2047 struct drm_crtc_state
*crtc_state
;
2049 if (!new_state
->crtc
) {
2051 * If the connector is being disabled with CP enabled, mark it
2052 * desired so it's re-enabled when the connector is brought back
2054 if (old_cp
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
2055 new_state
->content_protection
=
2056 DRM_MODE_CONTENT_PROTECTION_DESIRED
;
2061 * Nothing to do if the state didn't change, or HDCP was activated since
2062 * the last commit. And also no change in hdcp content type.
2064 if (old_cp
== new_cp
||
2065 (old_cp
== DRM_MODE_CONTENT_PROTECTION_DESIRED
&&
2066 new_cp
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)) {
2067 if (old_state
->hdcp_content_type
==
2068 new_state
->hdcp_content_type
)
2072 crtc_state
= drm_atomic_get_new_crtc_state(new_state
->state
,
2074 crtc_state
->mode_changed
= true;
2077 /* Handles the CP_IRQ raised from the DP HDCP sink */
2078 void intel_hdcp_handle_cp_irq(struct intel_connector
*connector
)
2080 struct intel_hdcp
*hdcp
= &connector
->hdcp
;
2085 atomic_inc(&connector
->hdcp
.cp_irq_count
);
2086 wake_up_all(&connector
->hdcp
.cp_irq_queue
);
2088 schedule_delayed_work(&hdcp
->check_work
, 0);