/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fixed.h>
/*
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
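/*
 * Rough sketch of the sideband message header layout, as implied by the
 * drm_dp_encode_sideband_msg_hdr()/drm_dp_decode_sideband_msg_hdr() helpers
 * below; consult the DP 1.2a spec for the authoritative definition:
 *
 *	byte 0       : LCT (bits 7:4) | LCR (bits 3:0)
 *	next LCT/2   : RAD bytes, one nibble per hop
 *	next byte    : broadcast (bit 7) | path_msg (bit 6) | msg_len (bits 5:0)
 *	next byte    : SOMT (bit 7) | EOMT (bit 6) | seqno (bit 4); the low
 *	               nibble of this last header byte carries a 4-bit CRC
 */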
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
	int number_of_bits = num_nibbles * 4;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x10) == 0x10)

	while (number_of_bits != 0) {
		if ((remainder & 0x10) != 0)

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
	int number_of_bits = number_of_bytes * 8;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x100) == 0x100)

	while (number_of_bits != 0) {
		if ((remainder & 0x100) != 0)

	return remainder & 0xff;

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
	size += (hdr->lct / 2);

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
	len += ((buf[0] & 0xf0) >> 4) / 2;

	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_read.num_bytes);
	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_write.num_bytes);
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;
			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_write.num_bytes);
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
	crc4 = drm_dp_msg_data_crc4(msg, len);

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
	struct drm_dp_sideband_msg_hdr recv_hdr;

	ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);

	/* get length contained in this portion */
	msg->curchunk_len = recv_hdr.msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (recv_hdr.somt && msg->have_somt)

	memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
	msg->have_somt = true;
	msg->have_eomt = true;

	/* copy the bytes for the remainder of this header chunk */
	msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
	memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		if (idx > raw->curlen)
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			if (idx > raw->curlen)
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			if (idx > raw->curlen)
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	if (idx > raw->curlen)
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
	mutex_unlock(&mgr->payload_lock);

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
	mutex_unlock(&mgr->payload_lock);

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
	mutex_lock(&mstb->mgr->qlock);
	if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
	mutex_unlock(&mgr->qlock);

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
	mutex_unlock(&mstb->mgr->qlock);

	wake_up(&mstb->mgr->tx_waitq);

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
	struct drm_dp_mst_branch *mstb;

	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
	case DP_PEER_DEVICE_MST_BRANCHING:
		drm_dp_put_mst_branch_device(mstb);

static void drm_dp_destroy_port(struct kref *kref)
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	port->vcpi.num_slots = 0;
	kfree(port->cached_edid);

	/*
	 * The only time we don't have a connector
	 * on an output port is if the connector init
	 * fails.
	 */
	if (port->connector) {
		/* we can't destroy the connector here, as
		 * we might be holding the mode_config.mutex
		 * from an EDID retrieval */
		mutex_lock(&mgr->destroy_connector_lock);
		list_add(&port->next, &mgr->destroy_connector_list);
		mutex_unlock(&mgr->destroy_connector_lock);
		schedule_work(&mgr->destroy_connector_work);
	/* no need to clean up vcpi
	 * as if we have no connector we never setup a vcpi */
	drm_dp_port_teardown_pdt(port, port->pdt);

static void drm_dp_put_port(struct drm_dp_mst_port *port)
	kref_put(&port->kref, drm_dp_destroy_port);
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb) {
		kref_get(&mstb->kref);
	list_for_each_entry(port, &mstb->ports, next) {
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
		mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
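/*
 * Worked example (illustrative only): a device reached through port 1 of the
 * primary branch (LCT 1) and then port 2 of that branch ends up with LCT 3
 * and rad[0] == 0x12 - one nibble per hop, high nibble first - which is how
 * drm_dp_calculate_rad() below builds it and drm_dp_get_mst_branch_device()
 * walks it back down; build_mst_prop_path() would render it as
 * "mst:<conn_base_id>-1-2".
 */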
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
	int parent_lct = port->parent->lct;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;

/*
 * return sends link address for new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
	bool send_link = false;

	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);
		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
				   struct drm_dp_mst_port *port)
	if (port->dpcd_rev >= 0x12) {
		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
		if (!port->guid_valid) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
			port->guid_valid = true;

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum, char *proppath,
				size_t proppath_size)
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
	struct drm_dp_mst_port *port;
	bool created = false;

	port = drm_dp_get_port(mstb, port_msg->port_number);
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		old_pdt = port->pdt;
		old_ddps = port->ddps;

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
	memcpy(port->guid, port_msg->peer_guid, 16);

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	mutex_lock(&mstb->mgr->lock);
	kref_get(&port->kref);
	list_add(&port->next, &mstb->ports);
	mutex_unlock(&mstb->mgr->lock);

	if (old_ddps != port->ddps) {
		drm_dp_check_port_guid(mstb, port);
		drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		port->guid_valid = false;
		port->available_pbn = 0;

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);
		ret = drm_dp_port_setup_pdt(port);
		drm_dp_send_link_address(mstb->mgr, port->mstb);

	if (created && !port->input) {
		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
		if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		(*mstb->mgr->cbs->register_connector)(port->connector);

	/* put reference to this port */
	drm_dp_put_port(port);

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
	struct drm_dp_mst_port *port;
	bool dowork = false;

	port = drm_dp_get_port(mstb, conn_stat->port_number);
	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		drm_dp_check_port_guid(mstb, port);
		port->guid_valid = false;
		port->available_pbn = 0;

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);
		if (drm_dp_port_setup_pdt(port))

	drm_dp_put_port(port);
	queue_work(system_long_wq, &mstb->mgr->work);

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
	kref_get(&mstb->kref);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
		mstb = mgr->mst_primary;
		mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	kref_get(&mstb->kref);
	mutex_unlock(&mgr->lock);

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;

	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);
	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);
		mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
		drm_dp_check_and_send_link_address(mgr, mstb_child);
		drm_dp_put_mst_branch_device(mstb_child);

static void drm_dp_mst_link_probe_work(struct work_struct *work)
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	kref_get(&mstb->kref);
	mutex_unlock(&mgr->lock);
	drm_dp_check_and_send_link_address(mgr, mstb);
	drm_dp_put_mst_branch_device(mstb);

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;

		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
	} while (total > 0);

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_branch *mstb = txmsg->dst;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
		mstb->tx_slots[txmsg->seqno] = txmsg;

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
		DRM_DEBUG_KMS("sideband msg failed to send\n");

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_sideband_msg_tx *txmsg;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up(&mgr->tx_waitq);
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		txmsg->dst->tx_slots[txmsg->seqno] = NULL;

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	len = build_link_address(txmsg);
	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
		for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
			DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
				      txmsg->reply.u.link_addr.ports[i].input_port,
				      txmsg->reply.u.link_addr.ports[i].peer_device_type,
				      txmsg->reply.u.link_addr.ports[i].port_number,
				      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
				      txmsg->reply.u.link_addr.ports[i].mcs,
				      txmsg->reply.u.link_addr.ports[i].ddps,
				      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
				      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
				      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
		for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
			drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
		(*mgr->cbs->hotplug)(mgr);
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	len = build_enum_path_resources(txmsg, port->port_num);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		if (port->port_num != txmsg->reply.u.path_resources.port_number)
			DRM_ERROR("got incorrect port in response\n");
		DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
			      txmsg->reply.u.path_resources.avail_payload_bw_number);
		port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id, int pbn)
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	for (i = 0; i < port->num_sdp_streams; i++)

	len = build_allocate_payload(txmsg, port->port_num,
				     pbn, port->num_sdp_streams, sinks);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1) {
	drm_dp_put_mst_branch_device(mstb);

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
		payload->payload_state = 0;
	payload->payload_state = DP_PAYLOAD_LOCAL;

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	payload->payload_state = DP_PAYLOAD_REMOTE;

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	drm_dp_payload_send_msg(mgr, port, id, 0);
	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
	payload->payload_state = 0;
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * after calling this the driver should generate ACT and payload
 * packets.
 */
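/*
 * Typical driver flow (illustrative sketch only; drm_dp_mst_allocate_vcpi()
 * and drm_dp_check_act_status() are assumed from the wider MST helper API and
 * are not shown in this file excerpt):
 *
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);
 *	... program the source payload table and wait for ACT ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */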
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
			req_payload.num_slots = 0;

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
				mgr->payloads[i].vcpi = req_payload.vcpi;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;
			mgr->payloads[i].payload_state = req_payload.payload_state;
		cur_slots += req_payload.num_slots;

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);
					clear_bit(j + 1, &mgr->payload_mask);
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
	mutex_unlock(&mgr->payload_lock);
EXPORT_SYMBOL(drm_dp_update_payload_part1);

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		if (!mgr->proposed_vcpis[i])
		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
			mutex_unlock(&mgr->payload_lock);
	mutex_unlock(&mgr->payload_lock);
EXPORT_SYMBOL(drm_dp_update_payload_part2);
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;
	drm_dp_queue_down_tx(mgr, txmsg);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1) {
	drm_dp_put_mst_branch_device(mstb);

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 0;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);
	process_single_up_tx_qlock(mgr, txmsg);
	mutex_unlock(&mgr->qlock);

static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
	switch (dp_link_bw) {
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
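/*
 * Illustrative call site (sketch only): a driver's long-HPD/detect path might
 * flip the manager state based on its own MST-capability check of the sink,
 * e.g.
 *
 *	if (drm_dp_mst_topology_mgr_set_mst(mgr, sink_can_do_mst) < 0)
 *		drive the port as a plain SST stream instead
 *
 * where "sink_can_do_mst" is a placeholder for whatever DPCD probe the driver
 * performs.
 */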
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
	mgr->mst_state = mst_state;
	/* set the device into MST mode */
		WARN_ON(mgr->mst_primary);

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);

		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);

		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
		if (!mgr->guid_valid) {
			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
			mgr->guid_valid = true;

		queue_work(system_long_wq, &mgr->work);
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);

	mutex_unlock(&mgr->lock);
		drm_dp_put_mst_branch_device(mstb);
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * if the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
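/*
 * Illustrative suspend/resume pairing (sketch only):
 *
 *	suspend path:	drm_dp_mst_topology_mgr_suspend(mgr);
 *	resume path:	if (drm_dp_mst_topology_mgr_resume(mgr) < 0)
 *				trigger a full MST reprobe (driver specific)
 */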
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");

	mutex_unlock(&mgr->lock);
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
	int replylen, origlen, curreply;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;

	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
			DRM_DEBUG_KMS("failed to read a chunk\n");
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
			DRM_DEBUG_KMS("failed to build sideband msg\n");
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
	drm_dp_get_one_sb_msg(mgr, false);

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_put_mst_branch_device(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		wake_up(&mgr->tx_waitq);

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
	drm_dp_get_one_sb_msg(mgr, true);

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

			drm_dp_update_port(mstb, &msg.u.conn_stat);
			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);
		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);

		drm_dp_put_mst_branch_device(mstb);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;

	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
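
/*
 * Example (hypothetical driver code, not part of this helper): a short-pulse
 * HPD handler typically fetches the ESI bytes, hands them to
 * drm_dp_mst_hpd_irq() and then acks the serviced vector bits.  The "dp"
 * structure and the exact ack write below are illustrative only.
 *
 *	u8 esi[14];
 *	bool handled;
 *
 *	if (drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT_ESI, esi, 14) == 14) {
 *		drm_dp_mst_hpd_irq(&dp->mst_mgr, esi, &handled);
 *		if (handled)
 *			drm_dp_dpcd_write(&dp->aux, DP_SINK_COUNT_ESI + 1,
 *					  &esi[1], 3);
 *	}
 */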
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= 8 && !port->cached_edid) {
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		}
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
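
/*
 * Example (hypothetical): a per-port connector's .detect hook can simply
 * forward to the helper with the unverified port pointer it cached when the
 * connector was created; "my_mst_connector" is an illustrative driver type.
 *
 *	static enum drm_connector_status
 *	my_mst_detect(struct drm_connector *connector, bool force)
 *	{
 *		struct my_mst_connector *conn = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, conn->mgr, conn->port);
 *	}
 */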
/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector. It validates
 * the pointer still exists so the caller doesn't require a reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_mode_connector_set_tile_property(connector);
	}
	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn)
{
	int ret;
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
	vcpi->num_slots = num_slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}
/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
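
/*
 * Example (hypothetical): the usual mode-set flow converts the adjusted mode
 * into a PBN value, checks that enough timeslots are available and then
 * allocates the VCPI; error handling and the surrounding driver structures
 * are illustrative only.
 *
 *	int pbn, slots;
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	if (slots < 0)
 *		return slots;
 *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
 *		return -EINVAL;
 */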
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}
/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	fixed20_12 pix_bw;
	fixed20_12 fbpp;
	fixed20_12 result;
	fixed20_12 margin, tmp;
	u32 res;

	pix_bw.full = dfixed_const(clock);
	fbpp.full = dfixed_const(bpp);
	tmp.full = dfixed_const(8);
	fbpp.full = dfixed_div(fbpp, tmp);

	result.full = dfixed_mul(pix_bw, fbpp);
	margin.full = dfixed_const(54);
	tmp.full = dfixed_const(64);
	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_div(result, margin);

	margin.full = dfixed_const(1006);
	tmp.full = dfixed_const(1000);
	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_mul(result, margin);

	result.full = dfixed_div(result, tmp);
	result.full = dfixed_ceil(result);
	res = dfixed_trunc(result);
	return res;
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
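
/*
 * Worked example for the formula above (illustrative numbers): a 154000 kHz
 * dot clock at 30 bpp gives a peak rate of 154000 * 30 / 8 = 577500 kB/s.
 * Dividing by 54/64 (i.e. scaling by 64/54, as the spec's formula does)
 * gives ~684444, applying the 1006/1000 margin gives ~688551, and dividing
 * by 1000 and rounding up yields 689 PBN, which is the value
 * test_calc_pbn_mode() below checks.
 */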
static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689)
		return -EINVAL;
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047)
		return -EINVAL;
	return 0;
}
/* we want to kick the TX after we've acked the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}
static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int ret;
	int i;

	for (i = 0; i < 4; i++) {
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
		if (ret != 16)
			break;
	}
	if (i == 4)
		return true;
	return false;
}
/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
		} else
			seq_printf(m, "vcpi %d:unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[64];
		bool bret;
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: ");
		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: ");
		for (i = 0; i < 2; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: ");
		for (i = 0; i < 1; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: ");
		for (i = 0; i < 0x3; i++)
			seq_printf(m, "%02x", buf[i]);
		seq_printf(m, " devid: ");
		for (i = 0x3; i < 0x8; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		seq_printf(m, "\n");
		bret = dump_dp_payload_table(mgr, buf);
		if (bret == true) {
			seq_printf(m, "payload table: ");
			for (i = 0; i < 63; i++)
				seq_printf(m, "%02x ", buf[i]);
			seq_printf(m, "\n");
		}
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
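
/*
 * Example (hypothetical): drivers typically expose the dump above through a
 * debugfs seq_file show callback; how the manager pointer is recovered from
 * the seq_file is up to the driver and illustrative here.
 *
 *	static int my_mst_topology_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = m->private;
 *
 *		drm_dp_mst_dump_topology(m, mgr);
 *		return 0;
 *	}
 */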
static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;
	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);

		if (!port->input && port->vcpi.vcpi > 0)
			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
		kfree(port);
		send_hotplug = true;
	}
	if (send_hotplug)
		(*mgr->cbs->hotplug)(mgr);
}
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	test_calc_pbn_mode();
	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
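
/*
 * Example (hypothetical): a driver usually embeds the manager in its
 * encoder/port structure, points it at its callbacks and initialises it once
 * the AUX channel is usable.  "dp", "my_mst_cbs" and the 16-byte/3-payload
 * limits below are illustrative and depend on the hardware.
 *
 *	dp->mst_mgr.cbs = &my_mst_cbs;
 *	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, drm_dev->dev,
 *					   &dp->aux, 16, 3,
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 */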
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}