// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"
#define XDOMAIN_SHORT_TIMEOUT		100	/* ms */
#define XDOMAIN_DEFAULT_TIMEOUT		1000	/* ms */
#define XDOMAIN_BONDING_TIMEOUT		10000	/* ms */
#define XDOMAIN_RETRIES			10
#define XDOMAIN_DEFAULT_MAX_HOPID	15
enum {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_BONDING_UUID_HIGH,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
	XDOMAIN_STATE_ERROR,
};
static const char * const state_names[] = {
	[XDOMAIN_STATE_INIT] = "INIT",
	[XDOMAIN_STATE_UUID] = "UUID",
	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
	[XDOMAIN_STATE_ERROR] = "ERROR",
};
struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};
static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
/*
 * Serializes access to the properties and protocol handlers below. If
 * you need to take both this lock and the struct tb_xdomain lock, take
 * this one first.
 */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);
/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}
static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}
static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}
static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}
static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}
/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}
/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
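/*
 * Illustrative only: a service driver speaking its own XDomain protocol
 * could pair tb_xdomain_request()/tb_xdomain_response() like this,
 * assuming my_req/my_res are wire structures defined by that protocol
 * (hypothetical names, not part of this file):
 *
 *	struct my_req req = { ... };
 *	struct my_res res = { };
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 */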
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
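/*
 * Worked example (illustrative): if size - sizeof(hdr->xd_hdr) is 8
 * bytes, the encoded length is 8 / 4 = 2 dwords; a sequence number of
 * 3 is then shifted by TB_XDOMAIN_SN_SHIFT and masked into the same
 * length_sn word.
 */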
static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;

	switch (res->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid, u64 *remote_route)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;

	return 0;
}
static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, which we might add
	 * support for later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}
static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.err);
}
static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 *slw, u8 *tlw,
					    u8 *sls, u8 *tls)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_xdp_link_state_status req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	if (res.status != 0)
		return -EREMOTEIO;

	*slw = res.slw;
	*tlw = res.tlw;
	*sls = res.sls;
	*tls = res.tls;

	return 0;
}
static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
					     struct tb_xdomain *xd, u8 sequence)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	u32 val[2];
	int ret;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
			   LINK_STATE_STATUS_RESPONSE, sizeof(res));

	ret = tb_port_read(port, val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
	if (ret)
		return ret;

	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 tlw, u8 tls)
{
	struct tb_xdp_link_state_change_response res;
	struct tb_xdp_link_state_change req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
			   sizeof(req));
	req.tlw = tlw;
	req.tls = tls;

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	return res.status != 0 ? -EREMOTEIO : 0;
}
static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
					     u8 sequence, u32 status)
{
	struct tb_xdp_link_state_change_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
			   sizeof(res));
	res.status = status;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
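/*
 * A minimal sketch of a handler definition (hypothetical UUID, callback
 * and data, shown only to illustrate the expected fields):
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_protocol_uuid,
 *		.callback = my_protocol_callback,
 *		.data = &my_driver_data,
 *	};
 *
 *	tb_register_protocol_handler(&my_handler);
 */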
/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* And use the new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}
static void start_handshake(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_INIT;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
/* Can be called from state_work */
static void __stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->properties_changed_work);
	xd->properties_changed_retries = 0;
	xd->state_retries = 0;
}
static void stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->state_work);
	__stop_handshake(xd);
}
static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
		       route);

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev))
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		/*
		 * If we've stopped the discovery with an error such as
		 * timing out, we will restart the handshake now that we
		 * received UUID request from the remote host.
		 */
		if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
			dev_dbg(&xd->dev, "restarting handshake\n");
			start_handshake(xd);
		}
		break;

	case LINK_STATE_STATUS_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state status request\n",
		       route);

		if (xd) {
			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
								sequence);
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	case LINK_STATE_CHANGE_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state change request\n",
		       route);

		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
			const struct tb_xdp_link_state_change *lsc =
				(const struct tb_xdp_link_state_change *)pkg;

			ret = tb_xdp_link_state_change_response(ctl, route,
								sequence, 0);
			xd->target_link_width = lsc->tlw;
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	default:
		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}
static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}
/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
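/*
 * Typical usage from a service driver (sketch; my_probe/my_remove and
 * the ID table are the driver's own, hypothetical names):
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	tb_register_service_driver(&my_driver);
 */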
/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
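/*
 * For example (illustrative values only), a service with key "network"
 * and prtcid, prtcvers and prtcrevs all 1 yields the modalias
 * "tbsvc:knetworkp00000001v00000001r00000001", which is what udev
 * matches service drivers against.
 */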
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);
static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);
static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);
static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);
static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};
static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}
static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_free(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}
const struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);
static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->remote_properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}
static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}
static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}
static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_alloc(&xd->service_ids, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}
static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * USB4 inter-domain spec suggests using 15 as HopID if the
	 * other end does not announce it in a property. This is for
	 * TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}
static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_xdomain_downstream_port(xd);

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}
static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	uuid_t uuid;
	u64 route;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
				  &route);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to read remote UUID\n");
		return ret;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid)) {
		if (route == xd->route)
			dev_dbg(&xd->dev, "loop back detected\n");
		else
			dev_dbg(&xd->dev, "intra-domain loop detected\n");

		/* Don't bond lanes automatically for loops */
		xd->bonding_possible = false;
	}

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return -ENODEV;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return -ENOMEM;
	}

	return 0;
}
static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	u8 slw, tlw, sls, tls;
	int ret;

	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
		xd->remote_uuid);

	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
					       xd->state_retries, &slw, &tlw,
					       &sls, &tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote link status, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to receive remote link status\n");
		return ret;
	}

	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);

	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
					unsigned int width)
{
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	struct tb *tb = xd->tb;
	u8 tlw, tls;
	u32 val;
	int ret;

	if (width == 2)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
	else if (width == 1)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
	else
		return -EINVAL;

	/* Use the current target speed */
	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;
	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;

	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
		tlw, tls);

	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
					       xd->state_retries, tlw, tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to change remote link state, retrying\n");
			return -EAGAIN;
		}
		dev_err(&xd->dev, "failed request link state change, aborting\n");
		return ret;
	}

	dev_dbg(&xd->dev, "received link state change response\n");
	return 0;
}
static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
	unsigned int width, width_mask;
	struct tb_port *port;
	int ret;

	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
		width = TB_LINK_WIDTH_SINGLE;
		width_mask = width;
	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
		width = TB_LINK_WIDTH_DUAL;
		width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
	} else {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"link state change request not received yet, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
		return -ETIMEDOUT;
	}

	port = tb_xdomain_downstream_port(xd);

	/*
	 * We can't use tb_xdomain_lane_bonding_enable() here because it
	 * is the other side that initiates lane bonding. So here we
	 * just set the width to both lane adapters and wait for the
	 * link to transition bonded.
	 */
	ret = tb_port_set_link_width(port->dual_link_port, width);
	if (ret) {
		tb_port_warn(port->dual_link_port,
			     "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_set_link_width(port, width);
	if (ret) {
		tb_port_warn(port, "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
			 width_mask);
		return ret;
	}

	port->bonded = width > TB_LINK_WIDTH_SINGLE;
	port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
	return 0;
}
static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->state_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			return -EAGAIN;
		}
		/* Give up now */
		dev_err(&xd->dev, "failed read XDomain properties from %pUb\n",
			xd->remote_uuid);
		return ret;
	}

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
		ret = 0;
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		ret = -ENOMEM;
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		/*
		 * Now disable lane 1 if bonding was not enabled. Do
		 * this only if bonding was possible at the beginning
		 * (that is we are the connection manager and there are
		 * two lanes).
		 */
		if (xd->bonding_possible) {
			struct tb_port *port;

			port = tb_xdomain_downstream_port(xd);
			if (!port->bonded)
				tb_port_disable(port->dual_link_port);
		}

		dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
			xd->link_speed);
		dev_dbg(&xd->dev, "current link width %s\n",
			tb_width_name(xd->link_width));

		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return -ENODEV;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);

		tb_xdomain_debugfs_init(xd);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return 0;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);

	return ret;
}
static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_UUID;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS2;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
{
	if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
		dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
		xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
	} else {
		dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
		xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
	}

	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_PROPERTIES;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
{
	xd->properties_changed_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void tb_xdomain_failed(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_ERROR;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_state_work(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
	int ret, state = xd->state;

	if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
			 state > XDOMAIN_STATE_ERROR))
		return;

	dev_dbg(&xd->dev, "running state %s\n", state_names[state]);

	switch (state) {
	case XDOMAIN_STATE_INIT:
		if (xd->needs_uuid) {
			tb_xdomain_queue_uuid(xd);
		} else {
			tb_xdomain_queue_properties_changed(xd);
			tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_UUID:
		ret = tb_xdomain_get_uuid(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			tb_xdomain_failed(xd);
			break;
		}
		tb_xdomain_queue_properties_changed(xd);
		if (xd->bonding_possible)
			tb_xdomain_queue_link_status(xd);
		else
			tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_LINK_STATUS:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;

			/*
			 * If any of the lane bonding states fail we skip
			 * bonding completely and try to continue from
			 * reading properties.
			 */
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATE_CHANGE:
		ret = tb_xdomain_link_state_change(xd, 2);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_link_status2(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS2:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding_uuid_low(xd);
		}
		break;

	case XDOMAIN_STATE_BONDING_UUID_LOW:
		tb_xdomain_lane_bonding_enable(xd);
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_BONDING_UUID_HIGH:
		if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
			goto retry;
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_PROPERTIES:
		ret = tb_xdomain_get_properties(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			tb_xdomain_failed(xd);
			break;
		}
		xd->state = XDOMAIN_STATE_ENUMERATED;
		break;

	case XDOMAIN_STATE_ENUMERATED:
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_ERROR:
		dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
		__stop_handshake(xd);
		break;

	default:
		dev_warn(&xd->dev, "unexpected state %d\n", state);
		break;
	}

	return;

retry:
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	dev_dbg(&xd->dev, "sending properties changed notification\n");

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to send properties changed notification, retrying\n");
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
		} else {
			dev_err(&xd->dev, "failed to send properties changed notification\n");
		}
		return;
	}

	xd->properties_changed_retries = XDOMAIN_RETRIES;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);
static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);
static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);
static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);
static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
}

static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	unsigned int width;

	switch (xd->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_TX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	unsigned int width;

	switch (xd->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_RX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_maxhopid.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};
static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	kfree(xd->local_property_block);
	tb_property_free_dir(xd->remote_properties);
	ida_destroy(&xd->out_hopids);
	ida_destroy(&xd->in_hopids);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	start_handshake(tb_to_xdomain(dev));
	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

const struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);
static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down)
{
	if (!down->dual_link_port)
		return;

	/*
	 * Gen 4 links come up already as bonded so only update the port
	 * structures here.
	 */
	if (tb_port_get_link_generation(down) >= 4) {
		down->bonded = true;
		down->dual_link_port->bonded = true;
	} else {
		xd->bonding_possible = true;
	}
}
static void tb_xdomain_link_exit(struct tb_xdomain *xd)
{
	struct tb_port *down = tb_xdomain_downstream_port(xd);

	if (!down->dual_link_port)
		return;

	if (tb_port_get_link_generation(down) >= 4) {
		down->bonded = false;
		down->dual_link_port->bonded = false;
	} else if (xd->link_width > TB_LINK_WIDTH_SINGLE) {
		/*
		 * Just return port structures back to way they were and
		 * update credits. No need to update userspace because
		 * the XDomain is removed soon anyway.
		 */
		tb_port_lane_bonding_disable(down);
		tb_port_update_credits(down);
	} else if (down->dual_link_port) {
		/*
		 * Re-enable the lane 1 adapter we disabled at the end
		 * of tb_xdomain_get_properties().
		 */
		tb_port_enable(down->dual_link_port);
	}
}
/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	xd->local_max_hopid = down->config.max_in_hop_id;
	ida_init(&xd->service_ids);
	ida_init(&xd->in_hopids);
	ida_init(&xd->out_hopids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;

		tb_xdomain_link_init(xd, down);
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
	if (remote_uuid)
		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);

	/*
	 * This keeps the DMA powered on as long as we have active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}
/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}
static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	tb_xdomain_debugfs_remove(xd);

	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	tb_xdomain_link_exit(xd);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev)) {
		put_device(&xd->dev);
	} else {
		dev_info(&xd->dev, "host disconnected\n");
		device_unregister(&xd->dev);
	}
}
/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	unsigned int width_mask;
	struct tb_port *port;
	int ret;

	port = tb_xdomain_downstream_port(xd);
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret < 0)
		return ret;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	/* Any of the widths are all bonded */
	width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
		     TB_LINK_WIDTH_ASYM_RX;

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
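/*
 * Note on usage (a sketch, not normative): callers such as the software
 * connection manager enable bonding once the remote end is known to
 * support two lanes, and later undo it with
 * tb_xdomain_lane_bonding_disable(); the two calls are expected to be
 * paired.
 */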
/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port;

	port = tb_xdomain_downstream_port(xd);
	if (port->dual_link_port) {
		int ret;

		tb_port_lane_bonding_disable(port);
		ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
		if (ret == -ETIMEDOUT)
			tb_port_warn(port, "timeout disabling lane bonding\n");
		tb_port_disable(port->dual_link_port);
		tb_port_update_credits(port);
		tb_xdomain_update_link_attributes(xd);

		dev_dbg(&xd->dev, "lane bonding disabled\n");
	}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
/**
 * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
 * guaranteed to be within range supported by the input lane adapter.
 * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
 */
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
{
	if (hopid < 0)
		hopid = TB_PATH_MIN_HOPID;
	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
		return -EINVAL;

	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
			       GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
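/*
 * Example (illustrative): a DMA tunnel user typically lets the core
 * pick the HopID and releases it when tearing the tunnel down:
 *
 *	hopid = tb_xdomain_alloc_in_hopid(xd, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_xdomain_release_in_hopid(xd, hopid);
 */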
/**
 * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
 * guaranteed to be within range supported by the output lane adapter.
 * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
 */
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{
	if (hopid < 0)
		hopid = TB_PATH_MIN_HOPID;
	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
		return -EINVAL;

	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
			       GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
/**
 * tb_xdomain_release_in_hopid() - Release input HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->in_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
/**
 * tb_xdomain_release_out_hopid() - Release output HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->out_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * path. If a transmit or receive path is not needed, pass %-1 for those
 * parameters.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring)
{
	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
					       transmit_ring, receive_path,
					       receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
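/*
 * Sketch of the expected enable/disable pairing (the same values should
 * normally be passed to both calls; path and ring numbers here are
 * placeholders):
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	...
 *	tb_xdomain_disable_paths(xd, transmit_path, transmit_ring,
 *				 receive_path, receive_ring);
 */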
/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * This does the opposite of tb_xdomain_enable_paths(). After call to
 * this the caller is not expected to use the rings anymore. Passing %-1
 * as path/ring parameter means don't care. Normally the callers should
 * pass the same values here as they do when paths are enabled.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring)
{
	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
						  transmit_ring, receive_path,
						  receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else {
				if (lookup->link && lookup->link == xd->link &&
				    lookup->depth == xd->depth)
					return xd;
				if (lookup->route && lookup->route == xd->route)
					return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet
 * added to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet
 * added to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet
 * added to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
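
/*
 * Example (sketch): looking up an XDomain while holding @tb->lock and
 * dropping the reference once done; "route" is assumed to come from
 * the surrounding context:
 *
 *	struct tb_xdomain *xd;
 *
 *	xd = tb_xdomain_find_by_route(tb, route);
 *	if (xd) {
 *		... use the XDomain ...
 *		tb_xdomain_put(xd);
 *	}
 */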

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);
		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}
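
/*
 * Example (sketch): a service driver can register a handler for its
 * own protocol UUID so that matching XDomain messages are dispatched
 * to it through the loop above. "my_proto_uuid", "my_callback" and the
 * registration context are hypothetical:
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 *	...
 *	tb_unregister_protocol_handler(&my_handler);
 */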

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}

	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property
 * directory to the host's available properties. The other connected
 * hosts are notified so they can re-read the properties of this host
 * if they are interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	xdomain_property_block_gen++;

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);

/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify
 * the connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		xdomain_property_block_gen++;
	mutex_unlock(&xdomain_lock);

	update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
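
/*
 * Example (sketch): a service driver exposing its own directory under
 * an up to 8-character key. The "network" key, "my_service_uuid" and
 * the "prtcid" property below are illustrative only:
 *
 *	struct tb_property_dir *dir;
 *
 *	dir = tb_property_create_dir(&my_service_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *
 *	ret = tb_register_property_dir("network", dir);
 *	...
 *	tb_unregister_property_dir("network", dir);
 *	tb_property_free_dir(dir);
 */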

int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize the standard set of properties without any service
	 * directories. Those will be added by the service drivers
	 * themselves when they are loaded.
	 *
	 * The rest of the properties are filled dynamically based on
	 * these when the P2P connection is made.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	xdomain_property_block_gen = get_random_u32();
	return 0;
}

void tb_xdomain_exit(void)
{
	tb_property_free_dir(xdomain_property_dir);
}