/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

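/*
 * Example (illustrative sketch, not part of the original file): a
 * service driver answering a request it received over its own
 * protocol. The reply structure "struct my_reply" is hypothetical; a
 * real driver uses whatever packet layout its protocol defines.
 *
 *	struct my_reply reply = { .status = 0 };
 *	int ret;
 *
 *	ret = tb_xdomain_response(xd, &reply, sizeof(reply),
 *				  TB_CFG_PKG_XDOMAIN_RESP);
 *	if (ret)
 *		dev_warn(&xd->dev, "failed to send reply\n");
 */
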
static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);

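/*
 * Example (illustrative sketch, not part of the original file): a
 * synchronous request/response round trip over a service protocol.
 * "struct my_req", "struct my_res" and MY_CMD are hypothetical; a real
 * driver substitutes its own packet definitions.
 *
 *	struct my_req req = { .cmd = MY_CMD };
 *	struct my_res res = {};
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP,
 *				 XDOMAIN_DEFAULT_TIMEOUT);
 *	if (ret)
 *		return ret;
 */
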
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;

	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

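/*
 * Worked example of the length_sn encoding above (assuming the XDomain
 * header is three u32 fields, i.e. 12 bytes): for a 28-byte packet the
 * length is (28 - 12) / 4 = 4 dwords, and with sequence number 2 the
 * field becomes
 *
 *	4 | ((2 << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK)
 *
 * so the length occupies the low bits and the 2-bit sequence number is
 * packed into the masked bits on top of it.
 */
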
static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * The packet length includes the whole payload without
		 * the XDomain header. First validate that the packet is
		 * at least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, which we may add support for
	 * later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

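/*
 * Example (illustrative sketch, not part of the original file): hooking
 * a custom protocol into the XDomain packet flow. The UUID, callback
 * name and handler name below are hypothetical. The callback returns a
 * positive value once it has consumed the packet, which stops further
 * handler iteration (see tb_xdomain_handle_request() below).
 *
 *	static int my_callback(const void *buf, size_t size, void *data)
 *	{
 *		(handle the packet here)
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_protocol_uuid,
 *		.callback = my_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */
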
/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		const struct tb_xdp_properties_changed *xchg =
			(const struct tb_xdp_properties_changed *)pkg;
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
		if (xd) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
			tb_xdomain_put(xd);
		}

		break;
	}

	default:
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb;

	queue_work(tb->wq, &xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the given service driver to the Thunderbolt bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

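/*
 * Example (illustrative sketch, not part of the original file): a
 * minimal service driver. The ID table key matches the property
 * directory key the remote host exposes; the driver name, probe and
 * remove callbacks are hypothetical.
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	module_driver(my_driver, tb_register_service_driver,
 *		      tb_unregister_service_driver);
 */
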
/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

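/*
 * For example, a service with key "network", prtcid 1, prtcvers 1 and
 * prtcrevs 1 would get the modalias
 * "tbsvc:knetworkp00000001v00000001r00000001", which is what service
 * drivers match against in their module device tables.
 */
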
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block is
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);

	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	/* Start exchanging properties with the other host */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after properties are re-read.
	 */
	start_handshake(xd);

	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain
 *
 * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->remote_uuid)
		goto err_free_local_uuid;

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	/*
	 * This keeps the DMA powered on as long as we have active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or
 * not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}

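/*
 * Typical lifecycle (illustrative sketch, not part of the original
 * file): the connection manager allocates the XDomain when it
 * discovers another host behind a switch and tears it down again on
 * unplug. The variables below are assumed to come from the caller.
 *
 *	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid,
 *			      remote_uuid);
 *	if (xd)
 *		tb_xdomain_add(xd);
 *
 *	(later, on unplug)
 *	tb_xdomain_remove(xd);
 */
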
/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * paths.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

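/*
 * Example (illustrative sketch, not part of the original file): a
 * networking service driver bringing its DMA paths up once both ends
 * have agreed on HopIDs. The path variables are hypothetical; the ring
 * hop numbers come from the NHI rings the driver has allocated.
 *
 *	ret = tb_xdomain_enable_paths(xd, local_transmit_path,
 *				      tx_ring->hop, remote_transmit_path,
 *				      rx_ring->hop);
 *	if (ret)
 *		goto err_free_rings;
 */
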
/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After call to
 * this the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_xdomain *xd;

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (port->remote) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

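/*
 * Example (illustrative sketch, not part of the original file): looking
 * up the XDomain for an incoming notification and dropping the
 * reference once done. The remote_uuid variable is assumed to come
 * from the notification being handled.
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, remote_uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		(use xd here)
 *		tb_xdomain_put(xd);
 *	}
 */
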
/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}

static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host's available properties. The other connected hosts are
 * notified so they can re-read properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);

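/*
 * Example (illustrative sketch, not part of the original file): a
 * service driver publishing its own directory under a key such as
 * "network". The UUID and exact property values are hypothetical; the
 * remote host sees the directory on its next properties read.
 *
 *	dir = tb_property_create_dir(&my_service_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	tb_property_add_immediate(dir, "prtcrevs", 1);
 *	tb_property_add_immediate(dir, "prtcstns", 0);
 *
 *	ret = tb_register_property_dir("network", dir);
 *	if (ret)
 *		tb_property_free_dir(dir);
 */
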
/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
	int ret;

	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_text(xdomain_property_dir, "deviceid",
			     utsname()->nodename);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	ret = rebuild_property_block();
	if (ret) {
		tb_property_free_dir(xdomain_property_dir);
		xdomain_property_dir = NULL;
	}

	return ret;
}

void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}