// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"	/* local driver definitions (struct tb, tb_cfg_*, properties) */
#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};
/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);
/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;
/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);
/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}
static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}
static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}
static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}
/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}
/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
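
/*
 * Illustrative sketch, not part of the driver: a service driver that
 * has defined its own request/response wire structures could exchange
 * one pair over the control channel like this. The my_request and
 * my_response types are hypothetical.
 *
 *	struct my_request req = { };
 *	struct my_response res = { };
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP,
 *				 XDOMAIN_DEFAULT_TIMEOUT);
 *	if (ret)
 *		return ret;
 */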
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
				      u8 sequence, enum tb_xdp_type type,
				      size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
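
/*
 * Worked example (assuming a 12-byte basic XDomain header): for a
 * 28-byte packet with sequence number 2 the header ends up with
 *
 *	length_sn = (28 - 12) / 4 = 4 dwords of payload
 *	length_sn |= (2 << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK
 *
 * so the receiver can recover both the payload length and the
 * sequence number from the same field.
 */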
static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}
static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, which we might add support
	 * for later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}
static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}
static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to callback whenever a package with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
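
/*
 * Illustrative sketch: a service driver hooks its own protocol UUID
 * into the incoming packet path. The my_* names are hypothetical.
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *		.data = &my_driver_data,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */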
/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		const struct tb_xdp_properties_changed *xchg =
			(const struct tb_xdp_properties_changed *)pkg;
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
		if (xd) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
			tb_xdomain_put(xd);
		}

		break;
	}

	default:
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);
}
static void
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return;
	}
	xw->tb = tb;

	queue_work(tb->wq, &xw->work);
}
/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers a new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
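
/*
 * Illustrative sketch of a minimal service driver registration. The
 * my_* names are hypothetical; TB_SERVICE() is provided by
 * include/linux/thunderbolt.h.
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */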
/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
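
/*
 * For example, a service with key "network", protocol ID 1, version 1
 * and revision 1 yields the modalias
 * "tbsvc:knetworkp00000001v00000001r00000001", which userspace uses
 * for driver matching.
 */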
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);
static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};
static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}
static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}
struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);
static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}
static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}
static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}
static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}
static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}
/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}
static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block is
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);

	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}
static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);
static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};
static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}
static void start_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	/* Start exchanging properties with the other host */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}
static void stop_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}
static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after properties are re-read.
	 */
	start_handshake(xd);

	return 0;
}
= {
1075 SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend
, tb_xdomain_resume
)
1078 struct device_type tb_xdomain_type
= {
1079 .name
= "thunderbolt_xdomain",
1080 .release
= tb_xdomain_release
,
1081 .pm
= &tb_xdomain_pm_ops
,
1083 EXPORT_SYMBOL_GPL(tb_xdomain_type
);
/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to the
 *	    other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->remote_uuid)
		goto err_free_local_uuid;

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	/*
	 * This keeps the DMA powered on as long as we have an active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}
/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}
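
/*
 * Illustrative sketch of the expected lifecycle on the connection
 * manager side (names assumed from the rest of the driver, error
 * handling omitted):
 *
 *	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, remote_uuid);
 *	if (xd)
 *		tb_xdomain_add(xd);
 *
 * and once the connection goes away:
 *
 *	tb_xdomain_remove(xd);
 */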
static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}
/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * rings.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
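
/*
 * Illustrative sketch: once both ends have agreed on HopIDs and rings
 * through their service-specific protocol, either side enables the
 * DMA paths. The variables here are hypothetical.
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	if (ret)
 *		return ret;
 *	...
 *	tb_xdomain_disable_paths(xd);
 */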
struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
					      const struct tb_xdomain_lookup *lookup)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_xdomain *xd;

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (port->remote) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}
/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * XDomain.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * XDomain.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * XDomain.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
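
/*
 * Illustrative sketch: all three lookup helpers follow the same
 * pattern. The caller holds @tb->lock and drops the acquired
 * reference when done:
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_route(tb, route);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */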
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet is at least size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ) {
			tb_xdp_schedule_request(tb, hdr, size);
			return true;
		}
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}
static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}
static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}
static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}
static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}

	return false;
}
/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the properties the host exposes. The other connected hosts are
 * notified so they can re-read properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
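
/*
 * Illustrative sketch: a service driver publishing its property
 * directory under an 8-character key. The "network" key and property
 * values are examples only; error handling is omitted.
 *
 *	struct tb_property_dir *dir;
 *
 *	dir = tb_property_create_dir(NULL);
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	tb_property_add_immediate(dir, "prtcrevs", 1);
 *	ret = tb_register_property_dir("network", dir);
 */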
/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
int tb_xdomain_init(void)
{
	int ret;

	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_text(xdomain_property_dir, "deviceid",
			     utsname()->nodename);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	ret = rebuild_property_block();
	if (ret) {
		tb_property_free_dir(xdomain_property_dir);
		xdomain_property_dir = NULL;
	}

	return ret;
}
void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}