/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);
static bool match_service_id(const struct tb_service_id *id,
                             const struct tb_service *svc)
{
        if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
                if (strcmp(id->protocol_key, svc->key))
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
                if (id->protocol_id != svc->prtcid)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
                if (id->protocol_version != svc->prtcvers)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
                if (id->protocol_revision != svc->prtcrevs)
                        return false;
        }

        return true;
}
static const struct tb_service_id *__tb_service_match(struct device *dev,
                                                       struct device_driver *drv)
{
        struct tb_service_driver *driver;
        const struct tb_service_id *ids;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return NULL;

        driver = container_of(drv, struct tb_service_driver, driver);
        if (!driver->id_table)
                return NULL;

        for (ids = driver->id_table; ids->match_flags != 0; ids++) {
                if (match_service_id(ids, svc))
                        return ids;
        }

        return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
        return !!__tb_service_match(dev, drv);
}
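/*
 * Illustrative sketch only (not part of this file): a hypothetical service
 * driver as it would be matched by tb_service_match() above. The protocol
 * key and ID are made-up values; a real driver registers a table like this
 * through the Thunderbolt service driver API (tb_register_service_driver())
 * and the bus core walks it in __tb_service_match().
 *
 *      static const struct tb_service_id example_ids[] = {
 *              {
 *                      .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
 *                                     TBSVC_MATCH_PROTOCOL_ID,
 *                      .protocol_key = "example",
 *                      .protocol_id = 1,
 *              },
 *              { },
 *      };
 *
 *      static struct tb_service_driver example_driver = {
 *              .driver = {
 *                      .name = "example-service",
 *              },
 *              .probe = example_probe,
 *              .remove = example_remove,
 *              .id_table = example_ids,
 *      };
 */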
static int tb_service_probe(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;
        const struct tb_service_id *id;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        id = __tb_service_match(dev, &driver->driver);

        return driver->probe(svc, id);
}
static int tb_service_remove(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->remove)
                driver->remove(svc);

        return 0;
}
static void tb_service_shutdown(struct device *dev)
{
        struct tb_service_driver *driver;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc || !dev->driver)
                return;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->shutdown)
                driver->shutdown(svc);
}
static const char * const tb_security_names[] = {
        [TB_SECURITY_NONE] = "none",
        [TB_SECURITY_USER] = "user",
        [TB_SECURITY_SECURE] = "secure",
        [TB_SECURITY_DPONLY] = "dponly",
        [TB_SECURITY_USBONLY] = "usbonly",
};
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        uuid_t *uuids;
        ssize_t ret;
        int i;

        uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!uuids)
                return -ENOMEM;

        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto out;
        }
        ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
        if (ret) {
                mutex_unlock(&tb->lock);
                goto out;
        }
        mutex_unlock(&tb->lock);

        for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
                if (!uuid_is_null(&uuids[i]))
                        ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
                                        &uuids[i]);

                ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
                                i < tb->nboot_acl - 1 ? "," : "\n");
        }

out:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
        kfree(uuids);

        return ret;
}
static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        char *str, *s, *uuid_str;
        ssize_t ret = 0;
        uuid_t *acl;
        int i = 0;

        /*
         * Make sure the value is not bigger than tb->nboot_acl * UUID
         * length + commas and optional "\n". Also the smallest allowable
         * string is tb->nboot_acl * ",".
         */
        if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
                return -EINVAL;
        if (count < tb->nboot_acl - 1)
                return -EINVAL;

        str = kstrdup(buf, GFP_KERNEL);
        if (!str)
                return -ENOMEM;

        acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!acl) {
                ret = -ENOMEM;
                goto err_free_str;
        }

        uuid_str = strim(str);
        while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
                size_t len = strlen(s);

                if (len) {
                        if (len != UUID_STRING_LEN) {
                                ret = -EINVAL;
                                goto err_free_acl;
                        }
                        ret = uuid_parse(s, &acl[i]);
                        if (ret)
                                goto err_free_acl;
                }

                i++;
        }

        if (s || i < tb->nboot_acl) {
                ret = -EINVAL;
                goto err_free_acl;
        }

        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto err_rpm_put;
        }
        ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
        if (!ret) {
                /* Notify userspace about the change */
                kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
        }
        mutex_unlock(&tb->lock);

err_rpm_put:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
        kfree(acl);
err_free_str:
        kfree(str);

        return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
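/*
 * Illustrative format example (placeholder UUIDs, not real data): with
 * tb->nboot_acl == 3, boot_acl_show() above emits one slot per entry,
 * comma separated, leaving unused (null UUID) slots empty:
 *
 *      aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee,,11111111-2222-3333-4444-555555555555
 *
 * boot_acl_store() accepts the same comma-separated layout; every non-empty
 * entry must be exactly UUID_STRING_LEN characters long or the write fails
 * with -EINVAL.
 */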
static ssize_t security_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        const char *name = "unknown";

        if (tb->security_level < ARRAY_SIZE(tb_security_names))
                name = tb_security_names[tb->security_level];

        return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);
static struct attribute *domain_attrs[] = {
        &dev_attr_boot_acl.attr,
        &dev_attr_security.attr,
        NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct tb *tb = container_of(dev, struct tb, dev);

        if (attr == &dev_attr_boot_acl.attr) {
                if (tb->nboot_acl &&
                    tb->cm_ops->get_boot_acl &&
                    tb->cm_ops->set_boot_acl)
                        return attr->mode;
                return 0;
        }

        return attr->mode;
}

static struct attribute_group domain_attr_group = {
        .is_visible = domain_attr_is_visible,
        .attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
        &domain_attr_group,
        NULL,
};
struct bus_type tb_bus_type = {
        .name = "thunderbolt",
        .match = tb_service_match,
        .probe = tb_service_probe,
        .remove = tb_service_remove,
        .shutdown = tb_service_shutdown,
};
static void tb_domain_release(struct device *dev)
{
        struct tb *tb = container_of(dev, struct tb, dev);

        tb_ctl_free(tb->ctl);
        destroy_workqueue(tb->wq);
        ida_simple_remove(&tb_domain_ida, tb->index);
        mutex_destroy(&tb->lock);
        kfree(tb);
}
struct device_type tb_domain_type = {
        .name = "thunderbolt_domain",
        .release = tb_domain_release,
};
/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
        struct tb *tb;

        /*
         * Make sure the structure sizes match what the hardware
         * expects because bit-fields are being used.
         */
        BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

        tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
        if (!tb)
                return NULL;

        tb->nhi = nhi;
        mutex_init(&tb->lock);

        tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
        if (tb->index < 0)
                goto err_free;

        tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
        if (!tb->wq)
                goto err_remove_ida;

        tb->dev.parent = &nhi->pdev->dev;
        tb->dev.bus = &tb_bus_type;
        tb->dev.type = &tb_domain_type;
        tb->dev.groups = domain_attr_groups;
        dev_set_name(&tb->dev, "domain%d", tb->index);
        device_initialize(&tb->dev);

        return tb;

err_remove_ida:
        ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
        kfree(tb);

        return NULL;
}
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        struct tb *tb = data;

        if (!tb->cm_ops->handle_event) {
                tb_warn(tb, "domain does not have event handler\n");
                return true;
        }

        switch (type) {
        case TB_CFG_PKG_XDOMAIN_REQ:
        case TB_CFG_PKG_XDOMAIN_RESP:
                return tb_xdomain_handle_request(tb, type, buf, size);

        default:
                tb->cm_ops->handle_event(tb, type, buf, size);
        }

        return true;
}
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
        int ret;

        if (WARN_ON(!tb->cm_ops))
                return -EINVAL;

        mutex_lock(&tb->lock);

        tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
        if (!tb->ctl) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        /*
         * tb_schedule_hotplug_handler may be called as soon as the config
         * channel is started. That's why we have to hold the lock here.
         */
        tb_ctl_start(tb->ctl);

        if (tb->cm_ops->driver_ready) {
                ret = tb->cm_ops->driver_ready(tb);
                if (ret)
                        goto err_ctl_stop;
        }

        ret = device_add(&tb->dev);
        if (ret)
                goto err_ctl_stop;

        /* Start the domain */
        if (tb->cm_ops->start) {
                ret = tb->cm_ops->start(tb);
                if (ret)
                        goto err_domain_del;
        }

        /* This starts event processing */
        mutex_unlock(&tb->lock);

        pm_runtime_no_callbacks(&tb->dev);
        pm_runtime_set_active(&tb->dev);
        pm_runtime_enable(&tb->dev);
        pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_use_autosuspend(&tb->dev);

        return 0;

err_domain_del:
        device_del(&tb->dev);
err_ctl_stop:
        tb_ctl_stop(tb->ctl);
err_unlock:
        mutex_unlock(&tb->lock);

        return ret;
}
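/*
 * Sketch of the expected connection manager flow (hypothetical names,
 * illustration only): allocate the domain, fill in @cm_ops, then add it.
 * A domain that has not been added yet is released with tb_domain_put().
 *
 *      struct tb *tb;
 *
 *      tb = tb_domain_alloc(nhi, sizeof(struct example_cm));
 *      if (!tb)
 *              return NULL;
 *
 *      tb->cm_ops = &example_cm_ops;
 *
 *      if (tb_domain_add(tb)) {
 *              tb_domain_put(tb);
 *              return NULL;
 *      }
 *
 *      return tb;
 */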
/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
        mutex_lock(&tb->lock);
        if (tb->cm_ops->stop)
                tb->cm_ops->stop(tb);
        /* Stop the domain control traffic */
        tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        flush_workqueue(tb->wq);
        device_unregister(&tb->dev);
}
/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
        int ret = 0;

        /*
         * The control channel interrupt is left enabled during suspend
         * and taking the lock here prevents any events happening before
         * we actually have stopped the domain and the control channel.
         */
        mutex_lock(&tb->lock);
        if (tb->cm_ops->suspend_noirq)
                ret = tb->cm_ops->suspend_noirq(tb);
        if (!ret)
                tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->resume_noirq)
                ret = tb->cm_ops->resume_noirq(tb);
        mutex_unlock(&tb->lock);

        return ret;
}
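/*
 * A minimal sketch (illustrative, assuming the host controller driver keeps
 * the domain pointer in its device drvdata) of how the helpers above are
 * expected to be wired into the NHI driver's system sleep callbacks:
 *
 *      static int example_nhi_suspend_noirq(struct device *dev)
 *      {
 *              struct tb *tb = dev_get_drvdata(dev);
 *
 *              return tb_domain_suspend_noirq(tb);
 *      }
 *
 *      static int example_nhi_resume_noirq(struct device *dev)
 *      {
 *              struct tb *tb = dev_get_drvdata(dev);
 *
 *              return tb_domain_resume_noirq(tb);
 *      }
 */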
int tb_domain_suspend(struct tb *tb)
{
        return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

void tb_domain_complete(struct tb *tb)
{
        if (tb->cm_ops->complete)
                tb->cm_ops->complete(tb);
}
int tb_domain_runtime_suspend(struct tb *tb)
{
        if (tb->cm_ops->runtime_suspend) {
                int ret = tb->cm_ops->runtime_suspend(tb);
                if (ret)
                        return ret;
        }
        tb_ctl_stop(tb->ctl);
        return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->runtime_resume) {
                int ret = tb->cm_ops->runtime_resume(tb);
                if (ret)
                        return ret;
        }
        return 0;
}
/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create tunnels for all
 * supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        if (!tb->cm_ops->approve_switch)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        return tb->cm_ops->approve_switch(tb, sw);
}
/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds the
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        ret = tb->cm_ops->add_switch_key(tb, sw);
        if (ret)
                return ret;

        return tb->cm_ops->approve_switch(tb, sw);
}
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this, and if the response matches the HMAC we compute from the
 * challenge and the stored key, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
        u8 challenge[TB_SWITCH_KEY_SIZE];
        u8 response[TB_SWITCH_KEY_SIZE];
        u8 hmac[TB_SWITCH_KEY_SIZE];
        struct tb_switch *parent_sw;
        struct crypto_shash *tfm;
        struct shash_desc *shash;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        get_random_bytes(challenge, sizeof(challenge));
        ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
        if (ret)
                return ret;

        tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
        if (ret)
                goto err_free_tfm;

        shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
                        GFP_KERNEL);
        if (!shash) {
                ret = -ENOMEM;
                goto err_free_tfm;
        }

        shash->tfm = tfm;
        shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        memset(hmac, 0, sizeof(hmac));
        ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
        if (ret)
                goto err_free_shash;

        /* The returned HMAC must match the one we calculated */
        if (memcmp(response, hmac, sizeof(hmac))) {
                ret = -EKEYREJECTED;
                goto err_free_shash;
        }

        crypto_free_shash(tfm);
        kfree(shash);

        return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
        kfree(shash);
err_free_tfm:
        crypto_free_shash(tfm);

        return ret;
}
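/*
 * In short, the contract verified above (restated for clarity): the
 * connection manager's ->challenge_switch_key() hands @challenge to the
 * device and returns the device's @response, and the device is approved
 * only when
 *
 *      response == HMAC-SHA256(sw->key, challenge)
 *
 * so only a device holding the key previously stored through
 * tb_domain_approve_switch_key() passes the challenge.
 */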
/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
        if (!tb->cm_ops->disconnect_pcie_paths)
                return -EPERM;

        return tb->cm_ops->disconnect_pcie_paths(tb);
}
/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!tb->cm_ops->approve_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->approve_xdomain_paths(tb, xd);
}
/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!tb->cm_ops->disconnect_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}
static int disconnect_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;
        struct tb *tb = data;
        int ret = 0;

        xd = tb_to_xdomain(dev);
        if (xd && xd->tb == tb)
                ret = tb_xdomain_disable_paths(xd);

        return ret;
}
/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain), for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
        int ret;

        ret = tb_domain_disconnect_pcie_paths(tb);
        if (ret)
                return ret;

        return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}
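/*
 * Usage sketch (illustrative only): a caller preparing the host controller
 * for NVM firmware authentication drops every tunnel first and only then
 * starts the connection manager specific NVM flow:
 *
 *      ret = tb_domain_disconnect_all_paths(tb);
 *      if (ret)
 *              return ret;
 *
 *      ... proceed with the NVM authentication ...
 */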
int tb_domain_init(void)
{
        int ret;

        ret = tb_xdomain_init();
        if (ret)
                return ret;
        ret = bus_register(&tb_bus_type);
        if (ret)
                tb_xdomain_exit();

        return ret;
}
void tb_domain_exit(void)
{
        bus_unregister(&tb_bus_type);
        ida_destroy(&tb_domain_ida);
        tb_switch_exit();
        tb_xdomain_exit();
}