// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
                             const struct tb_service *svc)
{
        if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
                if (strcmp(id->protocol_key, svc->key))
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
                if (id->protocol_id != svc->prtcid)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
                if (id->protocol_version != svc->prtcvers)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
                if (id->protocol_revision != svc->prtcrevs)
                        return false;
        }

        return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
                                                      struct device_driver *drv)
{
        struct tb_service_driver *driver;
        const struct tb_service_id *ids;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return NULL;

        driver = container_of(drv, struct tb_service_driver, driver);
        if (!driver->id_table)
                return NULL;

        for (ids = driver->id_table; ids->match_flags != 0; ids++) {
                if (match_service_id(ids, svc))
                        return ids;
        }

        return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
        return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;
        const struct tb_service_id *id;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        id = __tb_service_match(dev, &driver->driver);

        return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->remove)
                driver->remove(svc);

        return 0;
}

static void tb_service_shutdown(struct device *dev)
{
        struct tb_service_driver *driver;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc || !dev->driver)
                return;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->shutdown)
                driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
        [TB_SECURITY_NONE] = "none",
        [TB_SECURITY_USER] = "user",
        [TB_SECURITY_SECURE] = "secure",
        [TB_SECURITY_DPONLY] = "dponly",
        [TB_SECURITY_USBONLY] = "usbonly",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        uuid_t *uuids;
        ssize_t ret;
        int i;

        uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!uuids)
                return -ENOMEM;

        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto out;
        }
        ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
        if (ret) {
                mutex_unlock(&tb->lock);
                goto out;
        }
        mutex_unlock(&tb->lock);

        for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
                if (!uuid_is_null(&uuids[i]))
                        ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
                                        &uuids[i]);

                ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
                                i < tb->nboot_acl - 1 ? "," : "\n");
        }

out:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
        kfree(uuids);

        return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        char *str, *s, *uuid_str;
        ssize_t ret = 0;
        uuid_t *acl;
        int i = 0;

        /*
         * Make sure the value is not bigger than tb->nboot_acl * UUID
         * length + commas and optional "\n". Also the smallest allowable
         * string is tb->nboot_acl * ",".
         */
        if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
                return -EINVAL;
        if (count < tb->nboot_acl - 1)
                return -EINVAL;

        str = kstrdup(buf, GFP_KERNEL);
        if (!str)
                return -ENOMEM;

        acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!acl) {
                ret = -ENOMEM;
                goto err_free_str;
        }

        uuid_str = strim(str);
        while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
                size_t len = strlen(s);

                if (len) {
                        if (len != UUID_STRING_LEN) {
                                ret = -EINVAL;
                                goto err_free_acl;
                        }
                        ret = uuid_parse(s, &acl[i]);
                        if (ret)
                                goto err_free_acl;
                }

                i++;
        }

        if (s || i < tb->nboot_acl) {
                ret = -EINVAL;
                goto err_free_acl;
        }

        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto err_rpm_put;
        }
        ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
        if (!ret) {
                /* Notify userspace about the change */
                kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
        }
        mutex_unlock(&tb->lock);

err_rpm_put:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
        kfree(acl);
err_free_str:
        kfree(str);

        return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

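/*
 * Usage sketch (illustrative): the boot ACL is read and written through
 * sysfs as a comma-separated list of UUIDs; empty slots appear as empty
 * strings between commas. The UUID below is made up, and the trailing
 * comma leaves the second slot of a two-entry ACL empty.
 *
 *      # cat /sys/bus/thunderbolt/devices/domain0/boot_acl
 *      # echo "11111111-2222-3333-4444-555555555555," \
 *              > /sys/bus/thunderbolt/devices/domain0/boot_acl
 */
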
static ssize_t iommu_dma_protection_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        /*
         * Kernel DMA protection is a feature where Thunderbolt security is
         * handled natively using IOMMU. It is enabled when the IOMMU is
         * enabled and the ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
         */
        return sprintf(buf, "%d\n",
                       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        const char *name = "unknown";

        if (tb->security_level < ARRAY_SIZE(tb_security_names))
                name = tb_security_names[tb->security_level];

        return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

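/*
 * Usage sketch (illustrative): these domain attributes are plain sysfs
 * reads from userspace, e.g.:
 *
 *      # cat /sys/bus/thunderbolt/devices/domain0/security
 *      user
 *      # cat /sys/bus/thunderbolt/devices/domain0/iommu_dma_protection
 *      0
 */
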
static struct attribute *domain_attrs[] = {
        &dev_attr_boot_acl.attr,
        &dev_attr_iommu_dma_protection.attr,
        &dev_attr_security.attr,
        NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct tb *tb = container_of(dev, struct tb, dev);

        if (attr == &dev_attr_boot_acl.attr) {
                if (tb->nboot_acl &&
                    tb->cm_ops->get_boot_acl &&
                    tb->cm_ops->set_boot_acl)
                        return attr->mode;
                return 0;
        }

        return attr->mode;
}

static struct attribute_group domain_attr_group = {
        .is_visible = domain_attr_is_visible,
        .attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
        &domain_attr_group,
        NULL,
};

struct bus_type tb_bus_type = {
        .name = "thunderbolt",
        .match = tb_service_match,
        .probe = tb_service_probe,
        .remove = tb_service_remove,
        .shutdown = tb_service_shutdown,
};

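/*
 * Illustrative sketch: a service driver hooks into tb_bus_type through
 * struct tb_service_driver and tb_register_service_driver(). The names
 * my_probe/my_remove/my_service_ids are hypothetical.
 *
 *      static struct tb_service_driver my_driver = {
 *              .driver.name = "my-service",
 *              .probe = my_probe,
 *              .remove = my_remove,
 *              .id_table = my_service_ids,
 *      };
 *
 *      ret = tb_register_service_driver(&my_driver);
 */
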
static void tb_domain_release(struct device *dev)
{
        struct tb *tb = container_of(dev, struct tb, dev);

        tb_ctl_free(tb->ctl);
        destroy_workqueue(tb->wq);
        ida_simple_remove(&tb_domain_ida, tb->index);
        mutex_destroy(&tb->lock);
        kfree(tb);
}

struct device_type tb_domain_type = {
        .name = "thunderbolt_domain",
        .release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
        struct tb *tb;

        /*
         * Make sure the structure sizes match what the hardware
         * expects because bit-fields are being used.
         */
        BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

        tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
        if (!tb)
                return NULL;

        tb->nhi = nhi;
        mutex_init(&tb->lock);

        tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
        if (tb->index < 0)
                goto err_free;

        tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
        if (!tb->wq)
                goto err_remove_ida;

        tb->dev.parent = &nhi->pdev->dev;
        tb->dev.bus = &tb_bus_type;
        tb->dev.type = &tb_domain_type;
        tb->dev.groups = domain_attr_groups;
        dev_set_name(&tb->dev, "domain%d", tb->index);
        device_initialize(&tb->dev);

        return tb;

err_remove_ida:
        ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
        kfree(tb);

        return NULL;
}

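/*
 * Illustrative sketch: a connection manager typically allocates the
 * domain with room for its private data, fills in cm_ops and then adds
 * the domain. struct my_cm and my_cm_ops are hypothetical.
 *
 *      tb = tb_domain_alloc(nhi, sizeof(struct my_cm));
 *      if (!tb)
 *              return NULL;
 *      tb->cm_ops = &my_cm_ops;
 *
 *      if (tb_domain_add(tb)) {
 *              tb_domain_put(tb);
 *              return NULL;
 *      }
 */
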
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        struct tb *tb = data;

        if (!tb->cm_ops->handle_event) {
                tb_warn(tb, "domain does not have event handler\n");
                return true;
        }

        switch (type) {
        case TB_CFG_PKG_XDOMAIN_REQ:
        case TB_CFG_PKG_XDOMAIN_RESP:
                return tb_xdomain_handle_request(tb, type, buf, size);

        default:
                tb->cm_ops->handle_event(tb, type, buf, size);
        }

        return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
        int ret;

        if (WARN_ON(!tb->cm_ops))
                return -EINVAL;

        mutex_lock(&tb->lock);

        tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
        if (!tb->ctl) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        /*
         * tb_schedule_hotplug_handler may be called as soon as the config
         * channel is started. That's why we have to hold the lock here.
         */
        tb_ctl_start(tb->ctl);

        if (tb->cm_ops->driver_ready) {
                ret = tb->cm_ops->driver_ready(tb);
                if (ret)
                        goto err_ctl_stop;
        }

        ret = device_add(&tb->dev);
        if (ret)
                goto err_ctl_stop;

        /* Start the domain */
        if (tb->cm_ops->start) {
                ret = tb->cm_ops->start(tb);
                if (ret)
                        goto err_domain_del;
        }

        /* This starts event processing */
        mutex_unlock(&tb->lock);

        pm_runtime_no_callbacks(&tb->dev);
        pm_runtime_set_active(&tb->dev);
        pm_runtime_enable(&tb->dev);
        pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_use_autosuspend(&tb->dev);

        return 0;

err_domain_del:
        device_del(&tb->dev);
err_ctl_stop:
        tb_ctl_stop(tb->ctl);
err_unlock:
        mutex_unlock(&tb->lock);

        return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
        mutex_lock(&tb->lock);
        if (tb->cm_ops->stop)
                tb->cm_ops->stop(tb);
        /* Stop the domain control traffic */
        tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        flush_workqueue(tb->wq);
        device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
        int ret = 0;

        /*
         * The control channel interrupt is left enabled during suspend
         * and taking the lock here prevents any events happening before
         * we actually have stopped the domain and the control channel.
         */
        mutex_lock(&tb->lock);
        if (tb->cm_ops->suspend_noirq)
                ret = tb->cm_ops->suspend_noirq(tb);
        if (!ret)
                tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->resume_noirq)
                ret = tb->cm_ops->resume_noirq(tb);
        mutex_unlock(&tb->lock);

        return ret;
}

int tb_domain_suspend(struct tb *tb)
{
        return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

void tb_domain_complete(struct tb *tb)
{
        if (tb->cm_ops->complete)
                tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
        if (tb->cm_ops->runtime_suspend) {
                int ret = tb->cm_ops->runtime_suspend(tb);
                if (ret)
                        return ret;
        }
        tb_ctl_stop(tb->ctl);
        return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->runtime_resume) {
                int ret = tb->cm_ops->runtime_resume(tb);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This approves the switch using connection manager specific means. In
 * case of success the connection manager will create tunnels for all
 * supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        if (!tb->cm_ops->approve_switch)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        ret = tb->cm_ops->add_switch_key(tb, sw);
        if (ret)
                return ret;

        return tb->cm_ops->approve_switch(tb, sw);
}

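/*
 * Usage sketch (illustrative): with the "user" or "secure" security
 * levels these approval paths are driven from userspace by writing to
 * the switch's authorized attribute; writing 1 approves the device and
 * writing 2 approves it and stores a new key for secure connect:
 *
 *      # echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 */
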
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds with
 * an HMAC computed over the challenge; if it matches the HMAC we compute
 * locally from the stored key, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
        u8 challenge[TB_SWITCH_KEY_SIZE];
        u8 response[TB_SWITCH_KEY_SIZE];
        u8 hmac[TB_SWITCH_KEY_SIZE];
        struct tb_switch *parent_sw;
        struct crypto_shash *tfm;
        struct shash_desc *shash;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        get_random_bytes(challenge, sizeof(challenge));
        ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
        if (ret)
                return ret;

        tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
        if (ret)
                goto err_free_tfm;

        shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
                        GFP_KERNEL);
        if (!shash) {
                ret = -ENOMEM;
                goto err_free_tfm;
        }

        shash->tfm = tfm;
        shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        memset(hmac, 0, sizeof(hmac));
        ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
        if (ret)
                goto err_free_shash;

        /* The returned HMAC must match the one we calculated */
        if (memcmp(response, hmac, sizeof(hmac))) {
                ret = -EKEYREJECTED;
                goto err_free_shash;
        }

        crypto_free_shash(tfm);
        kfree(shash);

        return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
        kfree(shash);
err_free_tfm:
        crypto_free_shash(tfm);

        return ret;
}

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
        if (!tb->cm_ops->disconnect_pcie_paths)
                return -EPERM;

        return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!tb->cm_ops->approve_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!tb->cm_ops->disconnect_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;
        struct tb *tb = data;
        int ret = 0;

        xd = tb_to_xdomain(dev);
        if (xd && xd->tb == tb)
                ret = tb_xdomain_disable_paths(xd);

        return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain), for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
        int ret;

        ret = tb_domain_disconnect_pcie_paths(tb);
        if (ret)
                return ret;

        return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
        int ret;

        ret = tb_xdomain_init();
        if (ret)
                return ret;
        ret = bus_register(&tb_bus_type);
        if (ret)
                tb_xdomain_exit();

        return ret;
}

void tb_domain_exit(void)
{
        bus_unregister(&tb_bus_type);
        ida_destroy(&tb_domain_ida);
        tb_xdomain_exit();
}
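
/*
 * Illustrative sketch: tb_domain_init()/tb_domain_exit() bracket the bus
 * lifetime from the NHI module init/exit, roughly along these lines
 * (nhi_driver stands in for the host controller PCI driver):
 *
 *      static int __init nhi_init(void)
 *      {
 *              int ret;
 *
 *              ret = tb_domain_init();
 *              if (ret)
 *                      return ret;
 *              ret = pci_register_driver(&nhi_driver);
 *              if (ret)
 *                      tb_domain_exit();
 *              return ret;
 *      }
 */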