drivers/thunderbolt/domain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

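/*
 * Returns the first entry of @drv's id_table that matches the service
 * behind @dev, or NULL if there is none.
 */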
static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

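/*
 * A minimal sketch of how a service driver binds against this bus,
 * assuming the tb_register_service_driver() helper and TB_SERVICE()
 * macro from <linux/thunderbolt.h> (all other identifiers below are
 * illustrative):
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */
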
static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					&uuids[i]);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
				 i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

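/*
 * Writes take the same comma-separated layout that boot_acl_show()
 * produces: exactly tb->nboot_acl slots, each either empty or one full
 * UUID_STRING_LEN-character UUID; empty slots parse as the null UUID.
 * An illustrative write for nboot_acl == 2 would thus be either
 * "<uuid1>,<uuid2>" or ",<uuid2>" to fill only the second slot.
 */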
static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl * ",".
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security is
	 * handled natively using the IOMMU. It is enabled when the IOMMU is
	 * enabled and the ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

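/*
 * The Thunderbolt bus itself. It is registered in tb_domain_init()
 * below; services and service drivers are paired through
 * tb_service_match() above.
 */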
struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware
	 * expects, because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

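/*
 * A minimal sketch of the expected connection manager usage (the
 * cm_ops instance and its name are illustrative):
 *
 *	tb = tb_domain_alloc(nhi, sizeof(struct my_cm));
 *	if (!tb)
 *		return NULL;
 *	tb->cm_ops = &my_cm_ops;
 *	...
 *	if (tb_domain_add(tb)) {
 *		tb_domain_put(tb);
 *		return NULL;
 *	}
 */
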
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		return tb_xdomain_handle_request(tb, type, buf, size);

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);
		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create tunnels for all
 * supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds the
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds, and
 * if the response matches the HMAC we compute locally from the stored
 * key, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	/* Compute HMAC-SHA256(key, challenge) locally for comparison */
	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	ret = tb_xdomain_init();
	if (ret)
		return ret;
	ret = bus_register(&tb_bus_type);
	if (ret)
		tb_xdomain_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_switch_exit();
	tb_xdomain_exit();
}