drivers/thunderbolt/domain.c
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"
static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
                             const struct tb_service *svc)
{
        if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
                if (strcmp(id->protocol_key, svc->key))
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
                if (id->protocol_id != svc->prtcid)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
                if (id->protocol_version != svc->prtcvers)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
                if (id->protocol_revision != svc->prtcrevs)
                        return false;
        }

        return true;
}
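/*
 * For illustration, a service driver built on top of this matching logic
 * provides a zero-terminated tb_service_id table. The protocol key and
 * numbers below are hypothetical, not taken from an existing driver; real
 * values come from the XDomain property directory announced by the remote
 * host:
 *
 *      static const struct tb_service_id example_ids[] = {
 *              {
 *                      .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
 *                                     TBSVC_MATCH_PROTOCOL_ID |
 *                                     TBSVC_MATCH_PROTOCOL_VERSION,
 *                      .protocol_key = "example",
 *                      .protocol_id = 1,
 *                      .protocol_version = 1,
 *              },
 *              { },    // terminating entry, match_flags == 0
 *      };
 *      MODULE_DEVICE_TABLE(tbsvc, example_ids);
 */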
static const struct tb_service_id *__tb_service_match(struct device *dev,
                                                       struct device_driver *drv)
{
        struct tb_service_driver *driver;
        const struct tb_service_id *ids;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return NULL;

        driver = container_of(drv, struct tb_service_driver, driver);
        if (!driver->id_table)
                return NULL;

        for (ids = driver->id_table; ids->match_flags != 0; ids++) {
                if (match_service_id(ids, svc))
                        return ids;
        }

        return NULL;
}
static int tb_service_match(struct device *dev, struct device_driver *drv)
{
        return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;
        const struct tb_service_id *id;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        id = __tb_service_match(dev, &driver->driver);

        return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->remove)
                driver->remove(svc);

        return 0;
}

static void tb_service_shutdown(struct device *dev)
{
        struct tb_service_driver *driver;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc || !dev->driver)
                return;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->shutdown)
                driver->shutdown(svc);
}
static const char * const tb_security_names[] = {
        [TB_SECURITY_NONE] = "none",
        [TB_SECURITY_USER] = "user",
        [TB_SECURITY_SECURE] = "secure",
        [TB_SECURITY_DPONLY] = "dponly",
        [TB_SECURITY_USBONLY] = "usbonly",
};
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        uuid_t *uuids;
        ssize_t ret;
        int i;

        uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!uuids)
                return -ENOMEM;

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto out;
        }
        ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
        if (ret) {
                mutex_unlock(&tb->lock);
                goto out;
        }
        mutex_unlock(&tb->lock);

        for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
                if (!uuid_is_null(&uuids[i]))
                        ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
                                        &uuids[i]);

                ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
                                i < tb->nboot_acl - 1 ? "," : "\n");
        }

out:
        kfree(uuids);
        return ret;
}
static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        char *str, *s, *uuid_str;
        ssize_t ret = 0;
        uuid_t *acl;
        int i = 0;

        /*
         * Make sure the value is not bigger than tb->nboot_acl * UUID
         * length + commas and optional "\n". Also the smallest allowable
         * string is tb->nboot_acl * ",".
         */
        if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
                return -EINVAL;
        if (count < tb->nboot_acl - 1)
                return -EINVAL;

        str = kstrdup(buf, GFP_KERNEL);
        if (!str)
                return -ENOMEM;

        acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!acl) {
                ret = -ENOMEM;
                goto err_free_str;
        }

        uuid_str = strim(str);
        while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
                size_t len = strlen(s);

                if (len) {
                        if (len != UUID_STRING_LEN) {
                                ret = -EINVAL;
                                goto err_free_acl;
                        }
                        ret = uuid_parse(s, &acl[i]);
                        if (ret)
                                goto err_free_acl;
                }

                i++;
        }

        if (s || i < tb->nboot_acl) {
                ret = -EINVAL;
                goto err_free_acl;
        }

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto err_free_acl;
        }
        ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
        mutex_unlock(&tb->lock);

err_free_acl:
        kfree(acl);
err_free_str:
        kfree(str);

        return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
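/*
 * As parsed above, the boot_acl sysfs write is a comma separated list with
 * exactly tb->nboot_acl slots; an empty slot leaves that ACL entry cleared
 * and every non-empty slot must be a full UUID string. A hypothetical write
 * for nboot_acl == 3 that fills only the first slot could look like this
 * (the UUID value is made up for illustration):
 *
 *      01234567-89ab-cdef-0123-456789abcdef,,
 */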
static ssize_t security_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        const char *name = "unknown";

        if (tb->security_level < ARRAY_SIZE(tb_security_names))
                name = tb_security_names[tb->security_level];

        return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
        &dev_attr_boot_acl.attr,
        &dev_attr_security.attr,
        NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct tb *tb = container_of(dev, struct tb, dev);

        if (attr == &dev_attr_boot_acl.attr) {
                if (tb->nboot_acl &&
                    tb->cm_ops->get_boot_acl &&
                    tb->cm_ops->set_boot_acl)
                        return attr->mode;
                return 0;
        }

        return attr->mode;
}

static struct attribute_group domain_attr_group = {
        .is_visible = domain_attr_is_visible,
        .attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
        &domain_attr_group,
        NULL,
};

struct bus_type tb_bus_type = {
        .name = "thunderbolt",
        .match = tb_service_match,
        .probe = tb_service_probe,
        .remove = tb_service_remove,
        .shutdown = tb_service_shutdown,
};
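/*
 * A service driver binds to this bus through the tb_service_driver wrapper
 * declared in <linux/thunderbolt.h>. A minimal sketch, with the names being
 * illustrative rather than an existing driver, and using the hypothetical
 * example_ids table from earlier:
 *
 *      static int example_probe(struct tb_service *svc,
 *                               const struct tb_service_id *id)
 *      {
 *              return 0;
 *      }
 *
 *      static void example_remove(struct tb_service *svc)
 *      {
 *      }
 *
 *      static struct tb_service_driver example_driver = {
 *              .driver = {
 *                      .name = "example",
 *                      .owner = THIS_MODULE,
 *              },
 *              .probe = example_probe,
 *              .remove = example_remove,
 *              .id_table = example_ids,
 *      };
 *
 * Registering it with tb_register_service_driver(&example_driver) is what
 * ends up exercising tb_service_match() and tb_service_probe() above.
 */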
static void tb_domain_release(struct device *dev)
{
        struct tb *tb = container_of(dev, struct tb, dev);

        tb_ctl_free(tb->ctl);
        destroy_workqueue(tb->wq);
        ida_simple_remove(&tb_domain_ida, tb->index);
        mutex_destroy(&tb->lock);
        kfree(tb);
}

struct device_type tb_domain_type = {
        .name = "thunderbolt_domain",
        .release = tb_domain_release,
};
/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
        struct tb *tb;

        /*
         * Make sure the structure sizes match what the hardware
         * expects because bit-fields are being used.
         */
        BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

        tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
        if (!tb)
                return NULL;

        tb->nhi = nhi;
        mutex_init(&tb->lock);

        tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
        if (tb->index < 0)
                goto err_free;

        tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
        if (!tb->wq)
                goto err_remove_ida;

        tb->dev.parent = &nhi->pdev->dev;
        tb->dev.bus = &tb_bus_type;
        tb->dev.type = &tb_domain_type;
        tb->dev.groups = domain_attr_groups;
        dev_set_name(&tb->dev, "domain%d", tb->index);
        device_initialize(&tb->dev);

        return tb;

err_remove_ida:
        ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
        kfree(tb);

        return NULL;
}
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        struct tb *tb = data;

        if (!tb->cm_ops->handle_event) {
                tb_warn(tb, "domain does not have event handler\n");
                return true;
        }

        switch (type) {
        case TB_CFG_PKG_XDOMAIN_REQ:
        case TB_CFG_PKG_XDOMAIN_RESP:
                return tb_xdomain_handle_request(tb, type, buf, size);

        default:
                tb->cm_ops->handle_event(tb, type, buf, size);
        }

        return true;
}
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function returns successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
        int ret;

        if (WARN_ON(!tb->cm_ops))
                return -EINVAL;

        mutex_lock(&tb->lock);

        tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
        if (!tb->ctl) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        /*
         * tb_schedule_hotplug_handler may be called as soon as the config
         * channel is started. That's why we have to hold the lock here.
         */
        tb_ctl_start(tb->ctl);

        if (tb->cm_ops->driver_ready) {
                ret = tb->cm_ops->driver_ready(tb);
                if (ret)
                        goto err_ctl_stop;
        }

        ret = device_add(&tb->dev);
        if (ret)
                goto err_ctl_stop;

        /* Start the domain */
        if (tb->cm_ops->start) {
                ret = tb->cm_ops->start(tb);
                if (ret)
                        goto err_domain_del;
        }

        /* This starts event processing */
        mutex_unlock(&tb->lock);

        return 0;

err_domain_del:
        device_del(&tb->dev);
err_ctl_stop:
        tb_ctl_stop(tb->ctl);
err_unlock:
        mutex_unlock(&tb->lock);

        return ret;
}
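/*
 * For context, a connection manager would typically bring a domain up with
 * the two calls above, roughly along these lines. This is a hedged sketch:
 * my_cm_ops and struct my_cm_private are illustrative names, not part of
 * this file.
 *
 *      struct tb *tb;
 *
 *      tb = tb_domain_alloc(nhi, sizeof(struct my_cm_private));
 *      if (!tb)
 *              return NULL;
 *
 *      tb->cm_ops = &my_cm_ops;
 *
 *      if (tb_domain_add(tb)) {
 *              tb_domain_put(tb);
 *              return NULL;
 *      }
 */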
/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
        mutex_lock(&tb->lock);
        if (tb->cm_ops->stop)
                tb->cm_ops->stop(tb);
        /* Stop the domain control traffic */
        tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        flush_workqueue(tb->wq);
        device_unregister(&tb->dev);
}
/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
        int ret = 0;

        /*
         * The control channel interrupt is left enabled during suspend
         * and taking the lock here prevents any events happening before
         * we actually have stopped the domain and the control channel.
         */
        mutex_lock(&tb->lock);
        if (tb->cm_ops->suspend_noirq)
                ret = tb->cm_ops->suspend_noirq(tb);
        if (!ret)
                tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        return ret;
}
/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->resume_noirq)
                ret = tb->cm_ops->resume_noirq(tb);
        mutex_unlock(&tb->lock);

        return ret;
}
int tb_domain_suspend(struct tb *tb)
{
        int ret;

        mutex_lock(&tb->lock);
        if (tb->cm_ops->suspend) {
                ret = tb->cm_ops->suspend(tb);
                if (ret) {
                        mutex_unlock(&tb->lock);
                        return ret;
                }
        }
        mutex_unlock(&tb->lock);
        return 0;
}

void tb_domain_complete(struct tb *tb)
{
        mutex_lock(&tb->lock);
        if (tb->cm_ops->complete)
                tb->cm_ops->complete(tb);
        mutex_unlock(&tb->lock);
}
/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create tunnels for all
 * supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        if (!tb->cm_ops->approve_switch)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        return tb->cm_ops->approve_switch(tb, sw);
}
/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds the
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        ret = tb->cm_ops->add_switch_key(tb, sw);
        if (ret)
                return ret;

        return tb->cm_ops->approve_switch(tb, sw);
}
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds with
 * an HMAC of the challenge, and if that response matches the HMAC we
 * compute from the stored key, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
        u8 challenge[TB_SWITCH_KEY_SIZE];
        u8 response[TB_SWITCH_KEY_SIZE];
        u8 hmac[TB_SWITCH_KEY_SIZE];
        struct tb_switch *parent_sw;
        struct crypto_shash *tfm;
        struct shash_desc *shash;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        get_random_bytes(challenge, sizeof(challenge));
        ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
        if (ret)
                return ret;

        tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
        if (ret)
                goto err_free_tfm;

        shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
                        GFP_KERNEL);
        if (!shash) {
                ret = -ENOMEM;
                goto err_free_tfm;
        }

        shash->tfm = tfm;
        shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        memset(hmac, 0, sizeof(hmac));
        ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
        if (ret)
                goto err_free_shash;

        /* The returned HMAC must match the one we calculated */
        if (memcmp(response, hmac, sizeof(hmac))) {
                ret = -EKEYREJECTED;
                goto err_free_shash;
        }

        crypto_free_shash(tfm);
        kfree(shash);

        return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
        kfree(shash);
err_free_tfm:
        crypto_free_shash(tfm);

        return ret;
}
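/*
 * In other words, the comparison above expects the device to have computed
 *
 *      response = HMAC-SHA256(sw->key, challenge)
 *
 * over the TB_SWITCH_KEY_SIZE byte random challenge; any other response is
 * rejected with -EKEYREJECTED and the switch is not approved.
 */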
/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
        if (!tb->cm_ops->disconnect_pcie_paths)
                return -EPERM;

        return tb->cm_ops->disconnect_pcie_paths(tb);
}
/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!tb->cm_ops->approve_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!tb->cm_ops->disconnect_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}
static int disconnect_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;
        struct tb *tb = data;
        int ret = 0;

        xd = tb_to_xdomain(dev);
        if (xd && xd->tb == tb)
                ret = tb_xdomain_disable_paths(xd);

        return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
        int ret;

        ret = tb_domain_disconnect_pcie_paths(tb);
        if (ret)
                return ret;

        return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}
int tb_domain_init(void)
{
        int ret;

        ret = tb_xdomain_init();
        if (ret)
                return ret;
        ret = bus_register(&tb_bus_type);
        if (ret)
                tb_xdomain_exit();

        return ret;
}

void tb_domain_exit(void)
{
        bus_unregister(&tb_bus_type);
        ida_destroy(&tb_domain_ida);
        tb_switch_exit();
        tb_xdomain_exit();
}