1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - bus logic (NHI independent)
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/platform_data/x86/apple.h>
19 * struct tb_cm - Simple Thunderbolt connection manager
20 * @tunnel_list: List of active tunnels
21 * @hotplug_active: tb_handle_hotplug will stop progressing plug
22 * events and exit if this is not set (it needs to
23 * acquire the lock one more time). Used to drain wq
24 * after cfg has been paused.
27 struct list_head tunnel_list
;
31 struct tb_hotplug_event
{
32 struct work_struct work
;
static void tb_handle_hotplug(struct work_struct *work);
41 static void tb_queue_hotplug(struct tb
*tb
, u64 route
, u8 port
, bool unplug
)
43 struct tb_hotplug_event
*ev
;
45 ev
= kmalloc(sizeof(*ev
), GFP_KERNEL
);
53 INIT_WORK(&ev
->work
, tb_handle_hotplug
);
54 queue_work(tb
->wq
, &ev
->work
);
/* enumeration & hot plug handling */
59 static void tb_discover_tunnels(struct tb_switch
*sw
)
61 struct tb
*tb
= sw
->tb
;
62 struct tb_cm
*tcm
= tb_priv(tb
);
66 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++) {
67 struct tb_tunnel
*tunnel
= NULL
;
70 switch (port
->config
.type
) {
71 case TB_TYPE_DP_HDMI_IN
:
72 tunnel
= tb_tunnel_discover_dp(tb
, port
);
75 case TB_TYPE_PCIE_DOWN
:
76 tunnel
= tb_tunnel_discover_pci(tb
, port
);
86 if (tb_tunnel_is_pci(tunnel
)) {
87 struct tb_switch
*parent
= tunnel
->dst_port
->sw
;
89 while (parent
!= tunnel
->src_port
->sw
) {
91 parent
= tb_switch_parent(parent
);
95 list_add_tail(&tunnel
->list
, &tcm
->tunnel_list
);
98 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++) {
99 if (tb_port_has_remote(&sw
->ports
[i
]))
100 tb_discover_tunnels(sw
->ports
[i
].remote
->sw
);
104 static void tb_scan_xdomain(struct tb_port
*port
)
106 struct tb_switch
*sw
= port
->sw
;
107 struct tb
*tb
= sw
->tb
;
108 struct tb_xdomain
*xd
;
111 route
= tb_downstream_route(port
);
112 xd
= tb_xdomain_find_by_route(tb
, route
);
118 xd
= tb_xdomain_alloc(tb
, &sw
->dev
, route
, tb
->root_switch
->uuid
,
121 tb_port_at(route
, sw
)->xdomain
= xd
;
static void tb_scan_port(struct tb_port *port);
129 * tb_scan_switch() - scan for and initialize downstream switches
131 static void tb_scan_switch(struct tb_switch
*sw
)
134 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++)
135 tb_scan_port(&sw
->ports
[i
]);
139 * tb_scan_port() - check for and initialize switches below port
141 static void tb_scan_port(struct tb_port
*port
)
143 struct tb_cm
*tcm
= tb_priv(port
->sw
->tb
);
144 struct tb_port
*upstream_port
;
145 struct tb_switch
*sw
;
147 if (tb_is_upstream_port(port
))
150 if (tb_port_is_dpout(port
) && tb_dp_port_hpd_is_active(port
) == 1 &&
151 !tb_dp_port_is_enabled(port
)) {
152 tb_port_dbg(port
, "DP adapter HPD set, queuing hotplug\n");
153 tb_queue_hotplug(port
->sw
->tb
, tb_route(port
->sw
), port
->port
,
158 if (port
->config
.type
!= TB_TYPE_PORT
)
160 if (port
->dual_link_port
&& port
->link_nr
)
162 * Downstream switch is reachable through two ports.
163 * Only scan on the primary port (link_nr == 0).
165 if (tb_wait_for_port(port
, false) <= 0)
168 tb_port_dbg(port
, "port already has a remote\n");
171 sw
= tb_switch_alloc(port
->sw
->tb
, &port
->sw
->dev
,
172 tb_downstream_route(port
));
175 * If there is an error accessing the connected switch
176 * it may be connected to another domain. Also we allow
177 * the other domain to be connected to a max depth switch.
179 if (PTR_ERR(sw
) == -EIO
|| PTR_ERR(sw
) == -EADDRNOTAVAIL
)
180 tb_scan_xdomain(port
);
184 if (tb_switch_configure(sw
)) {
190 * If there was previously another domain connected remove it
194 tb_xdomain_remove(port
->xdomain
);
195 port
->xdomain
= NULL
;
199 * Do not send uevents until we have discovered all existing
200 * tunnels and know which switches were authorized already by
203 if (!tcm
->hotplug_active
)
204 dev_set_uevent_suppress(&sw
->dev
, true);
206 if (tb_switch_add(sw
)) {
211 /* Link the switches using both links if available */
212 upstream_port
= tb_upstream_port(sw
);
213 port
->remote
= upstream_port
;
214 upstream_port
->remote
= port
;
215 if (port
->dual_link_port
&& upstream_port
->dual_link_port
) {
216 port
->dual_link_port
->remote
= upstream_port
->dual_link_port
;
217 upstream_port
->dual_link_port
->remote
= port
->dual_link_port
;
223 static int tb_free_tunnel(struct tb
*tb
, enum tb_tunnel_type type
,
224 struct tb_port
*src_port
, struct tb_port
*dst_port
)
226 struct tb_cm
*tcm
= tb_priv(tb
);
227 struct tb_tunnel
*tunnel
;
229 list_for_each_entry(tunnel
, &tcm
->tunnel_list
, list
) {
230 if (tunnel
->type
== type
&&
231 ((src_port
&& src_port
== tunnel
->src_port
) ||
232 (dst_port
&& dst_port
== tunnel
->dst_port
))) {
233 tb_tunnel_deactivate(tunnel
);
234 list_del(&tunnel
->list
);
235 tb_tunnel_free(tunnel
);
244 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
246 static void tb_free_invalid_tunnels(struct tb
*tb
)
248 struct tb_cm
*tcm
= tb_priv(tb
);
249 struct tb_tunnel
*tunnel
;
252 list_for_each_entry_safe(tunnel
, n
, &tcm
->tunnel_list
, list
) {
253 if (tb_tunnel_is_invalid(tunnel
)) {
254 tb_tunnel_deactivate(tunnel
);
255 list_del(&tunnel
->list
);
256 tb_tunnel_free(tunnel
);
262 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
264 static void tb_free_unplugged_children(struct tb_switch
*sw
)
267 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++) {
268 struct tb_port
*port
= &sw
->ports
[i
];
270 if (!tb_port_has_remote(port
))
273 if (port
->remote
->sw
->is_unplugged
) {
274 tb_switch_remove(port
->remote
->sw
);
276 if (port
->dual_link_port
)
277 port
->dual_link_port
->remote
= NULL
;
279 tb_free_unplugged_children(port
->remote
->sw
);
285 * tb_find_port() - return the first port of @type on @sw or NULL
286 * @sw: Switch to find the port from
287 * @type: Port type to look for
289 static struct tb_port
*tb_find_port(struct tb_switch
*sw
,
290 enum tb_port_type type
)
293 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++)
294 if (sw
->ports
[i
].config
.type
== type
)
295 return &sw
->ports
[i
];
300 * tb_find_unused_port() - return the first inactive port on @sw
301 * @sw: Switch to find the port on
302 * @type: Port type to look for
304 static struct tb_port
*tb_find_unused_port(struct tb_switch
*sw
,
305 enum tb_port_type type
)
309 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++) {
310 if (tb_is_upstream_port(&sw
->ports
[i
]))
312 if (sw
->ports
[i
].config
.type
!= type
)
314 if (!sw
->ports
[i
].cap_adap
)
316 if (tb_port_is_enabled(&sw
->ports
[i
]))
318 return &sw
->ports
[i
];
323 static struct tb_port
*tb_find_pcie_down(struct tb_switch
*sw
,
324 const struct tb_port
*port
)
327 * To keep plugging devices consistently in the same PCIe
328 * hierarchy, do mapping here for root switch downstream PCIe
332 int phy_port
= tb_phy_port_from_link(port
->port
);
336 * Hard-coded Thunderbolt port to PCIe down port mapping
339 if (tb_switch_is_cr(sw
))
340 index
= !phy_port
? 6 : 7;
341 else if (tb_switch_is_fr(sw
))
342 index
= !phy_port
? 6 : 8;
346 /* Validate the hard-coding */
347 if (WARN_ON(index
> sw
->config
.max_port_number
))
349 if (WARN_ON(!tb_port_is_pcie_down(&sw
->ports
[index
])))
351 if (WARN_ON(tb_pci_port_is_enabled(&sw
->ports
[index
])))
354 return &sw
->ports
[index
];
358 return tb_find_unused_port(sw
, TB_TYPE_PCIE_DOWN
);
361 static int tb_tunnel_dp(struct tb
*tb
, struct tb_port
*out
)
363 struct tb_cm
*tcm
= tb_priv(tb
);
364 struct tb_switch
*sw
= out
->sw
;
365 struct tb_tunnel
*tunnel
;
368 if (tb_port_is_enabled(out
))
372 sw
= tb_to_switch(sw
->dev
.parent
);
375 in
= tb_find_unused_port(sw
, TB_TYPE_DP_HDMI_IN
);
378 tunnel
= tb_tunnel_alloc_dp(tb
, in
, out
);
380 tb_port_dbg(out
, "DP tunnel allocation failed\n");
384 if (tb_tunnel_activate(tunnel
)) {
385 tb_port_info(out
, "DP tunnel activation failed, aborting\n");
386 tb_tunnel_free(tunnel
);
390 list_add_tail(&tunnel
->list
, &tcm
->tunnel_list
);
394 static void tb_teardown_dp(struct tb
*tb
, struct tb_port
*out
)
396 tb_free_tunnel(tb
, TB_TUNNEL_DP
, NULL
, out
);
399 static int tb_tunnel_pci(struct tb
*tb
, struct tb_switch
*sw
)
401 struct tb_port
*up
, *down
, *port
;
402 struct tb_cm
*tcm
= tb_priv(tb
);
403 struct tb_switch
*parent_sw
;
404 struct tb_tunnel
*tunnel
;
406 up
= tb_find_port(sw
, TB_TYPE_PCIE_UP
);
411 * Look up available down port. Since we are chaining it should
412 * be found right above this switch.
414 parent_sw
= tb_to_switch(sw
->dev
.parent
);
415 port
= tb_port_at(tb_route(sw
), parent_sw
);
416 down
= tb_find_pcie_down(parent_sw
, port
);
420 tunnel
= tb_tunnel_alloc_pci(tb
, up
, down
);
424 if (tb_tunnel_activate(tunnel
)) {
426 "PCIe tunnel activation failed, aborting\n");
427 tb_tunnel_free(tunnel
);
431 list_add_tail(&tunnel
->list
, &tcm
->tunnel_list
);
435 static int tb_approve_xdomain_paths(struct tb
*tb
, struct tb_xdomain
*xd
)
437 struct tb_cm
*tcm
= tb_priv(tb
);
438 struct tb_port
*nhi_port
, *dst_port
;
439 struct tb_tunnel
*tunnel
;
440 struct tb_switch
*sw
;
442 sw
= tb_to_switch(xd
->dev
.parent
);
443 dst_port
= tb_port_at(xd
->route
, sw
);
444 nhi_port
= tb_find_port(tb
->root_switch
, TB_TYPE_NHI
);
446 mutex_lock(&tb
->lock
);
447 tunnel
= tb_tunnel_alloc_dma(tb
, nhi_port
, dst_port
, xd
->transmit_ring
,
448 xd
->transmit_path
, xd
->receive_ring
,
451 mutex_unlock(&tb
->lock
);
455 if (tb_tunnel_activate(tunnel
)) {
456 tb_port_info(nhi_port
,
457 "DMA tunnel activation failed, aborting\n");
458 tb_tunnel_free(tunnel
);
459 mutex_unlock(&tb
->lock
);
463 list_add_tail(&tunnel
->list
, &tcm
->tunnel_list
);
464 mutex_unlock(&tb
->lock
);
468 static void __tb_disconnect_xdomain_paths(struct tb
*tb
, struct tb_xdomain
*xd
)
470 struct tb_port
*dst_port
;
471 struct tb_switch
*sw
;
473 sw
= tb_to_switch(xd
->dev
.parent
);
474 dst_port
= tb_port_at(xd
->route
, sw
);
477 * It is possible that the tunnel was already teared down (in
478 * case of cable disconnect) so it is fine if we cannot find it
481 tb_free_tunnel(tb
, TB_TUNNEL_DMA
, NULL
, dst_port
);
484 static int tb_disconnect_xdomain_paths(struct tb
*tb
, struct tb_xdomain
*xd
)
486 if (!xd
->is_unplugged
) {
487 mutex_lock(&tb
->lock
);
488 __tb_disconnect_xdomain_paths(tb
, xd
);
489 mutex_unlock(&tb
->lock
);
/* hotplug handling */
497 * tb_handle_hotplug() - handle hotplug event
499 * Executes on tb->wq.
501 static void tb_handle_hotplug(struct work_struct
*work
)
503 struct tb_hotplug_event
*ev
= container_of(work
, typeof(*ev
), work
);
504 struct tb
*tb
= ev
->tb
;
505 struct tb_cm
*tcm
= tb_priv(tb
);
506 struct tb_switch
*sw
;
507 struct tb_port
*port
;
508 mutex_lock(&tb
->lock
);
509 if (!tcm
->hotplug_active
)
510 goto out
; /* during init, suspend or shutdown */
512 sw
= tb_switch_find_by_route(tb
, ev
->route
);
515 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
516 ev
->route
, ev
->port
, ev
->unplug
);
519 if (ev
->port
> sw
->config
.max_port_number
) {
521 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
522 ev
->route
, ev
->port
, ev
->unplug
);
525 port
= &sw
->ports
[ev
->port
];
526 if (tb_is_upstream_port(port
)) {
527 tb_dbg(tb
, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
528 ev
->route
, ev
->port
, ev
->unplug
);
532 if (tb_port_has_remote(port
)) {
533 tb_port_dbg(port
, "switch unplugged\n");
534 tb_sw_set_unplugged(port
->remote
->sw
);
535 tb_free_invalid_tunnels(tb
);
536 tb_switch_remove(port
->remote
->sw
);
538 if (port
->dual_link_port
)
539 port
->dual_link_port
->remote
= NULL
;
540 } else if (port
->xdomain
) {
541 struct tb_xdomain
*xd
= tb_xdomain_get(port
->xdomain
);
543 tb_port_dbg(port
, "xdomain unplugged\n");
545 * Service drivers are unbound during
546 * tb_xdomain_remove() so setting XDomain as
547 * unplugged here prevents deadlock if they call
548 * tb_xdomain_disable_paths(). We will tear down
551 xd
->is_unplugged
= true;
552 tb_xdomain_remove(xd
);
553 port
->xdomain
= NULL
;
554 __tb_disconnect_xdomain_paths(tb
, xd
);
556 } else if (tb_port_is_dpout(port
)) {
557 tb_teardown_dp(tb
, port
);
560 "got unplug event for disconnected port, ignoring\n");
562 } else if (port
->remote
) {
563 tb_port_dbg(port
, "got plug event for connected port, ignoring\n");
565 if (tb_port_is_null(port
)) {
566 tb_port_dbg(port
, "hotplug: scanning\n");
569 tb_port_dbg(port
, "hotplug: no switch found\n");
570 } else if (tb_port_is_dpout(port
)) {
571 tb_tunnel_dp(tb
, port
);
578 mutex_unlock(&tb
->lock
);
583 * tb_schedule_hotplug_handler() - callback function for the control channel
585 * Delegates to tb_handle_hotplug.
587 static void tb_handle_event(struct tb
*tb
, enum tb_cfg_pkg_type type
,
588 const void *buf
, size_t size
)
590 const struct cfg_event_pkg
*pkg
= buf
;
593 if (type
!= TB_CFG_PKG_EVENT
) {
594 tb_warn(tb
, "unexpected event %#x, ignoring\n", type
);
598 route
= tb_cfg_get_route(&pkg
->header
);
600 if (tb_cfg_error(tb
->ctl
, route
, pkg
->port
,
601 TB_CFG_ERROR_ACK_PLUG_EVENT
)) {
602 tb_warn(tb
, "could not ack plug event on %llx:%x\n", route
,
606 tb_queue_hotplug(tb
, route
, pkg
->port
, pkg
->unplug
);
609 static void tb_stop(struct tb
*tb
)
611 struct tb_cm
*tcm
= tb_priv(tb
);
612 struct tb_tunnel
*tunnel
;
615 /* tunnels are only present after everything has been initialized */
616 list_for_each_entry_safe(tunnel
, n
, &tcm
->tunnel_list
, list
) {
618 * DMA tunnels require the driver to be functional so we
619 * tear them down. Other protocol tunnels can be left
622 if (tb_tunnel_is_dma(tunnel
))
623 tb_tunnel_deactivate(tunnel
);
624 tb_tunnel_free(tunnel
);
626 tb_switch_remove(tb
->root_switch
);
627 tcm
->hotplug_active
= false; /* signal tb_handle_hotplug to quit */
630 static int tb_scan_finalize_switch(struct device
*dev
, void *data
)
632 if (tb_is_switch(dev
)) {
633 struct tb_switch
*sw
= tb_to_switch(dev
);
636 * If we found that the switch was already setup by the
637 * boot firmware, mark it as authorized now before we
638 * send uevent to userspace.
643 dev_set_uevent_suppress(dev
, false);
644 kobject_uevent(&dev
->kobj
, KOBJ_ADD
);
645 device_for_each_child(dev
, NULL
, tb_scan_finalize_switch
);
651 static int tb_start(struct tb
*tb
)
653 struct tb_cm
*tcm
= tb_priv(tb
);
656 tb
->root_switch
= tb_switch_alloc(tb
, &tb
->dev
, 0);
657 if (IS_ERR(tb
->root_switch
))
658 return PTR_ERR(tb
->root_switch
);
661 * ICM firmware upgrade needs running firmware and in native
662 * mode that is not available so disable firmware upgrade of the
665 tb
->root_switch
->no_nvm_upgrade
= true;
667 ret
= tb_switch_configure(tb
->root_switch
);
669 tb_switch_put(tb
->root_switch
);
673 /* Announce the switch to the world */
674 ret
= tb_switch_add(tb
->root_switch
);
676 tb_switch_put(tb
->root_switch
);
680 /* Full scan to discover devices added before the driver was loaded. */
681 tb_scan_switch(tb
->root_switch
);
682 /* Find out tunnels created by the boot firmware */
683 tb_discover_tunnels(tb
->root_switch
);
684 /* Make the discovered switches available to the userspace */
685 device_for_each_child(&tb
->root_switch
->dev
, NULL
,
686 tb_scan_finalize_switch
);
688 /* Allow tb_handle_hotplug to progress events */
689 tcm
->hotplug_active
= true;
693 static int tb_suspend_noirq(struct tb
*tb
)
695 struct tb_cm
*tcm
= tb_priv(tb
);
697 tb_dbg(tb
, "suspending...\n");
698 tb_switch_suspend(tb
->root_switch
);
699 tcm
->hotplug_active
= false; /* signal tb_handle_hotplug to quit */
700 tb_dbg(tb
, "suspend finished\n");
705 static int tb_resume_noirq(struct tb
*tb
)
707 struct tb_cm
*tcm
= tb_priv(tb
);
708 struct tb_tunnel
*tunnel
, *n
;
710 tb_dbg(tb
, "resuming...\n");
712 /* remove any pci devices the firmware might have setup */
713 tb_switch_reset(tb
, 0);
715 tb_switch_resume(tb
->root_switch
);
716 tb_free_invalid_tunnels(tb
);
717 tb_free_unplugged_children(tb
->root_switch
);
718 list_for_each_entry_safe(tunnel
, n
, &tcm
->tunnel_list
, list
)
719 tb_tunnel_restart(tunnel
);
720 if (!list_empty(&tcm
->tunnel_list
)) {
722 * the pcie links need some time to get going.
723 * 100ms works for me...
725 tb_dbg(tb
, "tunnels restarted, sleeping for 100ms\n");
728 /* Allow tb_handle_hotplug to progress events */
729 tcm
->hotplug_active
= true;
730 tb_dbg(tb
, "resume finished\n");
735 static int tb_free_unplugged_xdomains(struct tb_switch
*sw
)
739 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++) {
740 struct tb_port
*port
= &sw
->ports
[i
];
742 if (tb_is_upstream_port(port
))
744 if (port
->xdomain
&& port
->xdomain
->is_unplugged
) {
745 tb_xdomain_remove(port
->xdomain
);
746 port
->xdomain
= NULL
;
748 } else if (port
->remote
) {
749 ret
+= tb_free_unplugged_xdomains(port
->remote
->sw
);
756 static void tb_complete(struct tb
*tb
)
759 * Release any unplugged XDomains and if there is a case where
760 * another domain is swapped in place of unplugged XDomain we
761 * need to run another rescan.
763 mutex_lock(&tb
->lock
);
764 if (tb_free_unplugged_xdomains(tb
->root_switch
))
765 tb_scan_switch(tb
->root_switch
);
766 mutex_unlock(&tb
->lock
);
769 static const struct tb_cm_ops tb_cm_ops
= {
772 .suspend_noirq
= tb_suspend_noirq
,
773 .resume_noirq
= tb_resume_noirq
,
774 .complete
= tb_complete
,
775 .handle_event
= tb_handle_event
,
776 .approve_switch
= tb_tunnel_pci
,
777 .approve_xdomain_paths
= tb_approve_xdomain_paths
,
778 .disconnect_xdomain_paths
= tb_disconnect_xdomain_paths
,
781 struct tb
*tb_probe(struct tb_nhi
*nhi
)
786 if (!x86_apple_machine
)
789 tb
= tb_domain_alloc(nhi
, sizeof(*tcm
));
793 tb
->security_level
= TB_SECURITY_USER
;
794 tb
->cm_ops
= &tb_cm_ops
;
797 INIT_LIST_HEAD(&tcm
->tunnel_list
);