// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

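/*
 * Queue every DP IN adapter of @sw that the switch reports as an
 * available DP resource onto the connection manager's dp_resources
 * list, where tb_tunnel_dp() can later pair it with a DP OUT adapter.
 */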
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP %s resource unavailable\n",
				    tb_port_is_dpin(port) ? "IN" : "OUT");
			list_del_init(&port->list);
		}
	}
}

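/*
 * Pick up DP, PCIe and USB3 tunnels that the boot firmware has already
 * established and add them to the connection manager's tunnel list.
 * Recurses into every switch below @sw.
 */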
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			/*
			 * The PCIe tunnel was set up by the boot
			 * firmware, so mark the whole chain of switches
			 * as boot configured.
			 */
			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

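/*
 * Make sure the Time Management Unit of @sw runs in the expected mode:
 * if it is already enabled correctly nothing is done, otherwise it is
 * disabled, the switch time is posted and the TMU is enabled again.
 */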
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down) {
		if (WARN_ON(!tb_port_is_usb3_down(down)))
			goto out;
		if (WARN_ON(tb_usb3_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
}

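/*
 * Create a USB3 tunnel between the USB3 upstream adapter of @sw and a
 * USB3 down adapter on the parent switch. Since tunnels are chained,
 * the down adapter is expected to be found right above this switch.
 */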
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;
	}

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

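/*
 * Create USB3 tunnels recursively for @sw and all switches below it.
 * Called from tb_start() when the boot firmware did not tunnel USB3
 * itself.
 */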
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_scan_switch(sw);
}

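/*
 * Find an active tunnel of @type. A tunnel matches when @src_port
 * equals its source or @dst_port equals its destination; either port
 * may be NULL to match on the other end only. Returns NULL when no
 * such tunnel exists.
 */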
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

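/*
 * Compute how much bandwidth (in Mb/s) is still available for a new DP
 * tunnel between @in and @out. Every link on the path provides
 * link_speed * link_width * 1000 Mb/s minus a 10% guard band, and the
 * bandwidth consumed by DP tunnels already crossing the link is
 * subtracted; the minimum across the whole path is returned. As an
 * illustration, a bonded link with link_speed 20 and link_width 2
 * yields 40000 Mb/s, or 36000 Mb/s once the guard band is taken off.
 */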
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and reduce their consumed bandwidth from
		 * available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
			if (consumed_bw < 0)
				return consumed_bw;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}

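/*
 * Pick the first free DP IN and DP OUT adapters from the dp_resources
 * list, reserve the DP IN resource, size the new tunnel according to
 * the bandwidth still available on the path and activate it.
 */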
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "available\n");

		if (!in && tb_port_is_dpin(port))
			in = port;
		else if (!out && tb_port_is_dpout(port))
			out = port;
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		goto dealloc_dp;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used for
	 * creating another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

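/*
 * Establish a PCIe tunnel between the PCIe up adapter of @sw and a
 * PCIe down adapter on the parent switch. Wired up as the
 * ->approve_switch hook in tb_cm_ops, so it runs when a switch is
 * authorized.
 */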
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the paths below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

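/*
 * Stop the domain: DMA tunnels are deactivated (they need a functional
 * driver), remaining tunnel bookkeeping is freed, and the whole switch
 * tree is removed starting from the root switch.
 */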
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

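/*
 * Driver start of day: allocate and announce the root switch, scan the
 * existing topology, discover tunnels the boot firmware created, add
 * missing USB 3.x tunnels, and only then let tb_handle_hotplug()
 * progress events.
 */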
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

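/*
 * Re-apply per-switch state (TMU configuration and lane bonding) for
 * @sw and the whole topology below it after resume.
 */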
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going, so give
		 * them 100ms before progressing any further.
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

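/*
 * Free XDomain connections that were unplugged while the domain was
 * suspended. Returns the number of XDomains removed so the caller can
 * decide whether a rescan is needed.
 */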
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

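/*
 * Entry point for the software connection manager: allocate a domain
 * for @nhi with a struct tb_cm as connection manager private data and
 * hook it up to tb_cm_ops. Returns NULL if allocation fails.
 */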
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}