// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}
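/*
 * Note: the pointer arithmetic in tcm_to_tb() assumes that the connection
 * manager private data (struct tb_cm) is allocated directly after struct tb
 * in the same allocation (see tb_domain_alloc() in tb_probe() below), so
 * tb_priv() and tcm_to_tb() are inverses of each other.
 */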
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}
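/*
 * Hotplug notifications are not handled where they arrive: each one is
 * copied into a tb_hotplug_event, queued on tb->wq and processed later by
 * tb_handle_hotplug() under tb->lock, after the domain has been runtime
 * resumed. This keeps the control channel callback short and serializes
 * all topology changes on one workqueue.
 */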
/* enumeration & hot plug handling */
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}
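/*
 * Walk the topology and adopt any tunnels the boot firmware has already set
 * up (DP, PCIe and USB3), so that they are accounted for before the software
 * connection manager starts creating new ones.
 */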
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}
static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support single lane so we must
	 * disable the other lane according to USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}
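/*
 * The TMU (Time Management Unit) keeps time synchronized across the routers
 * of the domain. tb_enable_tmu() is called both when a new router is scanned
 * and when the configuration is restored after resume, and it only
 * reprograms the unit if it is not already running in the desired mode.
 */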
/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}
static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
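/*
 * Worked example (a sketch, not taken from any particular topology): a
 * bonded link running at link_speed = 20 Gb/s per lane with link_width = 2
 * gives a raw link bandwidth of 20 * 2 * 1000 = 40000 Mb/s. After the 10%
 * guard band this leaves 36000 Mb/s, from which the bandwidth consumed by
 * any DP and USB3 tunnels crossing that link is subtracted; the minimum
 * over all links on the path is what a new tunnel may use.
 */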
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}
static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;
	}

	/* Make all unused bandwidth available for the new tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, down, up);
	if (ret)
		return ret;

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}
static void tb_scan_port(struct tb_port *port);
/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment only Thunderbolt 2 and beyond (devices with LC)
	 * can support runtime PM.
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}
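/*
 * Scanning is depth first: tb_scan_switch() calls tb_scan_port() for every
 * port and each newly added router is scanned again in turn, so a whole
 * chain of devices plugged in at once is enumerated from a single hotplug
 * event on the topmost port.
 */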
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}
/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}
/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
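/*
 * Example of the hard-coded mapping above: on a Falcon Ridge host router
 * phy port 0 maps to PCIe down adapter 6 and phy port 1 to adapter 8, so a
 * device plugged into the same physical connector always ends up under the
 * same PCIe hierarchy. Any controller not covered by the table falls back
 * to the first unused PCIe down adapter.
 */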
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}
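/*
 * The ordering in tb_tunnel_dp() matters: the DP IN resource is allocated
 * and unused USB3 bandwidth released before the available bandwidth is
 * computed, so the new DP tunnel can claim as much of the path as possible;
 * whatever it does not end up using is handed back to USB3 afterwards via
 * tb_reclaim_usb3_bandwidth(). The error labels undo these steps in reverse
 * order.
 */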
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}
static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}
static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}
/* hotplug handling */
/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the tunnel below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
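/*
 * Note the pairing in tb_handle_hotplug(): a runtime PM reference is held on
 * the domain (and on the affected router) only for the duration of the
 * handler, and the event structure allocated in tb_queue_hotplug() is freed
 * once the work item has run.
 */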
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
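/*
 * hotplug_active is set only after the initial scan and tunnel discovery
 * have finished: tb_handle_hotplug() bails out early while it is false, so
 * plug events that arrive during startup are ignored until the connection
 * manager has a consistent view of the topology.
 */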
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}
static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
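/*
 * The domain core invokes these callbacks on behalf of the software
 * connection manager: .approve_switch, for example, is called when a device
 * is authorized and ends up creating the PCIe tunnel via tb_tunnel_pci(),
 * while .handle_event feeds control channel notifications into the hotplug
 * machinery above.
 */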
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");