// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#define TB_TIMEOUT		100	/* ms */
#define TB_RELEASE_BW_TIMEOUT	10000	/* ms */

/*
 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
 * direction. This is 40G - 10% guard band bandwidth.
 */
#define TB_ASYM_MIN		(40000 * 90 / 100)

/*
 * Threshold bandwidth (in Mb/s) that is used to switch the links to
 * asymmetric and back. This is selected as 45G which means when the
 * request is higher than this, we switch the link to asymmetric, and
 * when it is less than this we switch it back. The 45G is selected so
 * that we still have 27G (of the total 72G) for bulk PCIe traffic when
 * switching back to symmetric.
 */
#define TB_ASYM_THRESHOLD	45000

#define MAX_GROUPS		7	/* max Group_ID is 7 */

static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
module_param_named(asym_threshold, asym_threshold, uint, 0444);
MODULE_PARM_DESC(asym_threshold,
		"threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
		__MODULE_STRING(TB_ASYM_THRESHOLD) ")");
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};
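/*
 * Note (assumption about how tb_priv() lays out the private data): the
 * connection manager structure is allocated immediately after struct tb,
 * so the helper below recovers the domain pointer by stepping back
 * sizeof(struct tb) bytes from the tb_cm pointer.
 */
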
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}
/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;
		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		/*
		 * If DP IN on a device router exists, position it at the
		 * beginning of the DP resources list, so that it is used
		 * before DP IN of the host router. This way external GPU(s)
		 * will be prioritized when pairing DP IN to a DP OUT.
		 */
		if (tb_route(sw))
			list_add(&port->list, &tcm->dp_resources);
		else
			list_add_tail(&port->list, &tcm->dp_resources);

		tb_port_dbg(port, "DP IN resource available\n");
	}
}
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}
static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}
/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && tb_switch_depth(sw) > 1)
		sw = tb_switch_parent(sw);

	if (tb_switch_depth(sw) != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);

	return ret == -EOPNOTSUPP ? 0 : ret;
}
/**
 * tb_disable_clx() - Disable CL states up to host router
 * @sw: Router to start
 *
 * Disables CL states from @sw up to the host router. Returns true if
 * any CL states were disabled. This can be used to figure out whether
 * the link was set up by us or the boot firmware so we don't
 * accidentally enable them if they were not enabled during discovery.
 */
static bool tb_disable_clx(struct tb_switch *sw)
{
	bool disabled = false;

	do {
		int ret;

		ret = tb_switch_clx_disable(sw);
		if (ret > 0)
			disabled = true;
		else if (ret < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");

		sw = tb_switch_parent(sw);
	} while (sw);

	return disabled;
}
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (!sw)
		return 0;

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;
		int ret;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		else
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	/*
	 * Once first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}
static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used)
{
	struct tb_switch *sw = tb_to_switch(dev);

	if (sw && tb_switch_tmu_is_enabled(sw) &&
	    tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI))
		return 1;

	return device_for_each_child(dev, NULL,
				     tb_switch_tmu_hifi_uni_required);
}
static bool tb_tmu_hifi_uni_required(struct tb *tb)
{
	return device_for_each_child(&tb->dev, NULL,
				     tb_switch_tmu_hifi_uni_required) == 1;
}
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1)) {
			/*
			 * Figure out uni-directional HiFi TMU requirements
			 * currently in the domain. If there are no
			 * uni-directional HiFi requirements we can put the TMU
			 * into the lower resolution mode.
			 *
			 * Deliberately skip bi-directional HiFi links
			 * as these work independently of other links
			 * (and they do not allow any CL states anyway).
			 */
			if (tb_tmu_hifi_uni_required(sw->tb))
				ret = tb_switch_tmu_configure(sw,
						TB_SWITCH_TMU_MODE_HIFI_UNI);
			else
				ret = tb_switch_tmu_configure(sw,
						TB_SWITCH_TMU_MODE_LOWRES);
		} else {
			ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
		}

		/* If not supported, fallback to bi-directional HiFi */
		if (ret == -EOPNOTSUPP)
			ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
	}
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}
static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}
/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}

	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;

	return NULL;
}
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (tb_port_path_direction_downstream(src_port, dst_port))
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
/**
 * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed USB3 and PCIe bandwidth at @port between path
 * from @src_port to @dst_port. Does not take the USB3 tunnel starting from
 * @src_port and ending on @src_port into account because that bandwidth is
 * already included as part of the "first hop" USB3 tunnel.
 */
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
					   struct tb_port *src_port,
					   struct tb_port *dst_port,
					   struct tb_port *port,
					   int *consumed_up, int *consumed_down)
{
	int pci_consumed_up, pci_consumed_down;
	struct tb_tunnel *tunnel;

	*consumed_up = *consumed_down = 0;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && !tb_port_is_usb3_down(src_port) &&
	    !tb_port_is_usb3_up(dst_port)) {
		int ret;

		ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
						   consumed_down);
		if (ret)
			return ret;
	}

	/*
	 * If there is anything reserved for PCIe bulk traffic take it
	 * into account here too.
	 */
	if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
		*consumed_up += pci_consumed_up;
		*consumed_down += pci_consumed_down;
	}

	return 0;
}
/**
 * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed DP bandwidth at @port between path from @src_port
 * to @dst_port. Does not take the tunnel starting from @src_port and
 * ending on @src_port into account.
 *
 * If there is bandwidth reserved for any of the groups between
 * @src_port and @dst_port (but not yet used) that is also taken into
 * account in the returned consumed bandwidth.
 */
static int tb_consumed_dp_bandwidth(struct tb *tb,
				    struct tb_port *src_port,
				    struct tb_port *dst_port,
				    struct tb_port *port,
				    int *consumed_up, int *consumed_down)
{
	int group_reserved[MAX_GROUPS] = {};
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	bool downstream;
	int i, ret;

	*consumed_up = *consumed_down = 0;

	/*
	 * Find all DP tunnels that cross the port and reduce
	 * their consumed bandwidth from the available.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		const struct tb_bandwidth_group *group;
		int dp_consumed_up, dp_consumed_down;

		if (tb_tunnel_is_invalid(tunnel))
			continue;

		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (!tb_tunnel_port_on_path(tunnel, port))
			continue;

		/*
		 * Calculate what is reserved for groups crossing the
		 * same ports only once (as that is reserved for all the
		 * tunnels in the group).
		 */
		group = tunnel->src_port->group;
		if (group && group->reserved && !group_reserved[group->index])
			group_reserved[group->index] = group->reserved;

		/*
		 * Ignore the DP tunnel between src_port and dst_port
		 * because it is the same tunnel and we may be
		 * re-calculating estimated bandwidth.
		 */
		if (tunnel->src_port == src_port &&
		    tunnel->dst_port == dst_port)
			continue;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
						   &dp_consumed_down);
		if (ret)
			return ret;

		*consumed_up += dp_consumed_up;
		*consumed_down += dp_consumed_down;
	}

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
		if (downstream)
			*consumed_down += group_reserved[i];
		else
			*consumed_up += group_reserved[i];
	}

	return 0;
}
static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
			      struct tb_port *port)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	enum tb_link_width width;

	if (tb_is_upstream_port(port))
		width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
	else
		width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;

	return tb_port_width_supported(port, width);
}
/**
 * tb_maximum_bandwidth() - Maximum bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the total bandwidth is calculated for
 * @max_up: Maximum upstream bandwidth (Mb/s)
 * @max_down: Maximum downstream bandwidth (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Returns maximum possible bandwidth in @max_up and @max_down over a
 * single link at @port. If @include_asym is set then includes the
 * additional bandwidth if the links are transitioned into asymmetric
 * in the direction from @src_port to @dst_port.
 */
static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
				struct tb_port *dst_port, struct tb_port *port,
				int *max_up, int *max_down, bool include_asym)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	int link_speed, link_width, up_bw, down_bw;

	/*
	 * Can include asymmetric, only if it is actually supported by
	 * the hardware.
	 */
	if (!tb_asym_supported(src_port, dst_port, port))
		include_asym = false;

	if (tb_is_upstream_port(port)) {
		link_speed = port->sw->link_speed;
		/*
		 * sw->link_width is from upstream perspective so we use
		 * the opposite for downstream of the host router.
		 */
		if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * port->sw->link_width * 1000;
			down_bw = up_bw;
		}
	} else {
		link_speed = tb_port_get_link_speed(port);
		if (link_speed < 0)
			return link_speed;

		link_width = tb_port_get_link_width(port);
		if (link_width < 0)
			return link_width;

		if (link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * link_width * 1000;
			down_bw = up_bw;
		}
	}

	/* Leave 10% guard band */
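	/*
	 * For example, assuming 40 Gb/s per lane (Gen 4): a symmetric
	 * dual-lane link gives 40 * 2 * 1000 = 80000 Mb/s each way, trimmed
	 * below to 72000 Mb/s; an asymmetric 3 + 1 split gives
	 * 40 * 3 * 1000 = 120000 and 40 * 1 * 1000 = 40000 Mb/s, trimmed to
	 * 108000 and 36000 Mb/s respectively.
	 */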
	*max_up = up_bw - up_bw / 10;
	*max_down = down_bw - down_bw / 10;

	tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
	return 0;
}
/**
 * tb_available_bandwidth() - Available bandwidth for tunneling
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @available_up: Available bandwidth upstream (Mb/s)
 * @available_down: Available bandwidth downstream (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Calculates maximum available bandwidth for protocol tunneling between
 * @src_port and @dst_port at the moment. This is the minimum of the
 * maximum link bandwidth across all links reduced by the currently
 * consumed bandwidth on each link.
 *
 * If @include_asym is true then this also includes bandwidth that can be
 * added when the links are transitioned into asymmetric (but does not
 * transition the links).
 */
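/*
 * In other words, over each USB4 link on the path:
 *
 *   available = min(link maximum - consumed USB3/PCIe - consumed DP)
 *
 * clamped to zero if the subtraction would go negative.
 */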
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down, bool include_asym)
{
	struct tb_port *port;
	int ret;

	/* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int max_up, max_down, consumed_up, consumed_down;

		if (!tb_port_is_null(port))
			continue;

		ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
					   &max_up, &max_down, include_asym);
		if (ret)
			return ret;

		ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
						      port, &consumed_up,
						      &consumed_down);
		if (ret)
			return ret;

		max_up -= consumed_up;
		max_down -= consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
					       &consumed_up, &consumed_down);
		if (ret)
			return ret;

		max_up -= consumed_up;
		max_down -= consumed_down;

		if (max_up < *available_up)
			*available_up = max_up;
		if (max_down < *available_down)
			*available_down = max_down;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down, false);
	if (ret) {
		tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
		return;
	}

	tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
		      available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
				     false);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * tb_configure_asym() - Transition links to asymmetric if needed
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @requested_up: Additional bandwidth (Mb/s) required upstream
 * @requested_down: Additional bandwidth (Mb/s) required downstream
 *
 * Transition links between @src_port and @dst_port into asymmetric, with
 * three lanes in the direction from @src_port towards @dst_port and one lane
 * in the opposite direction, if the bandwidth requirements
 * (requested + currently consumed) on that link exceed @asym_threshold.
 *
 * Must be called with available >= requested over all links.
 */
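/*
 * For example, with the default asym_threshold of 45000 Mb/s: if a link
 * already carries 30000 Mb/s of DP downstream and a new stream requests
 * another 20000 Mb/s, then 30000 + 20000 >= 45000, so the link is switched
 * to asymmetric (three lanes downstream, one upstream), provided the
 * upstream direction stays within TB_ASYM_MIN.
 */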
static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
			     struct tb_port *dst_port, int requested_up,
			     int requested_down)
{
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		struct tb_port *down = tb_switch_downstream_port(up->sw);
		enum tb_link_width width_up, width_down;
		int consumed_up, consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so make sure upstream is within the 36G
			 * (40G - guard band 10%), and the requested is above
			 * what the threshold is.
			 */
			if (consumed_up + requested_up >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}
			/* Does consumed + requested exceed the threshold */
			if (consumed_down + requested_down < asym_threshold)
				continue;

			width_up = TB_LINK_WIDTH_ASYM_RX;
			width_down = TB_LINK_WIDTH_ASYM_TX;
		} else {
			/* Upstream, the opposite of above */
			if (consumed_down + requested_down >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}
			if (consumed_up + requested_up < asym_threshold)
				continue;

			width_up = TB_LINK_WIDTH_ASYM_TX;
			width_down = TB_LINK_WIDTH_ASYM_RX;
		}

		if (up->sw->link_width == width_up)
			continue;

		if (!tb_port_width_supported(up, width_up) ||
		    !tb_port_width_supported(down, width_down))
			continue;

		/*
		 * Disable CL states before doing any transitions. We
		 * delayed it until now that we know there is a real
		 * transition taking place.
		 */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;
		}

		tb_sw_dbg(up->sw, "configuring asymmetric link\n");

		/*
		 * Here requested + consumed > threshold so we need to
		 * transition the link into asymmetric now.
		 */
		ret = tb_switch_set_link_width(up->sw, width_up);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}
/**
 * tb_configure_sym() - Transition links to symmetric if possible
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @keep_asym: Keep asymmetric link if preferred
 *
 * Goes over each link from @src_port to @dst_port and tries to
 * transition the link to symmetric if the currently consumed bandwidth
 * allows it; the router's asymmetric link preference is ignored when
 * @keep_asym is %false.
 */
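/*
 * For example, continuing the numbers above: once the DP streams on a link
 * drop back below the 45000 Mb/s threshold, the link can be returned to the
 * symmetric dual-lane configuration, unless @keep_asym is set and the
 * router prefers to stay asymmetric.
 */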
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
			    struct tb_port *dst_port, bool keep_asym)
{
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		int consumed_up, consumed_down;

		/* Already symmetric */
		if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
			continue;
		/* Unplugged, no need to switch */
		if (up->sw->is_unplugged)
			continue;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so we want the consumed_down < threshold.
			 * Upstream traffic should be less than 36G (40G -
			 * guard band 10%) as the link was configured asymmetric
			 * already.
			 */
			if (consumed_down >= asym_threshold)
				continue;
		} else {
			if (consumed_up >= asym_threshold)
				continue;
		}

		if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
			continue;

		/*
		 * Here consumed < threshold so we can transition the
		 * link to symmetric.
		 *
		 * However, if the router prefers asymmetric link we
		 * honor that (unless @keep_asym is %false).
		 */
		if (keep_asym &&
		    up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
			tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
			continue;
		}

		/* Disable CL states before doing any transitions */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;
		}

		tb_sw_dbg(up->sw, "configuring symmetric link\n");

		ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}
static void tb_configure_link(struct tb_port *down, struct tb_port *up,
			      struct tb_switch *sw)
{
	struct tb *tb = sw->tb;

	/* Link the routers using both links if available */
	down->remote = up;
	up->remote = down;
	if (down->dual_link_port && up->dual_link_port) {
		down->dual_link_port->remote = up->dual_link_port;
		up->dual_link_port->remote = down->dual_link_port;
	}

	/*
	 * Enable lane bonding if the link is currently two single lane
	 * links.
	 */
	if (sw->link_width < TB_LINK_WIDTH_DUAL)
		tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);

	/*
	 * When a device router that comes up with a symmetric link is
	 * connected deeper in the hierarchy, we transition the links
	 * above it into symmetric if bandwidth allows.
	 */
	if (tb_switch_depth(sw) > 1 &&
	    tb_port_get_link_generation(up) >= 4 &&
	    up->sw->link_width == TB_LINK_WIDTH_DUAL) {
		struct tb_port *host_port;

		host_port = tb_port_at(tb_route(sw), tb->root_switch);
		tb_configure_sym(tb, host_port, up, false);
	}

	/* Set the link configured */
	tb_switch_configure_link(sw);
}

static void tb_scan_port(struct tb_port *port);
/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;

	/*
	 * Downstream switch is reachable through two ports.
	 * Only scan on the primary port (link_nr == 0).
	 */
	if (port->dual_link_port && port->link_nr)
		return;

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment, for Thunderbolt 2 and beyond (devices with LC) we
	 * can support runtime PM.
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm;
	}

	upstream_port = tb_upstream_port(sw);
	tb_configure_link(port, upstream_port, sw);

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *first_tunnel;
	struct tb *tb = group->tb;
	struct tb_port *in;
	int ret;

	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
	       group->index);

	first_tunnel = NULL;
	list_for_each_entry(in, &group->ports, group_list) {
		int estimated_bw, estimated_up, estimated_down;
		struct tb_tunnel *tunnel;
		struct tb_port *out;

		if (!usb4_dp_port_bandwidth_mode_enabled(in))
			continue;

		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
		if (WARN_ON(!tunnel))
			break;

		if (!first_tunnel) {
			/*
			 * Since USB3 bandwidth is shared by all DP
			 * tunnels under the host router USB4 port, even
			 * if they do not begin from the host router, we
			 * can release USB3 bandwidth just once and not
			 * for each tunnel separately.
			 */
			first_tunnel = tunnel;
			ret = tb_release_unused_usb3_bandwidth(tb,
				first_tunnel->src_port, first_tunnel->dst_port);
			if (ret) {
				tb_tunnel_warn(tunnel,
					"failed to release unused bandwidth\n");
				break;
			}
		}

		out = tunnel->dst_port;
		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
					     &estimated_down, true);
		if (ret) {
			tb_tunnel_warn(tunnel,
				"failed to re-calculate estimated bandwidth\n");
			break;
		}

		/*
		 * Estimated bandwidth includes:
		 *  - already allocated bandwidth for the DP tunnel
		 *  - available bandwidth along the path
		 *  - bandwidth allocated for USB 3.x but not used.
		 */
		if (tb_tunnel_direction_downstream(tunnel))
			estimated_bw = estimated_down;
		else
			estimated_bw = estimated_up;

		/*
		 * If there is reserved bandwidth for the group that is
		 * not yet released we report that too.
		 */
		tb_tunnel_dbg(tunnel,
			      "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
			      estimated_bw, group->reserved,
			      estimated_bw + group->reserved);

		if (usb4_dp_port_set_estimated_bandwidth(in,
				estimated_bw + group->reserved))
			tb_tunnel_warn(tunnel,
				       "failed to update estimated bandwidth\n");
	}

	if (first_tunnel)
		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
					  first_tunnel->dst_port);

	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
}
static void tb_recalc_estimated_bandwidth(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int i;

	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (!list_empty(&group->ports))
			tb_recalc_estimated_bandwidth_for_group(group);
	}

	tb_dbg(tb, "bandwidth re-calculation done\n");
}
static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
{
	if (group->reserved) {
		tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
		       group->reserved);
		group->reserved = 0;
		return true;
	}

	return false;
}

static void __configure_group_sym(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *tunnel;
	struct tb_port *in;

	if (list_empty(&group->ports))
		return;

	/*
	 * All the tunnels in the group go through the same USB4 links
	 * so we find the first one here and pass the IN and OUT
	 * adapters to tb_configure_sym() which now transitions the
	 * links back to symmetric if bandwidth requirement < asym_threshold.
	 *
	 * We do this here to avoid unnecessary transitions (for example
	 * if the graphics released bandwidth for other tunnel in the
	 * same group).
	 */
	in = list_first_entry(&group->ports, struct tb_port, group_list);
	tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
	if (tunnel)
		tb_configure_sym(group->tb, in, tunnel->dst_port, true);
}

static void tb_bandwidth_group_release_work(struct work_struct *work)
{
	struct tb_bandwidth_group *group =
		container_of(work, typeof(*group), release_work.work);
	struct tb *tb = group->tb;

	mutex_lock(&tb->lock);
	if (__release_group_bandwidth(group))
		tb_recalc_estimated_bandwidth(tb);
	__configure_group_sym(group);
	mutex_unlock(&tb->lock);
}
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
		INIT_DELAYED_WORK(&group->release_work,
				  tb_bandwidth_group_release_work);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}
static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;
		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Pick up next available group then */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);

		/* No more tunnels so release the reserved bandwidth if any */
		if (list_empty(&group->ports)) {
			cancel_delayed_work(&group->release_work);
			__release_group_bandwidth(group);
		}
	}
}
static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/*
		 * If bandwidth on a link is < asym_threshold
		 * transition the link to symmetric.
		 */
		tb_configure_sym(tb, src_port, dst_port, true);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}
/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_set_link_width(port->remote->sw,
						 TB_LINK_WIDTH_SINGLE);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP OUT in use\n");
			continue;
		}

		/* Needs to be on different routers */
		if (in->sw == port->sw) {
			tb_port_dbg(port, "skipping DP OUT on same router\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}
static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
			     struct tb_port *out)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	int consumed_up, consumed_down;
	struct tb_tunnel *tunnel;

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	if (!tb_attach_bandwidth_group(tcm, in, out))
		goto err_dealloc_dp;

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_detach_group;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
				     true);
	if (ret)
		goto err_reclaim_usb;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim_usb;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	/* If fail reading tunnel's consumed bandwidth, tear it down */
	ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
	if (ret)
		goto err_deactivate;

	list_add_tail(&tunnel->list, &tcm->tunnel_list);

	tb_reclaim_usb3_bandwidth(tb, in, out);
	/*
	 * Transition the links to asymmetric if the consumption exceeds
	 * the threshold.
	 */
	tb_configure_asym(tb, in, out, consumed_up, consumed_down);

	/* Update the domain with the new bandwidth estimation */
	tb_recalc_estimated_bandwidth(tb);

	/*
	 * In case a DP tunnel exists, change the host router's 1st children
	 * TMU mode to HiFi for CL0s to work.
	 */
	tb_increase_tmu_accuracy(tunnel);
	return true;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);
err_reclaim_usb:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_detach_group:
	tb_detach_bandwidth_group(in);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);

	return false;
}
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP IN in use\n");
			continue;
		}

		in = port;
		tb_port_dbg(in, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out)
			tb_tunnel_one_dp(tb, in, out);
		else
			tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
	}

	if (!in)
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
}
static void tb_enter_redrive(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;

	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
		return;

	/*
	 * If we get hot-unplug for the DP IN port of the host router
	 * and the DP resource is not available anymore it means there
	 * is a monitor connected directly to the Type-C port and we are
	 * in "redrive" mode. For this to work we cannot enter RTD3 so
	 * we bump up the runtime PM reference count here.
	 */
	if (!tb_port_is_dpin(port))
		return;
	if (!tb_switch_query_dp_resource(sw, port)) {
		port->redrive = true;
		pm_runtime_get(&sw->dev);
		tb_port_dbg(port, "enter redrive mode, keeping powered\n");
	}
}

static void tb_exit_redrive(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;

	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
		return;

	if (!tb_port_is_dpin(port))
		return;
	if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
		port->redrive = false;
		pm_runtime_put(&sw->dev);
		tb_port_dbg(port, "exit redrive mode\n");
	}
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	if (tunnel)
		tb_deactivate_and_free_tunnel(tunnel);
	else
		tb_enter_redrive(port);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_recalc_estimated_bandwidth(tb);
	tb_tunnel_dp(tb);
}
static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available after hotplug\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
	tb_exit_redrive(port);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_pcie_down(tb_switch_parent(sw), port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;
	int ret;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);

	/*
	 * When tunneling DMA paths the link should not enter CL states
	 * so disable them now.
	 */
	tb_disable_clx(sw);

	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_clx;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_clx:
	tb_enable_clx(sw);
	mutex_unlock(&tb->lock);

	return ret;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	/*
	 * Try to re-enable CL states now, it is OK if this fails
	 * because we may still have another DMA tunnel active through
	 * the same host router USB4 downstream port.
	 */
	tb_enable_clx(sw);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}

	return 0;
}
2297 /* hotplug handling */
2300 * tb_handle_hotplug() - handle hotplug event
2302 * Executes on tb->wq.
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_set_link_width(port->remote->sw,
						 TB_LINK_WIDTH_SINGLE);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_recalc_estimated_bandwidth(tb);
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else if (!port->port) {
			tb_sw_dbg(sw, "xHCI disconnect request\n");
			tb_switch_xhci_disconnect(sw);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

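/*
 * tb_alloc_dp_bandwidth() - Change bandwidth allocation of a DP tunnel
 *
 * Handles a DPTX bandwidth allocation request for the tunnel. Requests
 * at or below the current allocation are granted directly and the freed
 * bandwidth is kept reserved for the group for TB_RELEASE_BW_TIMEOUT.
 * Larger requests first release unused USB3 bandwidth, re-check what is
 * available across the shared USB4 ports and, when the request exceeds
 * asym_threshold, switch the link to asymmetric before allocating.
 */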
static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
				 int *requested_down)
{
	int allocated_up, allocated_down, available_up, available_down, ret;
	int requested_up_corrected, requested_down_corrected, granularity;
	int max_up, max_down, max_up_rounded, max_down_rounded;
	struct tb_bandwidth_group *group;
	struct tb *tb = tunnel->tb;
	struct tb_port *in, *out;
	bool downstream;

	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
	if (ret)
		return ret;

	in = tunnel->src_port;
	out = tunnel->dst_port;

	tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
		      allocated_up, allocated_down);

	/*
	 * If we get a rounded-up request from the graphics side, say HBR2 x 4
	 * that is 17500 instead of 17280 (this is because of the
	 * granularity), we allow it too. Here the graphics side has already
	 * negotiated with the DPRX the maximum possible rates (which is
	 * 17280 in this case).
	 *
	 * Since the link cannot go higher than 17280 we use that in our
	 * calculations but the DP IN adapter Allocated BW write must be
	 * the same value (17500) otherwise the adapter will mark it as
	 * failed for graphics.
	 */
	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret)
		goto fail;

	ret = usb4_dp_port_granularity(in);
	if (ret < 0)
		goto fail;
	granularity = ret;

	max_up_rounded = roundup(max_up, granularity);
	max_down_rounded = roundup(max_down, granularity);

	/*
	 * This will "fix" the request down to the maximum supported
	 * rate * lanes if it is at the maximum rounded up level.
	 */
	requested_up_corrected = *requested_up;
	if (requested_up_corrected == max_up_rounded)
		requested_up_corrected = max_up;
	else if (requested_up_corrected < 0)
		requested_up_corrected = 0;
	requested_down_corrected = *requested_down;
	if (requested_down_corrected == max_down_rounded)
		requested_down_corrected = max_down;
	else if (requested_down_corrected < 0)
		requested_down_corrected = 0;

	tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
		      requested_up_corrected, requested_down_corrected);

	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
		tb_tunnel_dbg(tunnel,
			      "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
			      requested_up_corrected, requested_down_corrected,
			      max_up_rounded, max_down_rounded);
		ret = -ENOBUFS;
		goto fail;
	}

	downstream = tb_tunnel_direction_downstream(tunnel);
	group = in->group;

	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
		if (tunnel->bw_mode) {
			int reserved;
			/*
			 * If the requested bandwidth is less than or equal to
			 * what is currently allocated to that tunnel we
			 * simply change the reservation of the tunnel
			 * and add the released bandwidth for the group
			 * for the next 10s. Then we release it for
			 * others to use.
			 */
			if (downstream)
				reserved = allocated_down - *requested_down;
			else
				reserved = allocated_up - *requested_up;

			if (reserved > 0) {
				group->reserved += reserved;
				tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
				       group->index, reserved, group->reserved);

				/*
				 * If it was not already pending,
				 * schedule release now. If it is then
				 * postpone it for the next 10s (unless
				 * it is already running in which case
				 * the 10s already expired and we should
				 * give the reserved back to others).
				 */
				mod_delayed_work(system_wq, &group->release_work,
					msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
			}
		}

		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
						 requested_down);
	}

	/*
	 * More bandwidth is requested. Release all the potential
	 * bandwidth from USB3 first.
	 */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret)
		goto fail;

	/*
	 * Then go over all tunnels that cross the same USB4 ports (they
	 * are also in the same group but we use the same function here
	 * that we use with the normal bandwidth allocation).
	 */
	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
				     true);
	if (ret)
		goto reclaim;

	tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
		      available_up, available_down, group->reserved);

	if ((*requested_up >= 0 &&
	     available_up + group->reserved >= requested_up_corrected) ||
	    (*requested_down >= 0 &&
	     available_down + group->reserved >= requested_down_corrected)) {
		int released = 0;

		/*
		 * If bandwidth on a link is >= asym_threshold
		 * transition the link to asymmetric.
		 */
		ret = tb_configure_asym(tb, in, out, *requested_up,
					*requested_down);
		if (ret) {
			tb_configure_sym(tb, in, out, true);
			goto fail;
		}

		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
						requested_down);
		if (ret) {
			tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
			tb_configure_sym(tb, in, out, true);
		}

		if (downstream) {
			if (*requested_down > available_down)
				released = *requested_down - available_down;
		} else {
			if (*requested_up > available_up)
				released = *requested_up - available_up;
		}
		if (released) {
			group->reserved -= released;
			tb_dbg(tb, "group %d released %d total %d Mb/s\n",
			       group->index, released, group->reserved);
		}
	} else {
		ret = -ENOBUFS;
	}

reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
fail:
	if (ret && ret != -ENODEV) {
		/*
		 * Write back the same allocated (so no change), this
		 * makes the DPTX request fail on graphics side.
		 */
		tb_tunnel_dbg(tunnel,
			      "failing the request by rewriting allocated %d/%d Mb/s\n",
			      allocated_up, allocated_down);
		tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
	}

	return ret;
}

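/*
 * tb_handle_dp_bandwidth_request() - Handle DP IN bandwidth request
 *
 * Executes on tb->wq. Reads the bandwidth requested by the DPTX through
 * the DP IN adapter and passes it to tb_alloc_dp_bandwidth(). Also
 * handles enabling/disabling of the bandwidth allocation mode by the
 * graphics driver.
 */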
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	int requested_bw, requested_up, requested_down, ret;
	struct tb_tunnel *tunnel;
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *in;

	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto unlock;

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
			ev->route);
		goto unlock;
	}

	in = &sw->ports[ev->port];
	if (!tb_port_is_dpin(in)) {
		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
		goto put_sw;
	}

	tb_port_dbg(in, "handling bandwidth allocation request\n");

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
	if (!tunnel) {
		tb_port_warn(in, "failed to find tunnel\n");
		goto put_sw;
	}

	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
		if (tunnel->bw_mode) {
			/*
			 * Reset the tunnel back to use the legacy
			 * allocation.
			 */
			tunnel->bw_mode = false;
			tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
		} else {
			tb_port_warn(in, "bandwidth allocation mode not enabled\n");
		}
		goto put_sw;
	}

	ret = usb4_dp_port_requested_bandwidth(in);
	if (ret < 0) {
		if (ret == -ENODATA) {
			/*
			 * There is no request active so this means the
			 * bandwidth allocation mode was enabled from the
			 * graphics side. At this point we know that the
			 * graphics driver has read the DPRX capabilities
			 * so we can offer a better bandwidth estimate.
			 */
			tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
			tb_recalc_estimated_bandwidth(tb);
		} else {
			tb_port_warn(in, "failed to read requested bandwidth\n");
		}
		goto put_sw;
	}
	requested_bw = ret;

	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);

	if (tb_tunnel_direction_downstream(tunnel)) {
		requested_up = -1;
		requested_down = requested_bw;
	} else {
		requested_up = requested_bw;
		requested_down = -1;
	}

	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
	if (ret) {
		if (ret == -ENOBUFS)
			tb_tunnel_warn(tunnel,
				       "not enough bandwidth available\n");
		else
			tb_tunnel_warn(tunnel,
				       "failed to change bandwidth allocation\n");
	} else {
		tb_tunnel_dbg(tunnel,
			      "bandwidth allocation changed to %d/%d Mb/s\n",
			      requested_up, requested_down);

		/* Update other clients about the allocation change */
		tb_recalc_estimated_bandwidth(tb);
	}

put_sw:
	tb_switch_put(sw);
unlock:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

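/*
 * tb_queue_dp_bandwidth_request() - Queue DP bandwidth request work
 *
 * Queues a tb_hotplug_event that runs tb_handle_dp_bandwidth_request()
 * on tb->wq for the given router and DP IN adapter.
 */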
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
	queue_work(tb->wq, &ev->work);
}

static void tb_handle_notification(struct tb *tb, u64 route,
				   const struct cfg_error_pkg *error)
{
	switch (error->error) {
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		break;

	case TB_CFG_ERROR_DP_BW:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		tb_queue_dp_bandwidth_request(tb, route, error->port);
		break;

	default:
		/* Ignore for now */
		break;
	}
}

/*
 * tb_schedule_hotplug_handler() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route = tb_cfg_get_route(&pkg->header);

	switch (type) {
	case TB_CFG_PKG_ERROR:
		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
		return;
	case TB_CFG_PKG_EVENT:
		break;
	default:
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

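/*
 * tb_stop() - Stop the domain
 *
 * Deactivates DMA tunnels (they need a functional driver), frees the
 * remaining tunnel structures, removes the root switch and stops
 * processing of further hotplug events.
 */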
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static void tb_deinit(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int i;

	/* Cancel all the release bandwidth workers */
	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
		cancel_delayed_work_sync(&tcm->groups[i].release_work);
}

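/*
 * tb_scan_finalize_switch() - Announce a discovered switch to userspace
 *
 * device_for_each_child() callback that marks switches set up by the
 * boot firmware as authorized, re-enables uevents and sends KOBJ_ADD
 * for each discovered switch and its children.
 */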
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb, bool reset)
{
	struct tb_cm *tcm = tb_priv(tb);
	bool discover = true;
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set host router's TMU to
	 * Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);

	/*
	 * Boot firmware might have created tunnels of its own. Since we
	 * cannot be sure they are usable for us, tear them down and
	 * reset the ports to handle it as new hotplug for USB4 v1
	 * routers (for USB4 v2 and beyond we already do host reset).
	 */
	if (reset && tb_switch_is_usb4(tb->root_switch)) {
		discover = false;
		if (usb4_switch_version(tb->root_switch) == 1)
			tb_switch_reset(tb->root_switch);
	}

	if (discover) {
		/* Full scan to discover devices added before the driver was loaded. */
		tb_scan_switch(tb->root_switch);
		/* Find out tunnels created by the boot firmware */
		tb_discover_tunnels(tb);
		/* Add DP resources from the DP tunnels created by the boot firmware */
		tb_discover_dp_resources(tb);
	}

	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

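/*
 * tb_restore_children() - Restore child routers after resume
 *
 * Re-enables CL states and TMU, restores link width and link
 * configuration for every connected child router (recursively) and
 * reconfigures any XDomain connections.
 */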
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to re-enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_configuration_valid(sw);

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_set_link_width(port->remote->sw,
						 port->remote->sw->link_width);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/*
	 * For non-USB4 hosts (Apple systems) remove any PCIe devices
	 * the firmware might have setup.
	 */
	if (!tb_switch_is_usb4(tb->root_switch))
		tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch, false);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

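/*
 * tb_free_unplugged_xdomains() - Remove unplugged XDomain connections
 *
 * Recursively removes XDomain connections that are marked unplugged and
 * returns the number of connections removed.
 */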
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

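/*
 * tb_runtime_resume() - Runtime resume the domain
 *
 * Resumes the root switch, restores child routers, restarts the tunnels
 * and schedules delayed work that removes any devices unplugged while
 * the domain was runtime suspended.
 */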
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch, true);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.deinit = tb_deinit,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static bool tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;
	bool ret;

	if (!x86_apple_machine)
		return false;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return false;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return false;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return false;

	/*
	 * For each hotplug downstream port, add a device link
	 * back to NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	ret = false;
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
			ret = true;
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}

	return ret;
}

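/*
 * tb_probe() - Allocate a software connection manager domain
 *
 * Allocates the domain for @nhi, sets the security level based on
 * whether ACPI allows PCIe tunneling, and initializes the tunnel and DP
 * resource lists and the bandwidth groups.
 */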
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
	tb_init_bandwidth_groups(tcm);

	tb_dbg(tb, "using software connection manager\n");

	/*
	 * Device links are needed to make sure we establish tunnels
	 * before the PCIe/USB stack is resumed so complain here if we
	 * found them missing.
	 */
	if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
		tb_warn(tb, "device links to tunneled native ports are missing!\n");