// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "tb.h"

/* Switch NVM support */
struct nvm_auth_status {
        struct list_head list;
        uuid_t uuid;
        u32 status;
};

enum nvm_write_ops {
        WRITE_AND_AUTHENTICATE = 1,
        WRITE_ONLY = 2,
};
/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
        struct nvm_auth_status *st;

        list_for_each_entry(st, &nvm_auth_status_cache, list) {
                if (uuid_equal(&st->uuid, sw->uuid))
                        return st;
        }

        return NULL;
}
static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
        struct nvm_auth_status *st;

        mutex_lock(&nvm_auth_status_lock);
        st = __nvm_get_auth_status(sw);
        mutex_unlock(&nvm_auth_status_lock);

        *status = st ? st->status : 0;
}
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
        struct nvm_auth_status *st;

        if (WARN_ON(!sw->uuid))
                return;

        mutex_lock(&nvm_auth_status_lock);
        st = __nvm_get_auth_status(sw);

        if (!st) {
                st = kzalloc(sizeof(*st), GFP_KERNEL);
                if (!st)
                        goto unlock;

                memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
                INIT_LIST_HEAD(&st->list);
                list_add_tail(&st->list, &nvm_auth_status_cache);
        }

        st->status = status;
unlock:
        mutex_unlock(&nvm_auth_status_lock);
}
static void nvm_clear_auth_status(const struct tb_switch *sw)
{
        struct nvm_auth_status *st;

        mutex_lock(&nvm_auth_status_lock);
        st = __nvm_get_auth_status(sw);
        if (st) {
                list_del(&st->list);
                kfree(st);
        }
        mutex_unlock(&nvm_auth_status_lock);
}
static int nvm_validate_and_write(struct tb_switch *sw)
{
        unsigned int image_size, hdr_size;
        const u8 *buf = sw->nvm->buf;
        u16 ds_size;
        int ret;

        if (!buf)
                return -EINVAL;

        image_size = sw->nvm->buf_data_size;
        if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
                return -EINVAL;

        /*
         * FARB pointer must point inside the image and must at least
         * contain parts of the digital section we will be reading here.
         */
        hdr_size = (*(u32 *)buf) & 0xffffff;
        if (hdr_size + NVM_DEVID + 2 >= image_size)
                return -EINVAL;

        /* Digital section start should be aligned to 4k page */
        if (!IS_ALIGNED(hdr_size, SZ_4K))
                return -EINVAL;

        /*
         * Read digital section size and check that it also fits inside
         * the image.
         */
        ds_size = *(u16 *)(buf + hdr_size);
        if (ds_size >= image_size)
                return -EINVAL;

        if (!sw->safe_mode) {
                u16 device_id;

                /*
                 * Make sure the device ID in the image matches the one
                 * we read from the switch config space.
                 */
                device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
                if (device_id != sw->config.device_id)
                        return -EINVAL;

                if (sw->generation < 3) {
                        /* Write CSS headers first */
                        ret = dma_port_flash_write(sw->dma_port,
                                DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
                                DMA_PORT_CSS_MAX_SIZE);
                        if (ret)
                                return ret;
                }

                /* Skip headers in the image */
                buf += hdr_size;
                image_size -= hdr_size;
        }

        if (tb_switch_is_usb4(sw))
                ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
        else
                ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
        if (!ret)
                sw->nvm->flushed = true;

        return ret;
}
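
/*
 * Illustrative layout sketch (example numbers only, not taken from a
 * hardware spec): the first 32-bit word of the image holds the FARB
 * pointer whose low 24 bits give hdr_size. With hdr_size = 0x4000 the
 * digital section starts at offset 0x4000 (4 KiB aligned as required),
 * its 16-bit size field is read from buf[0x4000], and the device ID is
 * read from buf[0x4000 + NVM_DEVID]. Any image smaller than
 * hdr_size + NVM_DEVID + 2 bytes is rejected before a flash write.
 */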
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
        int ret = 0;

        /*
         * Root switch NVM upgrade requires that we disconnect the
         * existing paths first (in case it is not in safe mode
         * already).
         */
        if (!sw->safe_mode) {
                u32 status;

                ret = tb_domain_disconnect_all_paths(sw->tb);
                if (ret)
                        return ret;
                /*
                 * The host controller goes away pretty soon after this if
                 * everything goes well so getting timeout is expected.
                 */
                ret = dma_port_flash_update_auth(sw->dma_port);
                if (!ret || ret == -ETIMEDOUT)
                        return 0;

                /*
                 * Any error from update auth operation requires power
                 * cycling of the host router.
                 */
                tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
                if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
                        nvm_set_auth_status(sw, status);
        }

        /*
         * From safe mode we can get out by just power cycling the
         * switch.
         */
        dma_port_power_cycle(sw->dma_port);
        return 0;
}
static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
        int ret, retries = 10;

        ret = dma_port_flash_update_auth(sw->dma_port);
        switch (ret) {
        case 0:
        case -ETIMEDOUT:
        case -EACCES:
        case -EINVAL:
                /* Power cycle is required */
                break;
        default:
                return ret;
        }

        /*
         * Poll here for the authentication status. It takes some time
         * for the device to respond (we get timeout for a while). Once
         * we get response the device needs to be power cycled in order
         * for the new NVM to be taken into use.
         */
        do {
                u32 status;

                ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
                if (ret < 0 && ret != -ETIMEDOUT)
                        return ret;
                if (ret > 0) {
                        if (status) {
                                tb_sw_warn(sw, "failed to authenticate NVM\n");
                                nvm_set_auth_status(sw, status);
                        }

                        tb_sw_info(sw, "power cycling the switch now\n");
                        dma_port_power_cycle(sw->dma_port);
                        return 0;
                }

                msleep(500);
        } while (--retries);

        return -ETIMEDOUT;
}
static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
        struct pci_dev *root_port;

        /*
         * During host router NVM upgrade we should not allow root port to
         * go into D3cold because some root ports cannot trigger PME
         * by themselves. To be on the safe side keep the root port in D0
         * during the whole upgrade process.
         */
        root_port = pcie_find_root_port(sw->tb->nhi->pdev);
        if (root_port)
                pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
        struct pci_dev *root_port;

        root_port = pcie_find_root_port(sw->tb->nhi->pdev);
        if (root_port)
                pm_runtime_put(&root_port->dev);
}
static inline bool nvm_readable(struct tb_switch *sw)
{
        if (tb_switch_is_usb4(sw)) {
                /*
                 * USB4 devices must support NVM operations but it is
                 * optional for hosts. Therefore we query the NVM sector
                 * size here and if it is supported assume NVM
                 * operations are implemented.
                 */
                return usb4_switch_nvm_sector_size(sw) > 0;
        }

        /* Thunderbolt 2 and 3 devices support NVM through DMA port */
        return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
        if (sw->no_nvm_upgrade)
                return false;
        return nvm_readable(sw);
}
static inline int nvm_read(struct tb_switch *sw, unsigned int address,
                           void *buf, size_t size)
{
        if (tb_switch_is_usb4(sw))
                return usb4_switch_nvm_read(sw, address, buf, size);
        return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_authenticate(struct tb_switch *sw)
{
        int ret;

        if (tb_switch_is_usb4(sw))
                return usb4_switch_nvm_authenticate(sw);

        if (!tb_route(sw)) {
                nvm_authenticate_start_dma_port(sw);
                ret = nvm_authenticate_host_dma_port(sw);
        } else {
                ret = nvm_authenticate_device_dma_port(sw);
        }

        return ret;
}
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
                              size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_switch *sw = tb_to_switch(nvm->dev);
        int ret;

        pm_runtime_get_sync(&sw->dev);

        if (!mutex_trylock(&sw->tb->lock)) {
                ret = restart_syscall();
                goto out;
        }

        ret = nvm_read(sw, offset, val, bytes);
        mutex_unlock(&sw->tb->lock);

out:
        pm_runtime_mark_last_busy(&sw->dev);
        pm_runtime_put_autosuspend(&sw->dev);

        return ret;
}
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
                               size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_switch *sw = tb_to_switch(nvm->dev);
        int ret;

        if (!mutex_trylock(&sw->tb->lock))
                return restart_syscall();

        /*
         * Since writing the NVM image might require some special steps,
         * for example when CSS headers are written, we cache the image
         * locally here and handle the special cases when the user asks
         * us to authenticate the image.
         */
        ret = tb_nvm_write_buf(nvm, offset, val, bytes);
        mutex_unlock(&sw->tb->lock);

        return ret;
}
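
/*
 * Note on the resulting flow (a summary of the two hooks above, not an
 * extra interface): writes through the non-active NVMem device only fill
 * sw->nvm->buf via tb_nvm_write_buf(); nothing reaches the flash until
 * userspace writes the nvm_authenticate attribute, at which point
 * nvm_validate_and_write() flushes the cached image and nvm_authenticate()
 * optionally starts the authentication.
 */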
static int tb_switch_nvm_add(struct tb_switch *sw)
{
        struct tb_nvm *nvm;
        u32 val;
        int ret;

        if (!nvm_readable(sw))
                return 0;

        /*
         * The NVM format of non-Intel hardware is not known so
         * currently restrict NVM upgrade for Intel hardware. We may
         * relax this in the future when we learn other NVM formats.
         */
        if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
            sw->config.vendor_id != 0x8087) {
                dev_info(&sw->dev,
                         "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
                         sw->config.vendor_id);
                return 0;
        }

        nvm = tb_nvm_alloc(&sw->dev);
        if (IS_ERR(nvm))
                return PTR_ERR(nvm);

        /*
         * If the switch is in safe-mode the only accessible portion of
         * the NVM is the non-active one where userspace is expected to
         * write new functional NVM.
         */
        if (!sw->safe_mode) {
                u32 nvm_size, hdr_size;

                ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
                if (ret)
                        goto err_nvm;

                hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
                nvm_size = (SZ_1M << (val & 7)) / 8;
                nvm_size = (nvm_size - hdr_size) / 2;
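
                /*
                 * Worked example (illustrative numbers only): if the
                 * NVM_FLASH_SIZE register reads val = 5, the part size is
                 * (SZ_1M << 5) / 8 = 4 MiB. On a generation 3 router
                 * hdr_size is SZ_16K, so each of the two regions (active
                 * and non-active) ends up (4 MiB - 16 KiB) / 2 bytes.
                 */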
                ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
                if (ret)
                        goto err_nvm;

                nvm->major = val >> 16;
                nvm->minor = val >> 8;

                ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
                if (ret)
                        goto err_nvm;
        }

        if (!sw->no_nvm_upgrade) {
                ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
                                            tb_switch_nvm_write);
                if (ret)
                        goto err_nvm;
        }

        sw->nvm = nvm;
        return 0;

err_nvm:
        tb_nvm_free(nvm);
        return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
        struct tb_nvm *nvm;

        nvm = sw->nvm;
        sw->nvm = NULL;

        if (!nvm)
                return;

        /* Remove authentication status in case the switch is unplugged */
        if (!nvm->authenticating)
                nvm_clear_auth_status(sw);

        tb_nvm_free(nvm);
}
/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
        switch (port->type >> 16) {
        case 0:
                switch ((u8) port->type) {
                case 0:
                        return "Inactive";
                case 1:
                        return "Port";
                case 2:
                        return "NHI";
                default:
                        return "unknown";
                }
        case 0x2:
                return "Ethernet";
        case 0x8:
                return "SATA";
        case 0xe:
                return "DP/HDMI";
        case 0x10:
                return "PCIe";
        case 0x20:
                return "USB";
        default:
                return "unknown";
        }
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
        tb_dbg(tb,
               " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
               port->port_number, port->vendor_id, port->device_id,
               port->revision, port->thunderbolt_version, tb_port_type(port),
               port->type);
        tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
               port->max_in_hop_id, port->max_out_hop_id);
        tb_dbg(tb, " Max counters: %d\n", port->max_counters);
        tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}
/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: An enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
        struct tb_cap_phy phy;
        int res;

        if (port->cap_phy == 0) {
                tb_port_WARN(port, "does not have a PHY\n");
                return -EINVAL;
        }
        res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
        if (res)
                return res;
        return phy.state;
}
/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: An error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
        int retries = 10;
        int state;

        if (!port->cap_phy) {
                tb_port_WARN(port, "does not have PHY\n");
                return -EINVAL;
        }
        if (tb_is_upstream_port(port)) {
                tb_port_WARN(port, "is the upstream port\n");
                return -EINVAL;
        }

        while (retries--) {
                state = tb_port_state(port);
                if (state < 0)
                        return state;
                if (state == TB_PORT_DISABLED) {
                        tb_port_dbg(port, "is disabled (state: 0)\n");
                        return 0;
                }
                if (state == TB_PORT_UNPLUGGED) {
                        if (wait_if_unplugged) {
                                /* used during resume */
                                tb_port_dbg(port,
                                            "is unplugged (state: 7), retrying...\n");
                                msleep(100);
                                continue;
                        }
                        tb_port_dbg(port, "is unplugged (state: 7)\n");
                        return 0;
                }
                if (state == TB_PORT_UP) {
                        tb_port_dbg(port, "is connected, link is up (state: 2)\n");
                        return 1;
                }

                /*
                 * After plug-in the state is TB_PORT_CONNECTING. Give it some
                 * time.
                 */
                tb_port_dbg(port,
                            "is connected, link is not up (state: %d), retrying...\n",
                            state);
                msleep(100);
        }
        tb_port_WARN(port,
                     "failed to reach state TB_PORT_UP. Ignoring port...\n");
        return 0;
}
/**
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
        u32 nfc_credits;

        if (credits == 0 || port->sw->is_unplugged)
                return 0;

        /*
         * USB4 restricts programming NFC buffers to lane adapters only
         * so skip other ports.
         */
        if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
                return 0;

        nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
        nfc_credits += credits;

        tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
                    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

        port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
        port->config.nfc_credits |= nfc_credits;

        return tb_port_write(port, &port->config.nfc_credits,
                             TB_CFG_PORT, ADP_CS_4, 1);
}
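
/*
 * Example with hypothetical numbers (not from the spec): if the
 * ADP_CS_4_NFC_BUFFERS_MASK field of port->config.nfc_credits currently
 * holds 0x10 and the caller passes credits = -4, the field is rewritten
 * to 0x0c while the bits outside the mask are preserved, and only then
 * is ADP_CS_4 written back to the port.
 */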
/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
        u32 data;
        int ret;

        ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
        if (ret)
                return ret;

        data &= ~ADP_CS_5_LCA_MASK;
        data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

        return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}
/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
        u32 zero[3] = { 0, 0, 0 };
        tb_port_dbg(port, "clearing counter %d\n", counter);
        return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}
/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible to the connection manager.
 */
int tb_port_unlock(struct tb_port *port)
{
        if (tb_switch_is_icm(port->sw))
                return 0;
        if (!tb_port_is_null(port))
                return -EINVAL;
        if (tb_switch_is_usb4(port->sw))
                return usb4_port_unlock(port);
        return 0;
}
static int __tb_port_enable(struct tb_port *port, bool enable)
{
        int ret;
        u32 phy;

        if (!tb_port_is_null(port))
                return -EINVAL;

        ret = tb_port_read(port, &phy, TB_CFG_PORT,
                           port->cap_phy + LANE_ADP_CS_1, 1);
        if (ret)
                return ret;

        if (enable)
                phy &= ~LANE_ADP_CS_1_LD;
        else
                phy |= LANE_ADP_CS_1_LD;

        return tb_port_write(port, &phy, TB_CFG_PORT,
                             port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 */
int tb_port_enable(struct tb_port *port)
{
        return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 */
int tb_port_disable(struct tb_port *port)
{
        return __tb_port_enable(port, false);
}
/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
        int res;
        int cap;

        res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
        if (res) {
                if (res == -ENODEV) {
                        tb_dbg(port->sw->tb, " Port %d: not implemented\n",
                               port->port);
                        port->disabled = true;
                        return 0;
                }
                return res;
        }

        /* Port 0 is the switch itself and has no PHY. */
        if (port->config.type == TB_TYPE_PORT && port->port != 0) {
                cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

                if (cap > 0)
                        port->cap_phy = cap;
                else
                        tb_port_WARN(port, "non switch port without a PHY\n");

                cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
                if (cap > 0)
                        port->cap_usb4 = cap;
        } else if (port->port != 0) {
                cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
                if (cap > 0)
                        port->cap_adap = cap;
        }

        tb_dump_port(port->sw->tb, &port->config);

        /* Control port does not need HopID allocation */
        if (port->port) {
                ida_init(&port->in_hopids);
                ida_init(&port->out_hopids);
        }

        INIT_LIST_HEAD(&port->list);
        return 0;
}
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
                               int max_hopid)
{
        int port_max_hopid;
        struct ida *ida;

        if (in) {
                port_max_hopid = port->config.max_in_hop_id;
                ida = &port->in_hopids;
        } else {
                port_max_hopid = port->config.max_out_hop_id;
                ida = &port->out_hopids;
        }

        /*
         * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
         * reserved.
         */
        if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
                min_hopid = TB_PATH_MIN_HOPID;

        if (max_hopid < 0 || max_hopid > port_max_hopid)
                max_hopid = port_max_hopid;

        return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}
/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
        return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
        return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
        ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
        ida_simple_remove(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
                                          const struct tb_switch *sw)
{
        u64 mask = (1ULL << parent->config.depth * 8) - 1;
        return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}
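
/*
 * Worked example with made-up route strings: a parent at depth 2 gives
 * mask = (1ULL << 16) - 1 = 0xffff, i.e. the first two hops of the
 * route string. A switch with route 0x030201 below a parent with route
 * 0x0201 reduces to 0x0201 under that mask on both sides, so the switch
 * is considered reachable through that parent.
 */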
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
                                     struct tb_port *prev)
{
        struct tb_port *next;

        if (!prev)
                return start;

        if (prev->sw == end->sw) {
                if (prev == end)
                        return NULL;
                return end;
        }

        if (tb_switch_is_reachable(prev->sw, end->sw)) {
                next = tb_port_at(tb_route(end->sw), prev->sw);
                /* Walk down the topology if next == prev */
                if (prev->remote &&
                    (next == prev || next->dual_link_port == prev))
                        next = prev->remote;
        } else {
                if (tb_is_upstream_port(prev)) {
                        next = prev->remote;
                } else {
                        next = tb_upstream_port(prev->sw);
                        /*
                         * Keep the same link if prev and next are both
                         * dual link ports.
                         */
                        if (next->dual_link_port &&
                            next->link_nr != prev->link_nr) {
                                next = next->dual_link_port;
                        }
                }
        }

        return next != prev ? next : NULL;
}
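
/*
 * Typical usage (sketch only, following the walk contract documented
 * above): start with prev = NULL and feed each returned port back in
 * until the function returns NULL, e.g.
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src_port, dst_port, p))) {
 *		... handle port p ...
 *	}
 *
 * src_port/dst_port are placeholder names; the domain tb->lock must be
 * held for the whole walk.
 */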
913 * tb_port_get_link_speed() - Get current link speed
914 * @port: Port to check (USB4 or CIO)
916 * Returns link speed in Gb/s or negative errno in case of failure.
918 int tb_port_get_link_speed(struct tb_port
*port
)
926 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
927 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
931 speed
= (val
& LANE_ADP_CS_1_CURRENT_SPEED_MASK
) >>
932 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT
;
933 return speed
== LANE_ADP_CS_1_CURRENT_SPEED_GEN3
? 20 : 10;
937 * tb_port_get_link_width() - Get current link width
938 * @port: Port to check (USB4 or CIO)
940 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
941 * or negative errno in case of failure.
943 int tb_port_get_link_width(struct tb_port
*port
)
951 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
952 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
956 return (val
& LANE_ADP_CS_1_CURRENT_WIDTH_MASK
) >>
957 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT
;
960 static bool tb_port_is_width_supported(struct tb_port
*port
, int width
)
968 ret
= tb_port_read(port
, &phy
, TB_CFG_PORT
,
969 port
->cap_phy
+ LANE_ADP_CS_0
, 1);
973 widths
= (phy
& LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK
) >>
974 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT
;
976 return !!(widths
& width
);
979 static int tb_port_set_link_width(struct tb_port
*port
, unsigned int width
)
987 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
988 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
992 val
&= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK
;
995 val
|= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE
<<
996 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
999 val
|= LANE_ADP_CS_1_TARGET_WIDTH_DUAL
<<
1000 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
1006 val
|= LANE_ADP_CS_1_LB
;
1008 return tb_port_write(port
, &val
, TB_CFG_PORT
,
1009 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1013 * tb_port_lane_bonding_enable() - Enable bonding on port
1014 * @port: port to enable
1016 * Enable bonding by setting the link width of the port and the
1017 * other port in case of dual link port.
1019 * Return: %0 in case of success and negative errno in case of error
1021 int tb_port_lane_bonding_enable(struct tb_port
*port
)
1026 * Enable lane bonding for both links if not already enabled by
1027 * for example the boot firmware.
1029 ret
= tb_port_get_link_width(port
);
1031 ret
= tb_port_set_link_width(port
, 2);
1036 ret
= tb_port_get_link_width(port
->dual_link_port
);
1038 ret
= tb_port_set_link_width(port
->dual_link_port
, 2);
1040 tb_port_set_link_width(port
, 1);
1045 port
->bonded
= true;
1046 port
->dual_link_port
->bonded
= true;
1052 * tb_port_lane_bonding_disable() - Disable bonding on port
1053 * @port: port to disable
1055 * Disable bonding by setting the link width of the port and the
1056 * other port in case of dual link port.
1059 void tb_port_lane_bonding_disable(struct tb_port
*port
)
1061 port
->dual_link_port
->bonded
= false;
1062 port
->bonded
= false;
1064 tb_port_set_link_width(port
->dual_link_port
, 1);
1065 tb_port_set_link_width(port
, 1);
1069 * tb_port_is_enabled() - Is the adapter port enabled
1070 * @port: Port to check
1072 bool tb_port_is_enabled(struct tb_port
*port
)
1074 switch (port
->config
.type
) {
1075 case TB_TYPE_PCIE_UP
:
1076 case TB_TYPE_PCIE_DOWN
:
1077 return tb_pci_port_is_enabled(port
);
1079 case TB_TYPE_DP_HDMI_IN
:
1080 case TB_TYPE_DP_HDMI_OUT
:
1081 return tb_dp_port_is_enabled(port
);
1083 case TB_TYPE_USB3_UP
:
1084 case TB_TYPE_USB3_DOWN
:
1085 return tb_usb3_port_is_enabled(port
);
1093 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1094 * @port: USB3 adapter port to check
1096 bool tb_usb3_port_is_enabled(struct tb_port
*port
)
1100 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1101 port
->cap_adap
+ ADP_USB3_CS_0
, 1))
1104 return !!(data
& ADP_USB3_CS_0_PE
);
1108 * tb_usb3_port_enable() - Enable USB3 adapter port
1109 * @port: USB3 adapter port to enable
1110 * @enable: Enable/disable the USB3 adapter
1112 int tb_usb3_port_enable(struct tb_port
*port
, bool enable
)
1114 u32 word
= enable
? (ADP_USB3_CS_0_PE
| ADP_USB3_CS_0_V
)
1117 if (!port
->cap_adap
)
1119 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1120 port
->cap_adap
+ ADP_USB3_CS_0
, 1);
1124 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1125 * @port: PCIe port to check
1127 bool tb_pci_port_is_enabled(struct tb_port
*port
)
1131 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1132 port
->cap_adap
+ ADP_PCIE_CS_0
, 1))
1135 return !!(data
& ADP_PCIE_CS_0_PE
);
1139 * tb_pci_port_enable() - Enable PCIe adapter port
1140 * @port: PCIe port to enable
1141 * @enable: Enable/disable the PCIe adapter
1143 int tb_pci_port_enable(struct tb_port
*port
, bool enable
)
1145 u32 word
= enable
? ADP_PCIE_CS_0_PE
: 0x0;
1146 if (!port
->cap_adap
)
1148 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1149 port
->cap_adap
+ ADP_PCIE_CS_0
, 1);
1153 * tb_dp_port_hpd_is_active() - Is HPD already active
1154 * @port: DP out port to check
1156 * Checks if the DP OUT adapter port has HDP bit already set.
1158 int tb_dp_port_hpd_is_active(struct tb_port
*port
)
1163 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1164 port
->cap_adap
+ ADP_DP_CS_2
, 1);
1168 return !!(data
& ADP_DP_CS_2_HDP
);
1172 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1173 * @port: Port to clear HPD
1175 * If the DP IN port has HDP set, this function can be used to clear it.
1177 int tb_dp_port_hpd_clear(struct tb_port
*port
)
1182 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1183 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1187 data
|= ADP_DP_CS_3_HDPC
;
1188 return tb_port_write(port
, &data
, TB_CFG_PORT
,
1189 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1193 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1194 * @port: DP IN/OUT port to set hops
1195 * @video: Video Hop ID
1196 * @aux_tx: AUX TX Hop ID
1197 * @aux_rx: AUX RX Hop ID
1199 * Programs specified Hop IDs for DP IN/OUT port.
1201 int tb_dp_port_set_hops(struct tb_port
*port
, unsigned int video
,
1202 unsigned int aux_tx
, unsigned int aux_rx
)
1207 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1208 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1212 data
[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK
;
data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1216 data
[0] |= (video
<< ADP_DP_CS_0_VIDEO_HOPID_SHIFT
) &
1217 ADP_DP_CS_0_VIDEO_HOPID_MASK
;
1218 data
[1] |= aux_tx
& ADP_DP_CS_1_AUX_TX_HOPID_MASK
;
1219 data
[1] |= (aux_rx
<< ADP_DP_CS_1_AUX_RX_HOPID_SHIFT
) &
1220 ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1222 return tb_port_write(port
, data
, TB_CFG_PORT
,
1223 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1227 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1228 * @port: DP adapter port to check
1230 bool tb_dp_port_is_enabled(struct tb_port
*port
)
1234 if (tb_port_read(port
, data
, TB_CFG_PORT
, port
->cap_adap
+ ADP_DP_CS_0
,
1238 return !!(data
[0] & (ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
));
1242 * tb_dp_port_enable() - Enables/disables DP paths of a port
1243 * @port: DP IN/OUT port
1244 * @enable: Enable/disable DP path
1246 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1247 * calling this function.
1249 int tb_dp_port_enable(struct tb_port
*port
, bool enable
)
1254 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1255 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1260 data
[0] |= ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
;
1262 data
[0] &= ~(ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
);
1264 return tb_port_write(port
, data
, TB_CFG_PORT
,
1265 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1268 /* switch utility functions */
1270 static const char *tb_switch_generation_name(const struct tb_switch
*sw
)
1272 switch (sw
->generation
) {
1274 return "Thunderbolt 1";
1276 return "Thunderbolt 2";
1278 return "Thunderbolt 3";
1286 static void tb_dump_switch(const struct tb
*tb
, const struct tb_switch
*sw
)
1288 const struct tb_regs_switch_header
*regs
= &sw
->config
;
1290 tb_dbg(tb
, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1291 tb_switch_generation_name(sw
), regs
->vendor_id
, regs
->device_id
,
1292 regs
->revision
, regs
->thunderbolt_version
);
1293 tb_dbg(tb
, " Max Port Number: %d\n", regs
->max_port_number
);
1294 tb_dbg(tb
, " Config:\n");
1296 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1297 regs
->upstream_port_number
, regs
->depth
,
1298 (((u64
) regs
->route_hi
) << 32) | regs
->route_lo
,
1299 regs
->enabled
, regs
->plug_events_delay
);
1300 tb_dbg(tb
, " unknown1: %#x unknown4: %#x\n",
1301 regs
->__unknown1
, regs
->__unknown4
);
/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
1310 int tb_switch_reset(struct tb_switch
*sw
)
1312 struct tb_cfg_result res
;
1314 if (sw
->generation
> 1)
1317 tb_sw_dbg(sw
, "resetting switch\n");
1319 res
.err
= tb_sw_write(sw
, ((u32
*) &sw
->config
) + 2,
1320 TB_CFG_SWITCH
, 2, 2);
1323 res
= tb_cfg_reset(sw
->tb
->ctl
, tb_route(sw
), TB_CFG_DEFAULT_TIMEOUT
);
1330 * tb_plug_events_active() - enable/disable plug events on a switch
1332 * Also configures a sane plug_events_delay of 255ms.
1334 * Return: Returns 0 on success or an error code on failure.
1336 static int tb_plug_events_active(struct tb_switch
*sw
, bool active
)
1341 if (tb_switch_is_icm(sw
) || tb_switch_is_usb4(sw
))
1344 sw
->config
.plug_events_delay
= 0xff;
1345 res
= tb_sw_write(sw
, ((u32
*) &sw
->config
) + 4, TB_CFG_SWITCH
, 4, 1);
1349 res
= tb_sw_read(sw
, &data
, TB_CFG_SWITCH
, sw
->cap_plug_events
+ 1, 1);
1354 data
= data
& 0xFFFFFF83;
1355 switch (sw
->config
.device_id
) {
1356 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
1357 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
1358 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
1366 return tb_sw_write(sw
, &data
, TB_CFG_SWITCH
,
1367 sw
->cap_plug_events
+ 1, 1);
1370 static ssize_t
authorized_show(struct device
*dev
,
1371 struct device_attribute
*attr
,
1374 struct tb_switch
*sw
= tb_to_switch(dev
);
1376 return sprintf(buf
, "%u\n", sw
->authorized
);
1379 static int tb_switch_set_authorized(struct tb_switch
*sw
, unsigned int val
)
1383 if (!mutex_trylock(&sw
->tb
->lock
))
1384 return restart_syscall();
1390 /* Approve switch */
1393 ret
= tb_domain_approve_switch_key(sw
->tb
, sw
);
1395 ret
= tb_domain_approve_switch(sw
->tb
, sw
);
1398 /* Challenge switch */
1401 ret
= tb_domain_challenge_switch_key(sw
->tb
, sw
);
1409 sw
->authorized
= val
;
1410 /* Notify status change to the userspace */
1411 kobject_uevent(&sw
->dev
.kobj
, KOBJ_CHANGE
);
1415 mutex_unlock(&sw
->tb
->lock
);
1419 static ssize_t
authorized_store(struct device
*dev
,
1420 struct device_attribute
*attr
,
1421 const char *buf
, size_t count
)
1423 struct tb_switch
*sw
= tb_to_switch(dev
);
1427 ret
= kstrtouint(buf
, 0, &val
);
1433 pm_runtime_get_sync(&sw
->dev
);
1434 ret
= tb_switch_set_authorized(sw
, val
);
1435 pm_runtime_mark_last_busy(&sw
->dev
);
1436 pm_runtime_put_autosuspend(&sw
->dev
);
1438 return ret
? ret
: count
;
1440 static DEVICE_ATTR_RW(authorized
);
1442 static ssize_t
boot_show(struct device
*dev
, struct device_attribute
*attr
,
1445 struct tb_switch
*sw
= tb_to_switch(dev
);
1447 return sprintf(buf
, "%u\n", sw
->boot
);
1449 static DEVICE_ATTR_RO(boot
);
1451 static ssize_t
device_show(struct device
*dev
, struct device_attribute
*attr
,
1454 struct tb_switch
*sw
= tb_to_switch(dev
);
1456 return sprintf(buf
, "%#x\n", sw
->device
);
1458 static DEVICE_ATTR_RO(device
);
1461 device_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1463 struct tb_switch
*sw
= tb_to_switch(dev
);
1465 return sprintf(buf
, "%s\n", sw
->device_name
? sw
->device_name
: "");
1467 static DEVICE_ATTR_RO(device_name
);
1470 generation_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1472 struct tb_switch
*sw
= tb_to_switch(dev
);
1474 return sprintf(buf
, "%u\n", sw
->generation
);
1476 static DEVICE_ATTR_RO(generation
);
1478 static ssize_t
key_show(struct device
*dev
, struct device_attribute
*attr
,
1481 struct tb_switch
*sw
= tb_to_switch(dev
);
1484 if (!mutex_trylock(&sw
->tb
->lock
))
1485 return restart_syscall();
1488 ret
= sprintf(buf
, "%*phN\n", TB_SWITCH_KEY_SIZE
, sw
->key
);
1490 ret
= sprintf(buf
, "\n");
1492 mutex_unlock(&sw
->tb
->lock
);
1496 static ssize_t
key_store(struct device
*dev
, struct device_attribute
*attr
,
1497 const char *buf
, size_t count
)
1499 struct tb_switch
*sw
= tb_to_switch(dev
);
1500 u8 key
[TB_SWITCH_KEY_SIZE
];
1501 ssize_t ret
= count
;
1504 if (!strcmp(buf
, "\n"))
1506 else if (hex2bin(key
, buf
, sizeof(key
)))
1509 if (!mutex_trylock(&sw
->tb
->lock
))
1510 return restart_syscall();
1512 if (sw
->authorized
) {
1519 sw
->key
= kmemdup(key
, sizeof(key
), GFP_KERNEL
);
1525 mutex_unlock(&sw
->tb
->lock
);
1528 static DEVICE_ATTR(key
, 0600, key_show
, key_store
);
1530 static ssize_t
speed_show(struct device
*dev
, struct device_attribute
*attr
,
1533 struct tb_switch
*sw
= tb_to_switch(dev
);
1535 return sprintf(buf
, "%u.0 Gb/s\n", sw
->link_speed
);
1539 * Currently all lanes must run at the same speed but we expose here
1540 * both directions to allow possible asymmetric links in the future.
1542 static DEVICE_ATTR(rx_speed
, 0444, speed_show
, NULL
);
1543 static DEVICE_ATTR(tx_speed
, 0444, speed_show
, NULL
);
1545 static ssize_t
lanes_show(struct device
*dev
, struct device_attribute
*attr
,
1548 struct tb_switch
*sw
= tb_to_switch(dev
);
1550 return sprintf(buf
, "%u\n", sw
->link_width
);
1554 * Currently link has same amount of lanes both directions (1 or 2) but
1555 * expose them separately to allow possible asymmetric links in the future.
1557 static DEVICE_ATTR(rx_lanes
, 0444, lanes_show
, NULL
);
1558 static DEVICE_ATTR(tx_lanes
, 0444, lanes_show
, NULL
);
1560 static ssize_t
nvm_authenticate_show(struct device
*dev
,
1561 struct device_attribute
*attr
, char *buf
)
1563 struct tb_switch
*sw
= tb_to_switch(dev
);
1566 nvm_get_auth_status(sw
, &status
);
1567 return sprintf(buf
, "%#x\n", status
);
1570 static ssize_t
nvm_authenticate_sysfs(struct device
*dev
, const char *buf
,
1573 struct tb_switch
*sw
= tb_to_switch(dev
);
1577 pm_runtime_get_sync(&sw
->dev
);
1579 if (!mutex_trylock(&sw
->tb
->lock
)) {
1580 ret
= restart_syscall();
1584 /* If NVMem devices are not yet added */
1590 ret
= kstrtoint(buf
, 10, &val
);
1594 /* Always clear the authentication status */
1595 nvm_clear_auth_status(sw
);
1598 if (!sw
->nvm
->flushed
) {
1599 if (!sw
->nvm
->buf
) {
1604 ret
= nvm_validate_and_write(sw
);
1605 if (ret
|| val
== WRITE_ONLY
)
1608 if (val
== WRITE_AND_AUTHENTICATE
) {
1610 ret
= tb_lc_force_power(sw
);
1612 sw
->nvm
->authenticating
= true;
1613 ret
= nvm_authenticate(sw
);
1619 mutex_unlock(&sw
->tb
->lock
);
1621 pm_runtime_mark_last_busy(&sw
->dev
);
1622 pm_runtime_put_autosuspend(&sw
->dev
);
1627 static ssize_t
nvm_authenticate_store(struct device
*dev
,
1628 struct device_attribute
*attr
, const char *buf
, size_t count
)
1630 int ret
= nvm_authenticate_sysfs(dev
, buf
, false);
1635 static DEVICE_ATTR_RW(nvm_authenticate
);
1637 static ssize_t
nvm_authenticate_on_disconnect_show(struct device
*dev
,
1638 struct device_attribute
*attr
, char *buf
)
1640 return nvm_authenticate_show(dev
, attr
, buf
);
1643 static ssize_t
nvm_authenticate_on_disconnect_store(struct device
*dev
,
1644 struct device_attribute
*attr
, const char *buf
, size_t count
)
1648 ret
= nvm_authenticate_sysfs(dev
, buf
, true);
1649 return ret
? ret
: count
;
1651 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect
);
1653 static ssize_t
nvm_version_show(struct device
*dev
,
1654 struct device_attribute
*attr
, char *buf
)
1656 struct tb_switch
*sw
= tb_to_switch(dev
);
1659 if (!mutex_trylock(&sw
->tb
->lock
))
1660 return restart_syscall();
1667 ret
= sprintf(buf
, "%x.%x\n", sw
->nvm
->major
, sw
->nvm
->minor
);
1669 mutex_unlock(&sw
->tb
->lock
);
1673 static DEVICE_ATTR_RO(nvm_version
);
1675 static ssize_t
vendor_show(struct device
*dev
, struct device_attribute
*attr
,
1678 struct tb_switch
*sw
= tb_to_switch(dev
);
1680 return sprintf(buf
, "%#x\n", sw
->vendor
);
1682 static DEVICE_ATTR_RO(vendor
);
1685 vendor_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1687 struct tb_switch
*sw
= tb_to_switch(dev
);
1689 return sprintf(buf
, "%s\n", sw
->vendor_name
? sw
->vendor_name
: "");
1691 static DEVICE_ATTR_RO(vendor_name
);
1693 static ssize_t
unique_id_show(struct device
*dev
, struct device_attribute
*attr
,
1696 struct tb_switch
*sw
= tb_to_switch(dev
);
1698 return sprintf(buf
, "%pUb\n", sw
->uuid
);
1700 static DEVICE_ATTR_RO(unique_id
);
1702 static struct attribute
*switch_attrs
[] = {
1703 &dev_attr_authorized
.attr
,
1704 &dev_attr_boot
.attr
,
1705 &dev_attr_device
.attr
,
1706 &dev_attr_device_name
.attr
,
1707 &dev_attr_generation
.attr
,
1709 &dev_attr_nvm_authenticate
.attr
,
1710 &dev_attr_nvm_authenticate_on_disconnect
.attr
,
1711 &dev_attr_nvm_version
.attr
,
1712 &dev_attr_rx_speed
.attr
,
1713 &dev_attr_rx_lanes
.attr
,
1714 &dev_attr_tx_speed
.attr
,
1715 &dev_attr_tx_lanes
.attr
,
1716 &dev_attr_vendor
.attr
,
1717 &dev_attr_vendor_name
.attr
,
1718 &dev_attr_unique_id
.attr
,
1722 static umode_t
switch_attr_is_visible(struct kobject
*kobj
,
1723 struct attribute
*attr
, int n
)
1725 struct device
*dev
= kobj_to_dev(kobj
);
1726 struct tb_switch
*sw
= tb_to_switch(dev
);
1728 if (attr
== &dev_attr_device
.attr
) {
1731 } else if (attr
== &dev_attr_device_name
.attr
) {
1732 if (!sw
->device_name
)
1734 } else if (attr
== &dev_attr_vendor
.attr
) {
1737 } else if (attr
== &dev_attr_vendor_name
.attr
) {
1738 if (!sw
->vendor_name
)
1740 } else if (attr
== &dev_attr_key
.attr
) {
1742 sw
->tb
->security_level
== TB_SECURITY_SECURE
&&
1743 sw
->security_level
== TB_SECURITY_SECURE
)
1746 } else if (attr
== &dev_attr_rx_speed
.attr
||
1747 attr
== &dev_attr_rx_lanes
.attr
||
1748 attr
== &dev_attr_tx_speed
.attr
||
1749 attr
== &dev_attr_tx_lanes
.attr
) {
1753 } else if (attr
== &dev_attr_nvm_authenticate
.attr
) {
1754 if (nvm_upgradeable(sw
))
1757 } else if (attr
== &dev_attr_nvm_version
.attr
) {
1758 if (nvm_readable(sw
))
1761 } else if (attr
== &dev_attr_boot
.attr
) {
1765 } else if (attr
== &dev_attr_nvm_authenticate_on_disconnect
.attr
) {
1766 if (sw
->quirks
& QUIRK_FORCE_POWER_LINK_CONTROLLER
)
1771 return sw
->safe_mode
? 0 : attr
->mode
;
1774 static struct attribute_group switch_group
= {
1775 .is_visible
= switch_attr_is_visible
,
1776 .attrs
= switch_attrs
,
1779 static const struct attribute_group
*switch_groups
[] = {
1784 static void tb_switch_release(struct device
*dev
)
1786 struct tb_switch
*sw
= tb_to_switch(dev
);
1787 struct tb_port
*port
;
1789 dma_port_free(sw
->dma_port
);
1791 tb_switch_for_each_port(sw
, port
) {
1792 if (!port
->disabled
) {
1793 ida_destroy(&port
->in_hopids
);
1794 ida_destroy(&port
->out_hopids
);
1799 kfree(sw
->device_name
);
1800 kfree(sw
->vendor_name
);
1808 * Currently only need to provide the callbacks. Everything else is handled
1809 * in the connection manager.
1811 static int __maybe_unused
tb_switch_runtime_suspend(struct device
*dev
)
1813 struct tb_switch
*sw
= tb_to_switch(dev
);
1814 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
1816 if (cm_ops
->runtime_suspend_switch
)
1817 return cm_ops
->runtime_suspend_switch(sw
);
1822 static int __maybe_unused
tb_switch_runtime_resume(struct device
*dev
)
1824 struct tb_switch
*sw
= tb_to_switch(dev
);
1825 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
1827 if (cm_ops
->runtime_resume_switch
)
1828 return cm_ops
->runtime_resume_switch(sw
);
1832 static const struct dev_pm_ops tb_switch_pm_ops
= {
1833 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend
, tb_switch_runtime_resume
,
1837 struct device_type tb_switch_type
= {
1838 .name
= "thunderbolt_device",
1839 .release
= tb_switch_release
,
1840 .pm
= &tb_switch_pm_ops
,
1843 static int tb_switch_get_generation(struct tb_switch
*sw
)
1845 switch (sw
->config
.device_id
) {
1846 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
1847 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
1848 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK
:
1849 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C
:
1850 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
:
1851 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
1852 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE
:
1853 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE
:
1856 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE
:
1857 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE
:
1858 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE
:
1861 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE
:
1862 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE
:
1863 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE
:
1864 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE
:
1865 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE
:
1866 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE
:
1867 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE
:
1868 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE
:
1869 case PCI_DEVICE_ID_INTEL_ICL_NHI0
:
1870 case PCI_DEVICE_ID_INTEL_ICL_NHI1
:
1874 if (tb_switch_is_usb4(sw
))
1878 * For unknown switches assume generation to be 1 to be
1881 tb_sw_warn(sw
, "unsupported switch device id %#x\n",
1882 sw
->config
.device_id
);
1887 static bool tb_switch_exceeds_max_depth(const struct tb_switch
*sw
, int depth
)
1891 if (tb_switch_is_usb4(sw
) ||
1892 (sw
->tb
->root_switch
&& tb_switch_is_usb4(sw
->tb
->root_switch
)))
1893 max_depth
= USB4_SWITCH_MAX_DEPTH
;
1895 max_depth
= TB_SWITCH_MAX_DEPTH
;
1897 return depth
> max_depth
;
1901 * tb_switch_alloc() - allocate a switch
1902 * @tb: Pointer to the owning domain
1903 * @parent: Parent device for this switch
1904 * @route: Route string for this switch
1906 * Allocates and initializes a switch. Will not upload configuration to
1907 * the switch. For that you need to call tb_switch_configure()
1908 * separately. The returned switch should be released by calling
1911 * Return: Pointer to the allocated switch or ERR_PTR() in case of
1914 struct tb_switch
*tb_switch_alloc(struct tb
*tb
, struct device
*parent
,
1917 struct tb_switch
*sw
;
1921 /* Unlock the downstream port so we can access the switch below */
1923 struct tb_switch
*parent_sw
= tb_to_switch(parent
);
1924 struct tb_port
*down
;
1926 down
= tb_port_at(route
, parent_sw
);
1927 tb_port_unlock(down
);
1930 depth
= tb_route_length(route
);
1932 upstream_port
= tb_cfg_get_upstream_port(tb
->ctl
, route
);
1933 if (upstream_port
< 0)
1934 return ERR_PTR(upstream_port
);
1936 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
1938 return ERR_PTR(-ENOMEM
);
1941 ret
= tb_cfg_read(tb
->ctl
, &sw
->config
, route
, 0, TB_CFG_SWITCH
, 0, 5);
1943 goto err_free_sw_ports
;
1945 sw
->generation
= tb_switch_get_generation(sw
);
1947 tb_dbg(tb
, "current switch config:\n");
1948 tb_dump_switch(tb
, sw
);
1950 /* configure switch */
1951 sw
->config
.upstream_port_number
= upstream_port
;
1952 sw
->config
.depth
= depth
;
1953 sw
->config
.route_hi
= upper_32_bits(route
);
1954 sw
->config
.route_lo
= lower_32_bits(route
);
1955 sw
->config
.enabled
= 0;
1957 /* Make sure we do not exceed maximum topology limit */
1958 if (tb_switch_exceeds_max_depth(sw
, depth
)) {
1959 ret
= -EADDRNOTAVAIL
;
1960 goto err_free_sw_ports
;
1963 /* initialize ports */
1964 sw
->ports
= kcalloc(sw
->config
.max_port_number
+ 1, sizeof(*sw
->ports
),
1968 goto err_free_sw_ports
;
1971 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
1972 /* minimum setup for tb_find_cap and tb_drom_read to work */
1973 sw
->ports
[i
].sw
= sw
;
1974 sw
->ports
[i
].port
= i
;
1977 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_PLUG_EVENTS
);
1979 sw
->cap_plug_events
= ret
;
1981 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_LINK_CONTROLLER
);
1985 /* Root switch is always authorized */
1987 sw
->authorized
= true;
1989 device_initialize(&sw
->dev
);
1990 sw
->dev
.parent
= parent
;
1991 sw
->dev
.bus
= &tb_bus_type
;
1992 sw
->dev
.type
= &tb_switch_type
;
1993 sw
->dev
.groups
= switch_groups
;
1994 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
2002 return ERR_PTR(ret
);
2006 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2007 * @tb: Pointer to the owning domain
2008 * @parent: Parent device for this switch
2009 * @route: Route string for this switch
2011 * This creates a switch in safe mode. This means the switch pretty much
2012 * lacks all capabilities except DMA configuration port before it is
2013 * flashed with a valid NVM firmware.
2015 * The returned switch must be released by calling tb_switch_put().
2017 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2020 tb_switch_alloc_safe_mode(struct tb
*tb
, struct device
*parent
, u64 route
)
2022 struct tb_switch
*sw
;
2024 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
2026 return ERR_PTR(-ENOMEM
);
2029 sw
->config
.depth
= tb_route_length(route
);
2030 sw
->config
.route_hi
= upper_32_bits(route
);
2031 sw
->config
.route_lo
= lower_32_bits(route
);
2032 sw
->safe_mode
= true;
2034 device_initialize(&sw
->dev
);
2035 sw
->dev
.parent
= parent
;
2036 sw
->dev
.bus
= &tb_bus_type
;
2037 sw
->dev
.type
= &tb_switch_type
;
2038 sw
->dev
.groups
= switch_groups
;
2039 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
2045 * tb_switch_configure() - Uploads configuration to the switch
2046 * @sw: Switch to configure
2048 * Call this function before the switch is added to the system. It will
2049 * upload configuration to the switch and makes it available for the
2050 * connection manager to use. Can be called to the switch again after
2051 * resume from low power states to re-initialize it.
2053 * Return: %0 in case of success and negative errno in case of failure
2055 int tb_switch_configure(struct tb_switch
*sw
)
2057 struct tb
*tb
= sw
->tb
;
2061 route
= tb_route(sw
);
2063 tb_dbg(tb
, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2064 sw
->config
.enabled
? "restoring" : "initializing", route
,
2065 tb_route_length(route
), sw
->config
.upstream_port_number
);
2067 sw
->config
.enabled
= 1;
2069 if (tb_switch_is_usb4(sw
)) {
2071 * For USB4 devices, we need to program the CM version
2072 * accordingly so that it knows to expose all the
2073 * additional capabilities.
2075 sw
->config
.cmuv
= USB4_VERSION_1_0
;
2077 /* Enumerate the switch */
2078 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2083 ret
= usb4_switch_setup(sw
);
2085 if (sw
->config
.vendor_id
!= PCI_VENDOR_ID_INTEL
)
2086 tb_sw_warn(sw
, "unknown switch vendor id %#x\n",
2087 sw
->config
.vendor_id
);
2089 if (!sw
->cap_plug_events
) {
2090 tb_sw_warn(sw
, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2094 /* Enumerate the switch */
2095 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2101 return tb_plug_events_active(sw
, true);
2104 static int tb_switch_set_uuid(struct tb_switch
*sw
)
2113 if (tb_switch_is_usb4(sw
)) {
2114 ret
= usb4_switch_read_uid(sw
, &sw
->uid
);
2120 * The newer controllers include fused UUID as part of
2121 * link controller specific registers
2123 ret
= tb_lc_read_uuid(sw
, uuid
);
2133 * ICM generates UUID based on UID and fills the upper
2134 * two words with ones. This is not strictly following
2135 * UUID format but we want to be compatible with it so
2136 * we do the same here.
2138 uuid
[0] = sw
->uid
& 0xffffffff;
2139 uuid
[1] = (sw
->uid
>> 32) & 0xffffffff;
2140 uuid
[2] = 0xffffffff;
2141 uuid
[3] = 0xffffffff;
2144 sw
->uuid
= kmemdup(uuid
, sizeof(uuid
), GFP_KERNEL
);
2150 static int tb_switch_add_dma_port(struct tb_switch
*sw
)
2155 switch (sw
->generation
) {
2157 /* Only root switch can be upgraded */
2164 ret
= tb_switch_set_uuid(sw
);
2171 * DMA port is the only thing available when the switch
2179 if (sw
->no_nvm_upgrade
)
2182 if (tb_switch_is_usb4(sw
)) {
2183 ret
= usb4_switch_nvm_authenticate_status(sw
, &status
);
2188 tb_sw_info(sw
, "switch flash authentication failed\n");
2189 nvm_set_auth_status(sw
, status
);
2195 /* Root switch DMA port requires running firmware */
2196 if (!tb_route(sw
) && !tb_switch_is_icm(sw
))
2199 sw
->dma_port
= dma_port_alloc(sw
);
2204 * If there is status already set then authentication failed
2205 * when the dma_port_flash_update_auth() returned. Power cycling
2206 * is not needed (it was done already) so only thing we do here
2207 * is to unblock runtime PM of the root port.
2209 nvm_get_auth_status(sw
, &status
);
2212 nvm_authenticate_complete_dma_port(sw
);
2217 * Check status of the previous flash authentication. If there
2218 * is one we need to power cycle the switch in any case to make
2219 * it functional again.
2221 ret
= dma_port_flash_update_auth_status(sw
->dma_port
, &status
);
2225 /* Now we can allow root port to suspend again */
2227 nvm_authenticate_complete_dma_port(sw
);
2230 tb_sw_info(sw
, "switch flash authentication failed\n");
2231 nvm_set_auth_status(sw
, status
);
2234 tb_sw_info(sw
, "power cycling the switch now\n");
2235 dma_port_power_cycle(sw
->dma_port
);
2238 * We return error here which causes the switch adding failure.
2239 * It should appear back after power cycle is complete.
2244 static void tb_switch_default_link_ports(struct tb_switch
*sw
)
2248 for (i
= 1; i
<= sw
->config
.max_port_number
; i
+= 2) {
2249 struct tb_port
*port
= &sw
->ports
[i
];
2250 struct tb_port
*subordinate
;
2252 if (!tb_port_is_null(port
))
2255 /* Check for the subordinate port */
2256 if (i
== sw
->config
.max_port_number
||
2257 !tb_port_is_null(&sw
->ports
[i
+ 1]))
2260 /* Link them if not already done so (by DROM) */
2261 subordinate
= &sw
->ports
[i
+ 1];
2262 if (!port
->dual_link_port
&& !subordinate
->dual_link_port
) {
2264 port
->dual_link_port
= subordinate
;
2265 subordinate
->link_nr
= 1;
2266 subordinate
->dual_link_port
= port
;
2268 tb_sw_dbg(sw
, "linked ports %d <-> %d\n",
2269 port
->port
, subordinate
->port
);
2274 static bool tb_switch_lane_bonding_possible(struct tb_switch
*sw
)
2276 const struct tb_port
*up
= tb_upstream_port(sw
);
2278 if (!up
->dual_link_port
|| !up
->dual_link_port
->remote
)
2281 if (tb_switch_is_usb4(sw
))
2282 return usb4_switch_lane_bonding_possible(sw
);
2283 return tb_lc_lane_bonding_possible(sw
);
2286 static int tb_switch_update_link_attributes(struct tb_switch
*sw
)
2289 bool change
= false;
2292 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2295 up
= tb_upstream_port(sw
);
2297 ret
= tb_port_get_link_speed(up
);
2300 if (sw
->link_speed
!= ret
)
2302 sw
->link_speed
= ret
;
2304 ret
= tb_port_get_link_width(up
);
2307 if (sw
->link_width
!= ret
)
2309 sw
->link_width
= ret
;
2311 /* Notify userspace that there is possible link attribute change */
2312 if (device_is_registered(&sw
->dev
) && change
)
2313 kobject_uevent(&sw
->dev
.kobj
, KOBJ_CHANGE
);
2319 * tb_switch_lane_bonding_enable() - Enable lane bonding
2320 * @sw: Switch to enable lane bonding
2322 * Connection manager can call this function to enable lane bonding of a
2323 * switch. If conditions are correct and both switches support the feature,
2324 * lanes are bonded. It is safe to call this to any switch.
2326 int tb_switch_lane_bonding_enable(struct tb_switch
*sw
)
2328 struct tb_switch
*parent
= tb_to_switch(sw
->dev
.parent
);
2329 struct tb_port
*up
, *down
;
2330 u64 route
= tb_route(sw
);
2336 if (!tb_switch_lane_bonding_possible(sw
))
2339 up
= tb_upstream_port(sw
);
2340 down
= tb_port_at(route
, parent
);
2342 if (!tb_port_is_width_supported(up
, 2) ||
2343 !tb_port_is_width_supported(down
, 2))
2346 ret
= tb_port_lane_bonding_enable(up
);
2348 tb_port_warn(up
, "failed to enable lane bonding\n");
2352 ret
= tb_port_lane_bonding_enable(down
);
2354 tb_port_warn(down
, "failed to enable lane bonding\n");
2355 tb_port_lane_bonding_disable(up
);
2359 tb_switch_update_link_attributes(sw
);
2361 tb_sw_dbg(sw
, "lane bonding enabled\n");
2366 * tb_switch_lane_bonding_disable() - Disable lane bonding
2367 * @sw: Switch whose lane bonding to disable
2369 * Disables lane bonding between @sw and parent. This can be called even
2370 * if lanes were not bonded originally.
2372 void tb_switch_lane_bonding_disable(struct tb_switch
*sw
)
2374 struct tb_switch
*parent
= tb_to_switch(sw
->dev
.parent
);
2375 struct tb_port
*up
, *down
;
2380 up
= tb_upstream_port(sw
);
2384 down
= tb_port_at(tb_route(sw
), parent
);
2386 tb_port_lane_bonding_disable(up
);
2387 tb_port_lane_bonding_disable(down
);
2389 tb_switch_update_link_attributes(sw
);
2390 tb_sw_dbg(sw
, "lane bonding disabled\n");
2394 * tb_switch_configure_link() - Set link configured
2395 * @sw: Switch whose link is configured
2397 * Sets the link upstream from @sw configured (from both ends) so that
2398 * it will not be disconnected when the domain exits sleep. Can be
2399 * called for any switch.
2401 * It is recommended that this is called after lane bonding is enabled.
2403 * Returns %0 on success and negative errno in case of error.
2405 int tb_switch_configure_link(struct tb_switch
*sw
)
2407 struct tb_port
*up
, *down
;
2410 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2413 up
= tb_upstream_port(sw
);
2414 if (tb_switch_is_usb4(up
->sw
))
2415 ret
= usb4_port_configure(up
);
2417 ret
= tb_lc_configure_port(up
);
2422 if (tb_switch_is_usb4(down
->sw
))
2423 return usb4_port_configure(down
);
2424 return tb_lc_configure_port(down
);
/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
2434 void tb_switch_unconfigure_link(struct tb_switch
*sw
)
2436 struct tb_port
*up
, *down
;
2438 if (sw
->is_unplugged
)
2440 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2443 up
= tb_upstream_port(sw
);
2444 if (tb_switch_is_usb4(up
->sw
))
2445 usb4_port_unconfigure(up
);
2447 tb_lc_unconfigure_port(up
);
2450 if (tb_switch_is_usb4(down
->sw
))
2451 usb4_port_unconfigure(down
);
2453 tb_lc_unconfigure_port(down
);
2457 * tb_switch_add() - Add a switch to the domain
2458 * @sw: Switch to add
2460 * This is the last step in adding switch to the domain. It will read
2461 * identification information from DROM and initializes ports so that
2462 * they can be used to connect other switches. The switch will be
2463 * exposed to the userspace when this function successfully returns. To
2464 * remove and release the switch, call tb_switch_remove().
2466 * Return: %0 in case of success and negative errno in case of failure
2468 int tb_switch_add(struct tb_switch
*sw
)
2473 * Initialize DMA control port now before we read DROM. Recent
2474 * host controllers have more complete DROM on NVM that includes
2475 * vendor and model identification strings which we then expose
2476 * to the userspace. NVM can be accessed through DMA
2477 * configuration based mailbox.
2479 ret
= tb_switch_add_dma_port(sw
);
2481 dev_err(&sw
->dev
, "failed to add DMA port\n");
2485 if (!sw
->safe_mode
) {
2487 ret
= tb_drom_read(sw
);
2489 dev_err(&sw
->dev
, "reading DROM failed\n");
2492 tb_sw_dbg(sw
, "uid: %#llx\n", sw
->uid
);
2494 ret
= tb_switch_set_uuid(sw
);
2496 dev_err(&sw
->dev
, "failed to set UUID\n");
2500 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
2501 if (sw
->ports
[i
].disabled
) {
2502 tb_port_dbg(&sw
->ports
[i
], "disabled by eeprom\n");
2505 ret
= tb_init_port(&sw
->ports
[i
]);
2507 dev_err(&sw
->dev
, "failed to initialize port %d\n", i
);
2512 tb_switch_default_link_ports(sw
);
2514 ret
= tb_switch_update_link_attributes(sw
);
2518 ret
= tb_switch_tmu_init(sw
);
2523 ret
= device_add(&sw
->dev
);
2525 dev_err(&sw
->dev
, "failed to add device: %d\n", ret
);
2530 dev_info(&sw
->dev
, "new device found, vendor=%#x device=%#x\n",
2531 sw
->vendor
, sw
->device
);
2532 if (sw
->vendor_name
&& sw
->device_name
)
2533 dev_info(&sw
->dev
, "%s %s\n", sw
->vendor_name
,
2537 ret
= tb_switch_nvm_add(sw
);
2539 dev_err(&sw
->dev
, "failed to add NVM devices\n");
2540 device_del(&sw
->dev
);
2545 * Thunderbolt routers do not generate wakeups themselves but
2546 * they forward wakeups from tunneled protocols, so enable it
2549 device_init_wakeup(&sw
->dev
, true);
2551 pm_runtime_set_active(&sw
->dev
);
2553 pm_runtime_set_autosuspend_delay(&sw
->dev
, TB_AUTOSUSPEND_DELAY
);
2554 pm_runtime_use_autosuspend(&sw
->dev
);
2555 pm_runtime_mark_last_busy(&sw
->dev
);
2556 pm_runtime_enable(&sw
->dev
);
2557 pm_request_autosuspend(&sw
->dev
);
2560 tb_switch_debugfs_init(sw
);
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

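/*
 * Illustrative sketch (not part of the driver): how an unplug event is
 * typically handled by a connection manager. Marking the subtree
 * unplugged first keeps tb_switch_remove() from touching hardware that
 * is already gone. The helper name example_handle_unplug() is made up;
 * see the real hotplug handling in tb.c for the complete sequence.
 */
static void __maybe_unused example_handle_unplug(struct tb_port *port)
{
	if (!tb_port_has_remote(port))
		return;

	/* Mark the router and everything below it as unplugged */
	tb_sw_set_unplugged(port->remote->sw);

	/* Unregister the devices and drop the references */
	tb_switch_remove(port->remote->sw);
	port->remote = NULL;
}
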
/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote &&
			    tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends the router and all its children. Enables wakes according to
 * the value of @runtime and then sets the sleep bit for the router. If
 * @sw is the host router, the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

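/*
 * Illustrative sketch (not part of the driver): system sleep and resume
 * in the connection manager go through the root router, which then
 * walks the whole tree recursively. The helper names are made up; the
 * real hooks are tb_suspend_noirq() and tb_resume_noirq() in tb.c.
 */
static void __maybe_unused example_domain_sleep(struct tb *tb)
{
	/* Enable wakes and set the sleep bit for every router in the tree */
	tb_switch_suspend(tb->root_switch, false);
}

static void __maybe_unused example_domain_wake(struct tb *tb)
{
	/* Re-validate and reconfigure surviving routers after resume */
	if (tb_switch_resume(tb->root_switch))
		tb_warn(tb, "failed to resume root router\n");
}
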
/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using
 * switch-specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}

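/*
 * Illustrative sketch (not part of the driver): how a DP tunnel setup
 * path might claim a DP IN adapter using the helpers above. The
 * function name example_claim_dp_in() is made up; the real users are
 * the DP tunneling paths in tb.c.
 */
static int __maybe_unused example_claim_dp_in(struct tb_switch *sw,
					      struct tb_port *in)
{
	/* Check availability first, then allocate the resource */
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	/* Pair with tb_switch_dealloc_dp_resource() when the tunnel goes away */
	return tb_switch_alloc_dp_resource(sw, in);
}
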
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

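/*
 * Illustrative sketch (not part of the driver): the firmware connection
 * manager (icm.c) identifies routers by link and depth, so a disconnect
 * notification is handled roughly like this. The helper name
 * example_device_disconnected() is made up for illustration.
 */
static void __maybe_unused example_device_disconnected(struct tb *tb,
							u8 link, u8 depth)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw)
		return;

	tb_switch_remove(sw);
	/* Drop the reference taken by the lookup */
	tb_switch_put(sw);
}
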
/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

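/*
 * Illustrative sketch (not part of the driver): every successful lookup
 * returns a router with an elevated reference count, so it must be
 * balanced with tb_switch_put(). Note that route 0 returns the host
 * (root) router itself. The helper name example_router_present() is
 * made up for illustration.
 */
static bool __maybe_unused example_router_present(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return false;

	/* Drop the reference taken by the lookup */
	tb_switch_put(sw);
	return true;
}
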
/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

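/*
 * Illustrative sketch (not part of the driver): tb_switch_find_port()
 * is handy for locating protocol adapters, for example the first USB3
 * upstream adapter of a router. The helper name is made up for
 * illustration.
 */
static __maybe_unused struct tb_port *example_find_usb3_up(struct tb_switch *sw)
{
	/* Returns NULL if the router has no USB3 upstream adapter */
	return tb_switch_find_port(sw, TB_TYPE_USB3_UP);
}
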