// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"
/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);
struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};
/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}
static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}
static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_write(sw, 0, buf, image_size);
	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}
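/*
 * A rough map of the image layout implied by the checks above; only the
 * offsets actually tested by nvm_validate_and_write() are certain, the
 * rest is an assumption for illustration:
 *
 *   0x0                  : FARB pointer, low 24 bits = hdr_size, i.e. the
 *                          start of the digital section (4k aligned)
 *   NVM_CSS              : CSS headers, written separately when
 *                          sw->generation < 3
 *   hdr_size             : u16 digital section size (< image size)
 *   hdr_size + NVM_DEVID : u16 device ID, must match config space
 */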
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;
		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}
211 static int nvm_authenticate_device_dma_port(struct tb_switch
*sw
)
213 int ret
, retries
= 10;
215 ret
= dma_port_flash_update_auth(sw
->dma_port
);
221 /* Power cycle is required */
	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
236 ret
= dma_port_flash_update_auth_status(sw
->dma_port
, &status
);
237 if (ret
< 0 && ret
!= -ETIMEDOUT
)
241 tb_sw_warn(sw
, "failed to authenticate NVM\n");
242 nvm_set_auth_status(sw
, status
);
245 tb_sw_info(sw
, "power cycling the switch now\n");
246 dma_port_power_cycle(sw
->dma_port
);
256 static void nvm_authenticate_start_dma_port(struct tb_switch
*sw
)
258 struct pci_dev
*root_port
;
261 * During host router NVM upgrade we should not allow root port to
262 * go into D3cold because some root ports cannot trigger PME
263 * itself. To be on the safe side keep the root port in D0 during
264 * the whole upgrade process.
266 root_port
= pci_find_pcie_root_port(sw
->tb
->nhi
->pdev
);
268 pm_runtime_get_noresume(&root_port
->dev
);
271 static void nvm_authenticate_complete_dma_port(struct tb_switch
*sw
)
273 struct pci_dev
*root_port
;
275 root_port
= pci_find_pcie_root_port(sw
->tb
->nhi
->pdev
);
277 pm_runtime_put(&root_port
->dev
);
280 static inline bool nvm_readable(struct tb_switch
*sw
)
282 if (tb_switch_is_usb4(sw
)) {
284 * USB4 devices must support NVM operations but it is
285 * optional for hosts. Therefore we query the NVM sector
286 * size here and if it is supported assume NVM
287 * operations are implemented.
289 return usb4_switch_nvm_sector_size(sw
) > 0;
292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
293 return !!sw
->dma_port
;
296 static inline bool nvm_upgradeable(struct tb_switch
*sw
)
298 if (sw
->no_nvm_upgrade
)
300 return nvm_readable(sw
);
303 static inline int nvm_read(struct tb_switch
*sw
, unsigned int address
,
304 void *buf
, size_t size
)
306 if (tb_switch_is_usb4(sw
))
307 return usb4_switch_nvm_read(sw
, address
, buf
, size
);
308 return dma_port_flash_read(sw
->dma_port
, address
, buf
, size
);
311 static int nvm_authenticate(struct tb_switch
*sw
)
315 if (tb_switch_is_usb4(sw
))
316 return usb4_switch_nvm_authenticate(sw
);
319 nvm_authenticate_start_dma_port(sw
);
320 ret
= nvm_authenticate_host_dma_port(sw
);
322 ret
= nvm_authenticate_device_dma_port(sw
);
328 static int tb_switch_nvm_read(void *priv
, unsigned int offset
, void *val
,
331 struct tb_switch
*sw
= priv
;
334 pm_runtime_get_sync(&sw
->dev
);
336 if (!mutex_trylock(&sw
->tb
->lock
)) {
337 ret
= restart_syscall();
341 ret
= nvm_read(sw
, offset
, val
, bytes
);
342 mutex_unlock(&sw
->tb
->lock
);
345 pm_runtime_mark_last_busy(&sw
->dev
);
346 pm_runtime_put_autosuspend(&sw
->dev
);
351 static int tb_switch_nvm_no_read(void *priv
, unsigned int offset
, void *val
,
357 static int tb_switch_nvm_write(void *priv
, unsigned int offset
, void *val
,
360 struct tb_switch
*sw
= priv
;
363 if (!mutex_trylock(&sw
->tb
->lock
))
364 return restart_syscall();
367 * Since writing the NVM image might require some special steps,
368 * for example when CSS headers are written, we cache the image
369 * locally here and handle the special cases when the user asks
370 * us to authenticate the image.
373 sw
->nvm
->buf
= vmalloc(NVM_MAX_SIZE
);
380 sw
->nvm
->buf_data_size
= offset
+ bytes
;
381 memcpy(sw
->nvm
->buf
+ offset
, val
, bytes
);
384 mutex_unlock(&sw
->tb
->lock
);
389 static struct nvmem_device
*register_nvmem(struct tb_switch
*sw
, int id
,
390 size_t size
, bool active
)
392 struct nvmem_config config
;
394 memset(&config
, 0, sizeof(config
));
397 config
.name
= "nvm_active";
398 config
.reg_read
= tb_switch_nvm_read
;
399 config
.read_only
= true;
401 config
.name
= "nvm_non_active";
402 config
.reg_read
= tb_switch_nvm_no_read
;
403 config
.reg_write
= tb_switch_nvm_write
;
404 config
.root_only
= true;
409 config
.word_size
= 4;
411 config
.dev
= &sw
->dev
;
412 config
.owner
= THIS_MODULE
;
415 return nvmem_register(&config
);
418 static int tb_switch_nvm_add(struct tb_switch
*sw
)
420 struct nvmem_device
*nvm_dev
;
421 struct tb_switch_nvm
*nvm
;
425 if (!nvm_readable(sw
))
429 * The NVM format of non-Intel hardware is not known so
430 * currently restrict NVM upgrade for Intel hardware. We may
431 * relax this in the future when we learn other NVM formats.
433 if (sw
->config
.vendor_id
!= PCI_VENDOR_ID_INTEL
) {
435 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
436 sw
->config
.vendor_id
);
440 nvm
= kzalloc(sizeof(*nvm
), GFP_KERNEL
);
444 nvm
->id
= ida_simple_get(&nvm_ida
, 0, 0, GFP_KERNEL
);
447 * If the switch is in safe-mode the only accessible portion of
448 * the NVM is the non-active one where userspace is expected to
449 * write new functional NVM.
451 if (!sw
->safe_mode
) {
452 u32 nvm_size
, hdr_size
;
454 ret
= nvm_read(sw
, NVM_FLASH_SIZE
, &val
, sizeof(val
));
458 hdr_size
= sw
->generation
< 3 ? SZ_8K
: SZ_16K
;
459 nvm_size
= (SZ_1M
<< (val
& 7)) / 8;
460 nvm_size
= (nvm_size
- hdr_size
) / 2;
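		/*
		 * Worked example of the calculation above (values assumed
		 * for illustration): if the flash size field reads
		 * val & 7 == 2, then SZ_1M << 2 is 4M and dividing by 8
		 * gives 512 KB of flash. With the 16 KB header area of a
		 * generation 3 device, each half (presumably one per NVM
		 * image) ends up as (512K - 16K) / 2 = 248 KB.
		 */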
462 ret
= nvm_read(sw
, NVM_VERSION
, &val
, sizeof(val
));
466 nvm
->major
= val
>> 16;
467 nvm
->minor
= val
>> 8;
469 nvm_dev
= register_nvmem(sw
, nvm
->id
, nvm_size
, true);
470 if (IS_ERR(nvm_dev
)) {
471 ret
= PTR_ERR(nvm_dev
);
474 nvm
->active
= nvm_dev
;
477 if (!sw
->no_nvm_upgrade
) {
478 nvm_dev
= register_nvmem(sw
, nvm
->id
, NVM_MAX_SIZE
, false);
479 if (IS_ERR(nvm_dev
)) {
480 ret
= PTR_ERR(nvm_dev
);
483 nvm
->non_active
= nvm_dev
;
491 nvmem_unregister(nvm
->active
);
493 ida_simple_remove(&nvm_ida
, nvm
->id
);
499 static void tb_switch_nvm_remove(struct tb_switch
*sw
)
501 struct tb_switch_nvm
*nvm
;
509 /* Remove authentication status in case the switch is unplugged */
510 if (!nvm
->authenticating
)
511 nvm_clear_auth_status(sw
);
514 nvmem_unregister(nvm
->non_active
);
516 nvmem_unregister(nvm
->active
);
517 ida_simple_remove(&nvm_ida
, nvm
->id
);
522 /* port utility functions */
524 static const char *tb_port_type(struct tb_regs_port_header
*port
)
526 switch (port
->type
>> 16) {
528 switch ((u8
) port
->type
) {
553 static void tb_dump_port(struct tb
*tb
, struct tb_regs_port_header
*port
)
556 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
557 port
->port_number
, port
->vendor_id
, port
->device_id
,
558 port
->revision
, port
->thunderbolt_version
, tb_port_type(port
),
560 tb_dbg(tb
, " Max hop id (in/out): %d/%d\n",
561 port
->max_in_hop_id
, port
->max_out_hop_id
);
562 tb_dbg(tb
, " Max counters: %d\n", port
->max_counters
);
563 tb_dbg(tb
, " NFC Credits: %#x\n", port
->nfc_credits
);
567 * tb_port_state() - get connectedness state of a port
569 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
571 * Return: Returns an enum tb_port_state on success or an error code on failure.
573 static int tb_port_state(struct tb_port
*port
)
575 struct tb_cap_phy phy
;
577 if (port
->cap_phy
== 0) {
578 tb_port_WARN(port
, "does not have a PHY\n");
581 res
= tb_port_read(port
, &phy
, TB_CFG_PORT
, port
->cap_phy
, 2);
588 * tb_wait_for_port() - wait for a port to become ready
590 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
591 * wait_if_unplugged is set then we also wait if the port is in state
592 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
593 * switch resume). Otherwise we only wait if a device is registered but the link
594 * has not yet been established.
596 * Return: Returns an error code on failure. Returns 0 if the port is not
597 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
598 * if the port is connected and in state TB_PORT_UP.
600 int tb_wait_for_port(struct tb_port
*port
, bool wait_if_unplugged
)
604 if (!port
->cap_phy
) {
605 tb_port_WARN(port
, "does not have PHY\n");
608 if (tb_is_upstream_port(port
)) {
609 tb_port_WARN(port
, "is the upstream port\n");
614 state
= tb_port_state(port
);
617 if (state
== TB_PORT_DISABLED
) {
618 tb_port_dbg(port
, "is disabled (state: 0)\n");
621 if (state
== TB_PORT_UNPLUGGED
) {
622 if (wait_if_unplugged
) {
623 /* used during resume */
625 "is unplugged (state: 7), retrying...\n");
629 tb_port_dbg(port
, "is unplugged (state: 7)\n");
632 if (state
== TB_PORT_UP
) {
633 tb_port_dbg(port
, "is connected, link is up (state: 2)\n");
638 * After plug-in the state is TB_PORT_CONNECTING. Give it some
642 "is connected, link is not up (state: %d), retrying...\n",
647 "failed to reach state TB_PORT_UP. Ignoring port...\n");
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
654 * Change the number of NFC credits allocated to @port by @credits. To remove
655 * NFC credits pass a negative amount of credits.
657 * Return: Returns 0 on success or an error code on failure.
659 int tb_port_add_nfc_credits(struct tb_port
*port
, int credits
)
663 if (credits
== 0 || port
->sw
->is_unplugged
)
666 nfc_credits
= port
->config
.nfc_credits
& ADP_CS_4_NFC_BUFFERS_MASK
;
667 nfc_credits
+= credits
;
669 tb_port_dbg(port
, "adding %d NFC credits to %lu", credits
,
670 port
->config
.nfc_credits
& ADP_CS_4_NFC_BUFFERS_MASK
);
672 port
->config
.nfc_credits
&= ~ADP_CS_4_NFC_BUFFERS_MASK
;
673 port
->config
.nfc_credits
|= nfc_credits
;
675 return tb_port_write(port
, &port
->config
.nfc_credits
,
676 TB_CFG_PORT
, ADP_CS_4
, 1);
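/*
 * A worked example of the read-modify-write above (numbers assumed for
 * illustration): if the NFC buffers field of ADP_CS_4 currently holds 16
 * and the caller passes credits == -4, the field is rewritten to 12 while
 * every bit outside ADP_CS_4_NFC_BUFFERS_MASK is preserved.
 */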
680 * tb_port_set_initial_credits() - Set initial port link credits allocated
681 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
684 * Set initial credits value to be used for ingress shared buffering.
686 int tb_port_set_initial_credits(struct tb_port
*port
, u32 credits
)
691 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
, ADP_CS_5
, 1);
695 data
&= ~ADP_CS_5_LCA_MASK
;
696 data
|= (credits
<< ADP_CS_5_LCA_SHIFT
) & ADP_CS_5_LCA_MASK
;
698 return tb_port_write(port
, &data
, TB_CFG_PORT
, ADP_CS_5
, 1);
702 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
704 * Return: Returns 0 on success or an error code on failure.
706 int tb_port_clear_counter(struct tb_port
*port
, int counter
)
708 u32 zero
[3] = { 0, 0, 0 };
709 tb_port_dbg(port
, "clearing counter %d\n", counter
);
710 return tb_port_write(port
, zero
, TB_CFG_COUNTERS
, 3 * counter
, 3);
714 * tb_port_unlock() - Unlock downstream port
715 * @port: Port to unlock
717 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
718 * downstream router accessible for CM.
720 int tb_port_unlock(struct tb_port
*port
)
722 if (tb_switch_is_icm(port
->sw
))
724 if (!tb_port_is_null(port
))
726 if (tb_switch_is_usb4(port
->sw
))
727 return usb4_port_unlock(port
);
732 * tb_init_port() - initialize a port
734 * This is a helper method for tb_switch_alloc. Does not check or initialize
735 * any downstream switches.
737 * Return: Returns 0 on success or an error code on failure.
739 static int tb_init_port(struct tb_port
*port
)
744 res
= tb_port_read(port
, &port
->config
, TB_CFG_PORT
, 0, 8);
746 if (res
== -ENODEV
) {
747 tb_dbg(port
->sw
->tb
, " Port %d: not implemented\n",
754 /* Port 0 is the switch itself and has no PHY. */
755 if (port
->config
.type
== TB_TYPE_PORT
&& port
->port
!= 0) {
756 cap
= tb_port_find_cap(port
, TB_PORT_CAP_PHY
);
761 tb_port_WARN(port
, "non switch port without a PHY\n");
763 cap
= tb_port_find_cap(port
, TB_PORT_CAP_USB4
);
765 port
->cap_usb4
= cap
;
766 } else if (port
->port
!= 0) {
767 cap
= tb_port_find_cap(port
, TB_PORT_CAP_ADAP
);
769 port
->cap_adap
= cap
;
772 tb_dump_port(port
->sw
->tb
, &port
->config
);
774 /* Control port does not need HopID allocation */
776 ida_init(&port
->in_hopids
);
777 ida_init(&port
->out_hopids
);
780 INIT_LIST_HEAD(&port
->list
);
785 static int tb_port_alloc_hopid(struct tb_port
*port
, bool in
, int min_hopid
,
792 port_max_hopid
= port
->config
.max_in_hop_id
;
793 ida
= &port
->in_hopids
;
795 port_max_hopid
= port
->config
.max_out_hop_id
;
796 ida
= &port
->out_hopids
;
799 /* HopIDs 0-7 are reserved */
800 if (min_hopid
< TB_PATH_MIN_HOPID
)
801 min_hopid
= TB_PATH_MIN_HOPID
;
803 if (max_hopid
< 0 || max_hopid
> port_max_hopid
)
804 max_hopid
= port_max_hopid
;
806 return ida_simple_get(ida
, min_hopid
, max_hopid
+ 1, GFP_KERNEL
);
810 * tb_port_alloc_in_hopid() - Allocate input HopID from port
811 * @port: Port to allocate HopID for
812 * @min_hopid: Minimum acceptable input HopID
813 * @max_hopid: Maximum acceptable input HopID
815 * Return: HopID between @min_hopid and @max_hopid or negative errno in
818 int tb_port_alloc_in_hopid(struct tb_port
*port
, int min_hopid
, int max_hopid
)
820 return tb_port_alloc_hopid(port
, true, min_hopid
, max_hopid
);
824 * tb_port_alloc_out_hopid() - Allocate output HopID from port
825 * @port: Port to allocate HopID for
826 * @min_hopid: Minimum acceptable output HopID
827 * @max_hopid: Maximum acceptable output HopID
829 * Return: HopID between @min_hopid and @max_hopid or negative errno in
832 int tb_port_alloc_out_hopid(struct tb_port
*port
, int min_hopid
, int max_hopid
)
834 return tb_port_alloc_hopid(port
, false, min_hopid
, max_hopid
);
838 * tb_port_release_in_hopid() - Release allocated input HopID from port
839 * @port: Port whose HopID to release
840 * @hopid: HopID to release
842 void tb_port_release_in_hopid(struct tb_port
*port
, int hopid
)
844 ida_simple_remove(&port
->in_hopids
, hopid
);
848 * tb_port_release_out_hopid() - Release allocated output HopID from port
849 * @port: Port whose HopID to release
850 * @hopid: HopID to release
852 void tb_port_release_out_hopid(struct tb_port
*port
, int hopid
)
854 ida_simple_remove(&port
->out_hopids
, hopid
);
858 * tb_next_port_on_path() - Return next port for given port on a path
859 * @start: Start port of the walk
860 * @end: End port of the walk
861 * @prev: Previous port (%NULL if this is the first)
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns another end
 * on that link.
 *
 * If the @end port has been reached, return %NULL.
870 * Domain tb->lock must be held when this function is called.
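 *
 * A typical walk (a sketch of the calling pattern, not a quote from any
 * caller) looks like:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
 *		handle(p);
 */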
872 struct tb_port
*tb_next_port_on_path(struct tb_port
*start
, struct tb_port
*end
,
873 struct tb_port
*prev
)
875 struct tb_port
*next
;
880 if (prev
->sw
== end
->sw
) {
886 if (start
->sw
->config
.depth
< end
->sw
->config
.depth
) {
888 prev
->remote
->sw
->config
.depth
> prev
->sw
->config
.depth
)
891 next
= tb_port_at(tb_route(end
->sw
), prev
->sw
);
893 if (tb_is_upstream_port(prev
)) {
896 next
= tb_upstream_port(prev
->sw
);
898 * Keep the same link if prev and next are both
901 if (next
->dual_link_port
&&
902 next
->link_nr
!= prev
->link_nr
) {
903 next
= next
->dual_link_port
;
911 static int tb_port_get_link_speed(struct tb_port
*port
)
919 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
920 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
924 speed
= (val
& LANE_ADP_CS_1_CURRENT_SPEED_MASK
) >>
925 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT
;
926 return speed
== LANE_ADP_CS_1_CURRENT_SPEED_GEN3
? 20 : 10;
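/*
 * Note on the return values above: LANE_ADP_CS_1_CURRENT_SPEED_GEN3 maps
 * to 20 Gb/s per lane and everything else is treated as 10 Gb/s, which is
 * also how the value later shows up through the speed_show() sysfs
 * attribute as "<N>.0 Gb/s".
 */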
929 static int tb_port_get_link_width(struct tb_port
*port
)
937 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
938 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
942 return (val
& LANE_ADP_CS_1_CURRENT_WIDTH_MASK
) >>
943 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT
;
946 static bool tb_port_is_width_supported(struct tb_port
*port
, int width
)
954 ret
= tb_port_read(port
, &phy
, TB_CFG_PORT
,
955 port
->cap_phy
+ LANE_ADP_CS_0
, 1);
959 widths
= (phy
& LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK
) >>
960 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT
;
962 return !!(widths
& width
);
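/*
 * The register reports supported widths as a bitfield rather than a lane
 * count, so @width is used as a mask here; elsewhere in this file the
 * value 2 is passed when probing whether dual-lane (bonded) operation is
 * supported. Reading bit 1 as "x2 capable" is an assumption based on that
 * usage.
 */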
965 static int tb_port_set_link_width(struct tb_port
*port
, unsigned int width
)
973 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
974 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
978 val
&= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK
;
981 val
|= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE
<<
982 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
985 val
|= LANE_ADP_CS_1_TARGET_WIDTH_DUAL
<<
986 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
992 val
|= LANE_ADP_CS_1_LB
;
994 return tb_port_write(port
, &val
, TB_CFG_PORT
,
995 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
998 static int tb_port_lane_bonding_enable(struct tb_port
*port
)
	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
1006 ret
= tb_port_get_link_width(port
);
1008 ret
= tb_port_set_link_width(port
, 2);
1013 ret
= tb_port_get_link_width(port
->dual_link_port
);
1015 ret
= tb_port_set_link_width(port
->dual_link_port
, 2);
1017 tb_port_set_link_width(port
, 1);
1022 port
->bonded
= true;
1023 port
->dual_link_port
->bonded
= true;
1028 static void tb_port_lane_bonding_disable(struct tb_port
*port
)
1030 port
->dual_link_port
->bonded
= false;
1031 port
->bonded
= false;
1033 tb_port_set_link_width(port
->dual_link_port
, 1);
1034 tb_port_set_link_width(port
, 1);
1038 * tb_port_is_enabled() - Is the adapter port enabled
1039 * @port: Port to check
1041 bool tb_port_is_enabled(struct tb_port
*port
)
1043 switch (port
->config
.type
) {
1044 case TB_TYPE_PCIE_UP
:
1045 case TB_TYPE_PCIE_DOWN
:
1046 return tb_pci_port_is_enabled(port
);
1048 case TB_TYPE_DP_HDMI_IN
:
1049 case TB_TYPE_DP_HDMI_OUT
:
1050 return tb_dp_port_is_enabled(port
);
1052 case TB_TYPE_USB3_UP
:
1053 case TB_TYPE_USB3_DOWN
:
1054 return tb_usb3_port_is_enabled(port
);
1062 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1063 * @port: USB3 adapter port to check
1065 bool tb_usb3_port_is_enabled(struct tb_port
*port
)
1069 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1070 port
->cap_adap
+ ADP_USB3_CS_0
, 1))
1073 return !!(data
& ADP_USB3_CS_0_PE
);
1077 * tb_usb3_port_enable() - Enable USB3 adapter port
1078 * @port: USB3 adapter port to enable
1079 * @enable: Enable/disable the USB3 adapter
1081 int tb_usb3_port_enable(struct tb_port
*port
, bool enable
)
1083 u32 word
= enable
? (ADP_USB3_CS_0_PE
| ADP_USB3_CS_0_V
)
1086 if (!port
->cap_adap
)
1088 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1089 port
->cap_adap
+ ADP_USB3_CS_0
, 1);
1093 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1094 * @port: PCIe port to check
1096 bool tb_pci_port_is_enabled(struct tb_port
*port
)
1100 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1101 port
->cap_adap
+ ADP_PCIE_CS_0
, 1))
1104 return !!(data
& ADP_PCIE_CS_0_PE
);
1108 * tb_pci_port_enable() - Enable PCIe adapter port
1109 * @port: PCIe port to enable
1110 * @enable: Enable/disable the PCIe adapter
1112 int tb_pci_port_enable(struct tb_port
*port
, bool enable
)
1114 u32 word
= enable
? ADP_PCIE_CS_0_PE
: 0x0;
1115 if (!port
->cap_adap
)
1117 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1118 port
->cap_adap
+ ADP_PCIE_CS_0
, 1);
1122 * tb_dp_port_hpd_is_active() - Is HPD already active
1123 * @port: DP out port to check
1125 * Checks if the DP OUT adapter port has HDP bit already set.
1127 int tb_dp_port_hpd_is_active(struct tb_port
*port
)
1132 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1133 port
->cap_adap
+ ADP_DP_CS_2
, 1);
1137 return !!(data
& ADP_DP_CS_2_HDP
);
1141 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1142 * @port: Port to clear HPD
1144 * If the DP IN port has HDP set, this function can be used to clear it.
1146 int tb_dp_port_hpd_clear(struct tb_port
*port
)
1151 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1152 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1156 data
|= ADP_DP_CS_3_HDPC
;
1157 return tb_port_write(port
, &data
, TB_CFG_PORT
,
1158 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1162 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1163 * @port: DP IN/OUT port to set hops
1164 * @video: Video Hop ID
1165 * @aux_tx: AUX TX Hop ID
1166 * @aux_rx: AUX RX Hop ID
1168 * Programs specified Hop IDs for DP IN/OUT port.
1170 int tb_dp_port_set_hops(struct tb_port
*port
, unsigned int video
,
1171 unsigned int aux_tx
, unsigned int aux_rx
)
1176 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1177 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1185 data
[0] |= (video
<< ADP_DP_CS_0_VIDEO_HOPID_SHIFT
) &
1186 ADP_DP_CS_0_VIDEO_HOPID_MASK
;
1187 data
[1] |= aux_tx
& ADP_DP_CS_1_AUX_TX_HOPID_MASK
;
1188 data
[1] |= (aux_rx
<< ADP_DP_CS_1_AUX_RX_HOPID_SHIFT
) &
1189 ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1191 return tb_port_write(port
, data
, TB_CFG_PORT
,
1192 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1196 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1197 * @port: DP adapter port to check
1199 bool tb_dp_port_is_enabled(struct tb_port
*port
)
1203 if (tb_port_read(port
, data
, TB_CFG_PORT
, port
->cap_adap
+ ADP_DP_CS_0
,
1207 return !!(data
[0] & (ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
));
1211 * tb_dp_port_enable() - Enables/disables DP paths of a port
1212 * @port: DP IN/OUT port
1213 * @enable: Enable/disable DP path
1215 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1216 * calling this function.
1218 int tb_dp_port_enable(struct tb_port
*port
, bool enable
)
1223 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1224 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1229 data
[0] |= ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
;
1231 data
[0] &= ~(ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
);
1233 return tb_port_write(port
, data
, TB_CFG_PORT
,
1234 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1237 /* switch utility functions */
1239 static const char *tb_switch_generation_name(const struct tb_switch
*sw
)
1241 switch (sw
->generation
) {
1243 return "Thunderbolt 1";
1245 return "Thunderbolt 2";
1247 return "Thunderbolt 3";
1255 static void tb_dump_switch(const struct tb
*tb
, const struct tb_switch
*sw
)
1257 const struct tb_regs_switch_header
*regs
= &sw
->config
;
1259 tb_dbg(tb
, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1260 tb_switch_generation_name(sw
), regs
->vendor_id
, regs
->device_id
,
1261 regs
->revision
, regs
->thunderbolt_version
);
1262 tb_dbg(tb
, " Max Port Number: %d\n", regs
->max_port_number
);
1263 tb_dbg(tb
, " Config:\n");
1265 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1266 regs
->upstream_port_number
, regs
->depth
,
1267 (((u64
) regs
->route_hi
) << 32) | regs
->route_lo
,
1268 regs
->enabled
, regs
->plug_events_delay
);
1269 tb_dbg(tb
, " unknown1: %#x unknown4: %#x\n",
1270 regs
->__unknown1
, regs
->__unknown4
);
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1276 * Return: Returns 0 on success or an error code on failure.
1278 int tb_switch_reset(struct tb
*tb
, u64 route
)
1280 struct tb_cfg_result res
;
1281 struct tb_regs_switch_header header
= {
1282 header
.route_hi
= route
>> 32,
1283 header
.route_lo
= route
,
1284 header
.enabled
= true,
1286 tb_dbg(tb
, "resetting switch at %llx\n", route
);
1287 res
.err
= tb_cfg_write(tb
->ctl
, ((u32
*) &header
) + 2, route
,
1291 res
= tb_cfg_reset(tb
->ctl
, route
, TB_CFG_DEFAULT_TIMEOUT
);
1298 * tb_plug_events_active() - enable/disable plug events on a switch
1300 * Also configures a sane plug_events_delay of 255ms.
1302 * Return: Returns 0 on success or an error code on failure.
1304 static int tb_plug_events_active(struct tb_switch
*sw
, bool active
)
1309 if (tb_switch_is_icm(sw
))
1312 sw
->config
.plug_events_delay
= 0xff;
1313 res
= tb_sw_write(sw
, ((u32
*) &sw
->config
) + 4, TB_CFG_SWITCH
, 4, 1);
1317 /* Plug events are always enabled in USB4 */
1318 if (tb_switch_is_usb4(sw
))
1321 res
= tb_sw_read(sw
, &data
, TB_CFG_SWITCH
, sw
->cap_plug_events
+ 1, 1);
1326 data
= data
& 0xFFFFFF83;
1327 switch (sw
->config
.device_id
) {
1328 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
1329 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
1330 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
1338 return tb_sw_write(sw
, &data
, TB_CFG_SWITCH
,
1339 sw
->cap_plug_events
+ 1, 1);
1342 static ssize_t
authorized_show(struct device
*dev
,
1343 struct device_attribute
*attr
,
1346 struct tb_switch
*sw
= tb_to_switch(dev
);
1348 return sprintf(buf
, "%u\n", sw
->authorized
);
1351 static int tb_switch_set_authorized(struct tb_switch
*sw
, unsigned int val
)
1355 if (!mutex_trylock(&sw
->tb
->lock
))
1356 return restart_syscall();
1362 /* Approve switch */
1365 ret
= tb_domain_approve_switch_key(sw
->tb
, sw
);
1367 ret
= tb_domain_approve_switch(sw
->tb
, sw
);
1370 /* Challenge switch */
1373 ret
= tb_domain_challenge_switch_key(sw
->tb
, sw
);
1381 sw
->authorized
= val
;
1382 /* Notify status change to the userspace */
1383 kobject_uevent(&sw
->dev
.kobj
, KOBJ_CHANGE
);
1387 mutex_unlock(&sw
->tb
->lock
);
1391 static ssize_t
authorized_store(struct device
*dev
,
1392 struct device_attribute
*attr
,
1393 const char *buf
, size_t count
)
1395 struct tb_switch
*sw
= tb_to_switch(dev
);
1399 ret
= kstrtouint(buf
, 0, &val
);
1405 pm_runtime_get_sync(&sw
->dev
);
1406 ret
= tb_switch_set_authorized(sw
, val
);
1407 pm_runtime_mark_last_busy(&sw
->dev
);
1408 pm_runtime_put_autosuspend(&sw
->dev
);
1410 return ret
? ret
: count
;
1412 static DEVICE_ATTR_RW(authorized
);
1414 static ssize_t
boot_show(struct device
*dev
, struct device_attribute
*attr
,
1417 struct tb_switch
*sw
= tb_to_switch(dev
);
1419 return sprintf(buf
, "%u\n", sw
->boot
);
1421 static DEVICE_ATTR_RO(boot
);
1423 static ssize_t
device_show(struct device
*dev
, struct device_attribute
*attr
,
1426 struct tb_switch
*sw
= tb_to_switch(dev
);
1428 return sprintf(buf
, "%#x\n", sw
->device
);
1430 static DEVICE_ATTR_RO(device
);
1433 device_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1435 struct tb_switch
*sw
= tb_to_switch(dev
);
1437 return sprintf(buf
, "%s\n", sw
->device_name
? sw
->device_name
: "");
1439 static DEVICE_ATTR_RO(device_name
);
1442 generation_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1444 struct tb_switch
*sw
= tb_to_switch(dev
);
1446 return sprintf(buf
, "%u\n", sw
->generation
);
1448 static DEVICE_ATTR_RO(generation
);
1450 static ssize_t
key_show(struct device
*dev
, struct device_attribute
*attr
,
1453 struct tb_switch
*sw
= tb_to_switch(dev
);
1456 if (!mutex_trylock(&sw
->tb
->lock
))
1457 return restart_syscall();
1460 ret
= sprintf(buf
, "%*phN\n", TB_SWITCH_KEY_SIZE
, sw
->key
);
1462 ret
= sprintf(buf
, "\n");
1464 mutex_unlock(&sw
->tb
->lock
);
1468 static ssize_t
key_store(struct device
*dev
, struct device_attribute
*attr
,
1469 const char *buf
, size_t count
)
1471 struct tb_switch
*sw
= tb_to_switch(dev
);
1472 u8 key
[TB_SWITCH_KEY_SIZE
];
1473 ssize_t ret
= count
;
1476 if (!strcmp(buf
, "\n"))
1478 else if (hex2bin(key
, buf
, sizeof(key
)))
1481 if (!mutex_trylock(&sw
->tb
->lock
))
1482 return restart_syscall();
1484 if (sw
->authorized
) {
1491 sw
->key
= kmemdup(key
, sizeof(key
), GFP_KERNEL
);
1497 mutex_unlock(&sw
->tb
->lock
);
1500 static DEVICE_ATTR(key
, 0600, key_show
, key_store
);
1502 static ssize_t
speed_show(struct device
*dev
, struct device_attribute
*attr
,
1505 struct tb_switch
*sw
= tb_to_switch(dev
);
1507 return sprintf(buf
, "%u.0 Gb/s\n", sw
->link_speed
);
1511 * Currently all lanes must run at the same speed but we expose here
1512 * both directions to allow possible asymmetric links in the future.
1514 static DEVICE_ATTR(rx_speed
, 0444, speed_show
, NULL
);
1515 static DEVICE_ATTR(tx_speed
, 0444, speed_show
, NULL
);
1517 static ssize_t
lanes_show(struct device
*dev
, struct device_attribute
*attr
,
1520 struct tb_switch
*sw
= tb_to_switch(dev
);
1522 return sprintf(buf
, "%u\n", sw
->link_width
);
1526 * Currently link has same amount of lanes both directions (1 or 2) but
1527 * expose them separately to allow possible asymmetric links in the future.
1529 static DEVICE_ATTR(rx_lanes
, 0444, lanes_show
, NULL
);
1530 static DEVICE_ATTR(tx_lanes
, 0444, lanes_show
, NULL
);
1532 static ssize_t
nvm_authenticate_show(struct device
*dev
,
1533 struct device_attribute
*attr
, char *buf
)
1535 struct tb_switch
*sw
= tb_to_switch(dev
);
1538 nvm_get_auth_status(sw
, &status
);
1539 return sprintf(buf
, "%#x\n", status
);
1542 static ssize_t
nvm_authenticate_store(struct device
*dev
,
1543 struct device_attribute
*attr
, const char *buf
, size_t count
)
1545 struct tb_switch
*sw
= tb_to_switch(dev
);
1549 pm_runtime_get_sync(&sw
->dev
);
1551 if (!mutex_trylock(&sw
->tb
->lock
)) {
1552 ret
= restart_syscall();
1556 /* If NVMem devices are not yet added */
1562 ret
= kstrtobool(buf
, &val
);
1566 /* Always clear the authentication status */
1567 nvm_clear_auth_status(sw
);
1570 if (!sw
->nvm
->buf
) {
1575 ret
= nvm_validate_and_write(sw
);
1579 sw
->nvm
->authenticating
= true;
1580 ret
= nvm_authenticate(sw
);
1584 mutex_unlock(&sw
->tb
->lock
);
1586 pm_runtime_mark_last_busy(&sw
->dev
);
1587 pm_runtime_put_autosuspend(&sw
->dev
);
1593 static DEVICE_ATTR_RW(nvm_authenticate
);
1595 static ssize_t
nvm_version_show(struct device
*dev
,
1596 struct device_attribute
*attr
, char *buf
)
1598 struct tb_switch
*sw
= tb_to_switch(dev
);
1601 if (!mutex_trylock(&sw
->tb
->lock
))
1602 return restart_syscall();
1609 ret
= sprintf(buf
, "%x.%x\n", sw
->nvm
->major
, sw
->nvm
->minor
);
1611 mutex_unlock(&sw
->tb
->lock
);
1615 static DEVICE_ATTR_RO(nvm_version
);
1617 static ssize_t
vendor_show(struct device
*dev
, struct device_attribute
*attr
,
1620 struct tb_switch
*sw
= tb_to_switch(dev
);
1622 return sprintf(buf
, "%#x\n", sw
->vendor
);
1624 static DEVICE_ATTR_RO(vendor
);
1627 vendor_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1629 struct tb_switch
*sw
= tb_to_switch(dev
);
1631 return sprintf(buf
, "%s\n", sw
->vendor_name
? sw
->vendor_name
: "");
1633 static DEVICE_ATTR_RO(vendor_name
);
1635 static ssize_t
unique_id_show(struct device
*dev
, struct device_attribute
*attr
,
1638 struct tb_switch
*sw
= tb_to_switch(dev
);
1640 return sprintf(buf
, "%pUb\n", sw
->uuid
);
1642 static DEVICE_ATTR_RO(unique_id
);
1644 static struct attribute
*switch_attrs
[] = {
1645 &dev_attr_authorized
.attr
,
1646 &dev_attr_boot
.attr
,
1647 &dev_attr_device
.attr
,
1648 &dev_attr_device_name
.attr
,
1649 &dev_attr_generation
.attr
,
1651 &dev_attr_nvm_authenticate
.attr
,
1652 &dev_attr_nvm_version
.attr
,
1653 &dev_attr_rx_speed
.attr
,
1654 &dev_attr_rx_lanes
.attr
,
1655 &dev_attr_tx_speed
.attr
,
1656 &dev_attr_tx_lanes
.attr
,
1657 &dev_attr_vendor
.attr
,
1658 &dev_attr_vendor_name
.attr
,
1659 &dev_attr_unique_id
.attr
,
1663 static umode_t
switch_attr_is_visible(struct kobject
*kobj
,
1664 struct attribute
*attr
, int n
)
1666 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1667 struct tb_switch
*sw
= tb_to_switch(dev
);
1669 if (attr
== &dev_attr_device
.attr
) {
1672 } else if (attr
== &dev_attr_device_name
.attr
) {
1673 if (!sw
->device_name
)
1675 } else if (attr
== &dev_attr_vendor
.attr
) {
1678 } else if (attr
== &dev_attr_vendor_name
.attr
) {
1679 if (!sw
->vendor_name
)
1681 } else if (attr
== &dev_attr_key
.attr
) {
1683 sw
->tb
->security_level
== TB_SECURITY_SECURE
&&
1684 sw
->security_level
== TB_SECURITY_SECURE
)
1687 } else if (attr
== &dev_attr_rx_speed
.attr
||
1688 attr
== &dev_attr_rx_lanes
.attr
||
1689 attr
== &dev_attr_tx_speed
.attr
||
1690 attr
== &dev_attr_tx_lanes
.attr
) {
1694 } else if (attr
== &dev_attr_nvm_authenticate
.attr
) {
1695 if (nvm_upgradeable(sw
))
1698 } else if (attr
== &dev_attr_nvm_version
.attr
) {
1699 if (nvm_readable(sw
))
1702 } else if (attr
== &dev_attr_boot
.attr
) {
1708 return sw
->safe_mode
? 0 : attr
->mode
;
1711 static struct attribute_group switch_group
= {
1712 .is_visible
= switch_attr_is_visible
,
1713 .attrs
= switch_attrs
,
1716 static const struct attribute_group
*switch_groups
[] = {
1721 static void tb_switch_release(struct device
*dev
)
1723 struct tb_switch
*sw
= tb_to_switch(dev
);
1724 struct tb_port
*port
;
1726 dma_port_free(sw
->dma_port
);
1728 tb_switch_for_each_port(sw
, port
) {
1729 if (!port
->disabled
) {
1730 ida_destroy(&port
->in_hopids
);
1731 ida_destroy(&port
->out_hopids
);
1736 kfree(sw
->device_name
);
1737 kfree(sw
->vendor_name
);
1745 * Currently only need to provide the callbacks. Everything else is handled
1746 * in the connection manager.
1748 static int __maybe_unused
tb_switch_runtime_suspend(struct device
*dev
)
1750 struct tb_switch
*sw
= tb_to_switch(dev
);
1751 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
1753 if (cm_ops
->runtime_suspend_switch
)
1754 return cm_ops
->runtime_suspend_switch(sw
);
1759 static int __maybe_unused
tb_switch_runtime_resume(struct device
*dev
)
1761 struct tb_switch
*sw
= tb_to_switch(dev
);
1762 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
1764 if (cm_ops
->runtime_resume_switch
)
1765 return cm_ops
->runtime_resume_switch(sw
);
1769 static const struct dev_pm_ops tb_switch_pm_ops
= {
1770 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend
, tb_switch_runtime_resume
,
1774 struct device_type tb_switch_type
= {
1775 .name
= "thunderbolt_device",
1776 .release
= tb_switch_release
,
1777 .pm
= &tb_switch_pm_ops
,
1780 static int tb_switch_get_generation(struct tb_switch
*sw
)
1782 switch (sw
->config
.device_id
) {
1783 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
1784 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
1785 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK
:
1786 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C
:
1787 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
:
1788 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
1789 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE
:
1790 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE
:
1793 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE
:
1794 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE
:
1795 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE
:
1798 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE
:
1799 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE
:
1800 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE
:
1801 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE
:
1802 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE
:
1803 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE
:
1804 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE
:
1805 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE
:
1806 case PCI_DEVICE_ID_INTEL_ICL_NHI0
:
1807 case PCI_DEVICE_ID_INTEL_ICL_NHI1
:
1811 if (tb_switch_is_usb4(sw
))
	/*
	 * For unknown switches assume generation to be 1 to be on the
	 * safe side.
	 */
1818 tb_sw_warn(sw
, "unsupported switch device id %#x\n",
1819 sw
->config
.device_id
);
1824 static bool tb_switch_exceeds_max_depth(const struct tb_switch
*sw
, int depth
)
1828 if (tb_switch_is_usb4(sw
) ||
1829 (sw
->tb
->root_switch
&& tb_switch_is_usb4(sw
->tb
->root_switch
)))
1830 max_depth
= USB4_SWITCH_MAX_DEPTH
;
1832 max_depth
= TB_SWITCH_MAX_DEPTH
;
1834 return depth
> max_depth
;
1838 * tb_switch_alloc() - allocate a switch
1839 * @tb: Pointer to the owning domain
1840 * @parent: Parent device for this switch
1841 * @route: Route string for this switch
1843 * Allocates and initializes a switch. Will not upload configuration to
1844 * the switch. For that you need to call tb_switch_configure()
1845 * separately. The returned switch should be released by calling
1848 * Return: Pointer to the allocated switch or ERR_PTR() in case of
1851 struct tb_switch
*tb_switch_alloc(struct tb
*tb
, struct device
*parent
,
1854 struct tb_switch
*sw
;
1858 /* Unlock the downstream port so we can access the switch below */
1860 struct tb_switch
*parent_sw
= tb_to_switch(parent
);
1861 struct tb_port
*down
;
1863 down
= tb_port_at(route
, parent_sw
);
1864 tb_port_unlock(down
);
1867 depth
= tb_route_length(route
);
1869 upstream_port
= tb_cfg_get_upstream_port(tb
->ctl
, route
);
1870 if (upstream_port
< 0)
1871 return ERR_PTR(upstream_port
);
1873 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
1875 return ERR_PTR(-ENOMEM
);
1878 ret
= tb_cfg_read(tb
->ctl
, &sw
->config
, route
, 0, TB_CFG_SWITCH
, 0, 5);
1880 goto err_free_sw_ports
;
1882 sw
->generation
= tb_switch_get_generation(sw
);
1884 tb_dbg(tb
, "current switch config:\n");
1885 tb_dump_switch(tb
, sw
);
1887 /* configure switch */
1888 sw
->config
.upstream_port_number
= upstream_port
;
1889 sw
->config
.depth
= depth
;
1890 sw
->config
.route_hi
= upper_32_bits(route
);
1891 sw
->config
.route_lo
= lower_32_bits(route
);
1892 sw
->config
.enabled
= 0;
1894 /* Make sure we do not exceed maximum topology limit */
1895 if (tb_switch_exceeds_max_depth(sw
, depth
)) {
1896 ret
= -EADDRNOTAVAIL
;
1897 goto err_free_sw_ports
;
1900 /* initialize ports */
1901 sw
->ports
= kcalloc(sw
->config
.max_port_number
+ 1, sizeof(*sw
->ports
),
1905 goto err_free_sw_ports
;
1908 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
1909 /* minimum setup for tb_find_cap and tb_drom_read to work */
1910 sw
->ports
[i
].sw
= sw
;
1911 sw
->ports
[i
].port
= i
;
1914 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_PLUG_EVENTS
);
1916 sw
->cap_plug_events
= ret
;
1918 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_LINK_CONTROLLER
);
1922 /* Root switch is always authorized */
1924 sw
->authorized
= true;
1926 device_initialize(&sw
->dev
);
1927 sw
->dev
.parent
= parent
;
1928 sw
->dev
.bus
= &tb_bus_type
;
1929 sw
->dev
.type
= &tb_switch_type
;
1930 sw
->dev
.groups
= switch_groups
;
1931 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
1939 return ERR_PTR(ret
);
1943 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
1944 * @tb: Pointer to the owning domain
1945 * @parent: Parent device for this switch
1946 * @route: Route string for this switch
1948 * This creates a switch in safe mode. This means the switch pretty much
1949 * lacks all capabilities except DMA configuration port before it is
1950 * flashed with a valid NVM firmware.
1952 * The returned switch must be released by calling tb_switch_put().
1954 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
1957 tb_switch_alloc_safe_mode(struct tb
*tb
, struct device
*parent
, u64 route
)
1959 struct tb_switch
*sw
;
1961 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
1963 return ERR_PTR(-ENOMEM
);
1966 sw
->config
.depth
= tb_route_length(route
);
1967 sw
->config
.route_hi
= upper_32_bits(route
);
1968 sw
->config
.route_lo
= lower_32_bits(route
);
1969 sw
->safe_mode
= true;
1971 device_initialize(&sw
->dev
);
1972 sw
->dev
.parent
= parent
;
1973 sw
->dev
.bus
= &tb_bus_type
;
1974 sw
->dev
.type
= &tb_switch_type
;
1975 sw
->dev
.groups
= switch_groups
;
1976 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
1982 * tb_switch_configure() - Uploads configuration to the switch
1983 * @sw: Switch to configure
1985 * Call this function before the switch is added to the system. It will
1986 * upload configuration to the switch and makes it available for the
1987 * connection manager to use. Can be called to the switch again after
1988 * resume from low power states to re-initialize it.
1990 * Return: %0 in case of success and negative errno in case of failure
1992 int tb_switch_configure(struct tb_switch
*sw
)
1994 struct tb
*tb
= sw
->tb
;
1998 route
= tb_route(sw
);
2000 tb_dbg(tb
, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2001 sw
->config
.enabled
? "restoring " : "initializing", route
,
2002 tb_route_length(route
), sw
->config
.upstream_port_number
);
2004 sw
->config
.enabled
= 1;
2006 if (tb_switch_is_usb4(sw
)) {
2008 * For USB4 devices, we need to program the CM version
2009 * accordingly so that it knows to expose all the
2010 * additional capabilities.
2012 sw
->config
.cmuv
= USB4_VERSION_1_0
;
2014 /* Enumerate the switch */
2015 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2020 ret
= usb4_switch_setup(sw
);
2024 ret
= usb4_switch_configure_link(sw
);
2026 if (sw
->config
.vendor_id
!= PCI_VENDOR_ID_INTEL
)
2027 tb_sw_warn(sw
, "unknown switch vendor id %#x\n",
2028 sw
->config
.vendor_id
);
2030 if (!sw
->cap_plug_events
) {
2031 tb_sw_warn(sw
, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2035 /* Enumerate the switch */
2036 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2041 ret
= tb_lc_configure_link(sw
);
2046 return tb_plug_events_active(sw
, true);
2049 static int tb_switch_set_uuid(struct tb_switch
*sw
)
2058 if (tb_switch_is_usb4(sw
)) {
2059 ret
= usb4_switch_read_uid(sw
, &sw
->uid
);
2065 * The newer controllers include fused UUID as part of
2066 * link controller specific registers
2068 ret
= tb_lc_read_uuid(sw
, uuid
);
2078 * ICM generates UUID based on UID and fills the upper
2079 * two words with ones. This is not strictly following
2080 * UUID format but we want to be compatible with it so
2081 * we do the same here.
2083 uuid
[0] = sw
->uid
& 0xffffffff;
2084 uuid
[1] = (sw
->uid
>> 32) & 0xffffffff;
2085 uuid
[2] = 0xffffffff;
2086 uuid
[3] = 0xffffffff;
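		/*
		 * For example, a UID of 0x0123456789abcdef ends up as the
		 * words { 0x89abcdef, 0x01234567, 0xffffffff, 0xffffffff },
		 * i.e. the low and high halves of the UID followed by the
		 * all-ones filler described above.
		 */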
2089 sw
->uuid
= kmemdup(uuid
, sizeof(uuid
), GFP_KERNEL
);
2095 static int tb_switch_add_dma_port(struct tb_switch
*sw
)
2100 switch (sw
->generation
) {
2102 /* Only root switch can be upgraded */
2108 ret
= tb_switch_set_uuid(sw
);
2115 * DMA port is the only thing available when the switch
2123 /* Root switch DMA port requires running firmware */
2124 if (!tb_route(sw
) && !tb_switch_is_icm(sw
))
2127 sw
->dma_port
= dma_port_alloc(sw
);
2131 if (sw
->no_nvm_upgrade
)
2135 * If there is status already set then authentication failed
2136 * when the dma_port_flash_update_auth() returned. Power cycling
2137 * is not needed (it was done already) so only thing we do here
2138 * is to unblock runtime PM of the root port.
2140 nvm_get_auth_status(sw
, &status
);
2143 nvm_authenticate_complete_dma_port(sw
);
2148 * Check status of the previous flash authentication. If there
2149 * is one we need to power cycle the switch in any case to make
2150 * it functional again.
2152 ret
= dma_port_flash_update_auth_status(sw
->dma_port
, &status
);
2156 /* Now we can allow root port to suspend again */
2158 nvm_authenticate_complete_dma_port(sw
);
2161 tb_sw_info(sw
, "switch flash authentication failed\n");
2162 nvm_set_auth_status(sw
, status
);
2165 tb_sw_info(sw
, "power cycling the switch now\n");
2166 dma_port_power_cycle(sw
->dma_port
);
	 * We return an error here, which causes the switch add to fail.
	 * The switch should appear back after the power cycle is
	 * complete.
2175 static void tb_switch_default_link_ports(struct tb_switch
*sw
)
2179 for (i
= 1; i
<= sw
->config
.max_port_number
; i
+= 2) {
2180 struct tb_port
*port
= &sw
->ports
[i
];
2181 struct tb_port
*subordinate
;
2183 if (!tb_port_is_null(port
))
2186 /* Check for the subordinate port */
2187 if (i
== sw
->config
.max_port_number
||
2188 !tb_port_is_null(&sw
->ports
[i
+ 1]))
2191 /* Link them if not already done so (by DROM) */
2192 subordinate
= &sw
->ports
[i
+ 1];
2193 if (!port
->dual_link_port
&& !subordinate
->dual_link_port
) {
2195 port
->dual_link_port
= subordinate
;
2196 subordinate
->link_nr
= 1;
2197 subordinate
->dual_link_port
= port
;
2199 tb_sw_dbg(sw
, "linked ports %d <-> %d\n",
2200 port
->port
, subordinate
->port
);
2205 static bool tb_switch_lane_bonding_possible(struct tb_switch
*sw
)
2207 const struct tb_port
*up
= tb_upstream_port(sw
);
2209 if (!up
->dual_link_port
|| !up
->dual_link_port
->remote
)
2212 if (tb_switch_is_usb4(sw
))
2213 return usb4_switch_lane_bonding_possible(sw
);
2214 return tb_lc_lane_bonding_possible(sw
);
2217 static int tb_switch_update_link_attributes(struct tb_switch
*sw
)
2220 bool change
= false;
2223 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2226 up
= tb_upstream_port(sw
);
2228 ret
= tb_port_get_link_speed(up
);
2231 if (sw
->link_speed
!= ret
)
2233 sw
->link_speed
= ret
;
2235 ret
= tb_port_get_link_width(up
);
2238 if (sw
->link_width
!= ret
)
2240 sw
->link_width
= ret
;
2242 /* Notify userspace that there is possible link attribute change */
2243 if (device_is_registered(&sw
->dev
) && change
)
2244 kobject_uevent(&sw
->dev
.kobj
, KOBJ_CHANGE
);
2250 * tb_switch_lane_bonding_enable() - Enable lane bonding
2251 * @sw: Switch to enable lane bonding
2253 * Connection manager can call this function to enable lane bonding of a
2254 * switch. If conditions are correct and both switches support the feature,
2255 * lanes are bonded. It is safe to call this to any switch.
2257 int tb_switch_lane_bonding_enable(struct tb_switch
*sw
)
2259 struct tb_switch
*parent
= tb_to_switch(sw
->dev
.parent
);
2260 struct tb_port
*up
, *down
;
2261 u64 route
= tb_route(sw
);
2267 if (!tb_switch_lane_bonding_possible(sw
))
2270 up
= tb_upstream_port(sw
);
2271 down
= tb_port_at(route
, parent
);
2273 if (!tb_port_is_width_supported(up
, 2) ||
2274 !tb_port_is_width_supported(down
, 2))
2277 ret
= tb_port_lane_bonding_enable(up
);
2279 tb_port_warn(up
, "failed to enable lane bonding\n");
2283 ret
= tb_port_lane_bonding_enable(down
);
2285 tb_port_warn(down
, "failed to enable lane bonding\n");
2286 tb_port_lane_bonding_disable(up
);
2290 tb_switch_update_link_attributes(sw
);
2292 tb_sw_dbg(sw
, "lane bonding enabled\n");
2297 * tb_switch_lane_bonding_disable() - Disable lane bonding
2298 * @sw: Switch whose lane bonding to disable
2300 * Disables lane bonding between @sw and parent. This can be called even
2301 * if lanes were not bonded originally.
2303 void tb_switch_lane_bonding_disable(struct tb_switch
*sw
)
2305 struct tb_switch
*parent
= tb_to_switch(sw
->dev
.parent
);
2306 struct tb_port
*up
, *down
;
2311 up
= tb_upstream_port(sw
);
2315 down
= tb_port_at(tb_route(sw
), parent
);
2317 tb_port_lane_bonding_disable(up
);
2318 tb_port_lane_bonding_disable(down
);
2320 tb_switch_update_link_attributes(sw
);
2321 tb_sw_dbg(sw
, "lane bonding disabled\n");
2325 * tb_switch_add() - Add a switch to the domain
2326 * @sw: Switch to add
2328 * This is the last step in adding switch to the domain. It will read
2329 * identification information from DROM and initializes ports so that
2330 * they can be used to connect other switches. The switch will be
2331 * exposed to the userspace when this function successfully returns. To
2332 * remove and release the switch, call tb_switch_remove().
2334 * Return: %0 in case of success and negative errno in case of failure
2336 int tb_switch_add(struct tb_switch
*sw
)
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM in NVM that includes
	 * vendor and model identification strings, which we then expose
	 * to userspace. NVM can be accessed through the DMA
	 * configuration based mailbox.
2347 ret
= tb_switch_add_dma_port(sw
);
2349 dev_err(&sw
->dev
, "failed to add DMA port\n");
2353 if (!sw
->safe_mode
) {
2355 ret
= tb_drom_read(sw
);
2357 dev_err(&sw
->dev
, "reading DROM failed\n");
2360 tb_sw_dbg(sw
, "uid: %#llx\n", sw
->uid
);
2362 ret
= tb_switch_set_uuid(sw
);
2364 dev_err(&sw
->dev
, "failed to set UUID\n");
2368 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
2369 if (sw
->ports
[i
].disabled
) {
2370 tb_port_dbg(&sw
->ports
[i
], "disabled by eeprom\n");
2373 ret
= tb_init_port(&sw
->ports
[i
]);
2375 dev_err(&sw
->dev
, "failed to initialize port %d\n", i
);
2380 tb_switch_default_link_ports(sw
);
2382 ret
= tb_switch_update_link_attributes(sw
);
2386 ret
= tb_switch_tmu_init(sw
);
2391 ret
= device_add(&sw
->dev
);
2393 dev_err(&sw
->dev
, "failed to add device: %d\n", ret
);
2398 dev_info(&sw
->dev
, "new device found, vendor=%#x device=%#x\n",
2399 sw
->vendor
, sw
->device
);
2400 if (sw
->vendor_name
&& sw
->device_name
)
2401 dev_info(&sw
->dev
, "%s %s\n", sw
->vendor_name
,
2405 ret
= tb_switch_nvm_add(sw
);
2407 dev_err(&sw
->dev
, "failed to add NVM devices\n");
2408 device_del(&sw
->dev
);
2412 pm_runtime_set_active(&sw
->dev
);
2414 pm_runtime_set_autosuspend_delay(&sw
->dev
, TB_AUTOSUSPEND_DELAY
);
2415 pm_runtime_use_autosuspend(&sw
->dev
);
2416 pm_runtime_mark_last_busy(&sw
->dev
);
2417 pm_runtime_enable(&sw
->dev
);
2418 pm_request_autosuspend(&sw
->dev
);
2425 * tb_switch_remove() - Remove and release a switch
2426 * @sw: Switch to remove
2428 * This will remove the switch from the domain and release it after last
2429 * reference count drops to zero. If there are switches connected below
2430 * this switch, they will be removed as well.
2432 void tb_switch_remove(struct tb_switch
*sw
)
2434 struct tb_port
*port
;
2437 pm_runtime_get_sync(&sw
->dev
);
2438 pm_runtime_disable(&sw
->dev
);
2441 /* port 0 is the switch itself and never has a remote */
2442 tb_switch_for_each_port(sw
, port
) {
2443 if (tb_port_has_remote(port
)) {
2444 tb_switch_remove(port
->remote
->sw
);
2445 port
->remote
= NULL
;
2446 } else if (port
->xdomain
) {
2447 tb_xdomain_remove(port
->xdomain
);
2448 port
->xdomain
= NULL
;
2452 if (!sw
->is_unplugged
)
2453 tb_plug_events_active(sw
, false);
2455 if (tb_switch_is_usb4(sw
))
2456 usb4_switch_unconfigure_link(sw
);
2458 tb_lc_unconfigure_link(sw
);
2460 tb_switch_nvm_remove(sw
);
2463 dev_info(&sw
->dev
, "device disconnected\n");
2464 device_unregister(&sw
->dev
);
2468 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2470 void tb_sw_set_unplugged(struct tb_switch
*sw
)
2472 struct tb_port
*port
;
2474 if (sw
== sw
->tb
->root_switch
) {
2475 tb_sw_WARN(sw
, "cannot unplug root switch\n");
2478 if (sw
->is_unplugged
) {
2479 tb_sw_WARN(sw
, "is_unplugged already set\n");
2482 sw
->is_unplugged
= true;
2483 tb_switch_for_each_port(sw
, port
) {
2484 if (tb_port_has_remote(port
))
2485 tb_sw_set_unplugged(port
->remote
->sw
);
2486 else if (port
->xdomain
)
2487 port
->xdomain
->is_unplugged
= true;
2491 int tb_switch_resume(struct tb_switch
*sw
)
2493 struct tb_port
*port
;
2496 tb_sw_dbg(sw
, "resuming switch\n");
2499 * Check for UID of the connected switches except for root
2500 * switch which we assume cannot be removed.
2506 * Check first that we can still read the switch config
2507 * space. It may be that there is now another domain
2510 err
= tb_cfg_get_upstream_port(sw
->tb
->ctl
, tb_route(sw
));
2512 tb_sw_info(sw
, "switch not present anymore\n");
2516 if (tb_switch_is_usb4(sw
))
2517 err
= usb4_switch_read_uid(sw
, &uid
);
2519 err
= tb_drom_read_uid_only(sw
, &uid
);
2521 tb_sw_warn(sw
, "uid read failed\n");
2524 if (sw
->uid
!= uid
) {
2526 "changed while suspended (uid %#llx -> %#llx)\n",
2532 err
= tb_switch_configure(sw
);
2536 /* check for surviving downstream switches */
2537 tb_switch_for_each_port(sw
, port
) {
2538 if (!tb_port_has_remote(port
) && !port
->xdomain
)
2541 if (tb_wait_for_port(port
, true) <= 0) {
2543 "lost during suspend, disconnecting\n");
2544 if (tb_port_has_remote(port
))
2545 tb_sw_set_unplugged(port
->remote
->sw
);
2546 else if (port
->xdomain
)
2547 port
->xdomain
->is_unplugged
= true;
2548 } else if (tb_port_has_remote(port
) || port
->xdomain
) {
2550 * Always unlock the port so the downstream
2551 * switch/domain is accessible.
2553 if (tb_port_unlock(port
))
2554 tb_port_warn(port
, "failed to unlock port\n");
2555 if (port
->remote
&& tb_switch_resume(port
->remote
->sw
)) {
2557 "lost during suspend, disconnecting\n");
2558 tb_sw_set_unplugged(port
->remote
->sw
);
2565 void tb_switch_suspend(struct tb_switch
*sw
)
2567 struct tb_port
*port
;
2570 err
= tb_plug_events_active(sw
, false);
2574 tb_switch_for_each_port(sw
, port
) {
2575 if (tb_port_has_remote(port
))
2576 tb_switch_suspend(port
->remote
->sw
);
2579 if (tb_switch_is_usb4(sw
))
2580 usb4_switch_set_sleep(sw
);
2582 tb_lc_set_sleep(sw
);
2586 * tb_switch_query_dp_resource() - Query availability of DP resource
2587 * @sw: Switch whose DP resource is queried
2590 * Queries availability of DP resource for DP tunneling using switch
2591 * specific means. Returns %true if resource is available.
2593 bool tb_switch_query_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
2595 if (tb_switch_is_usb4(sw
))
2596 return usb4_switch_query_dp_resource(sw
, in
);
2597 return tb_lc_dp_sink_query(sw
, in
);
2601 * tb_switch_alloc_dp_resource() - Allocate available DP resource
2602 * @sw: Switch whose DP resource is allocated
2605 * Allocates DP resource for DP tunneling. The resource must be
2606 * available for this to succeed (see tb_switch_query_dp_resource()).
2607 * Returns %0 in success and negative errno otherwise.
2609 int tb_switch_alloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
2611 if (tb_switch_is_usb4(sw
))
2612 return usb4_switch_alloc_dp_resource(sw
, in
);
2613 return tb_lc_dp_sink_alloc(sw
, in
);
2617 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
2618 * @sw: Switch whose DP resource is de-allocated
2621 * De-allocates DP resource that was previously allocated for DP
2624 void tb_switch_dealloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
2628 if (tb_switch_is_usb4(sw
))
2629 ret
= usb4_switch_dealloc_dp_resource(sw
, in
);
2631 ret
= tb_lc_dp_sink_dealloc(sw
, in
);
2634 tb_sw_warn(sw
, "failed to de-allocate DP resource for port %d\n",
2638 struct tb_sw_lookup
{
2646 static int tb_switch_match(struct device
*dev
, const void *data
)
2648 struct tb_switch
*sw
= tb_to_switch(dev
);
2649 const struct tb_sw_lookup
*lookup
= data
;
2653 if (sw
->tb
!= lookup
->tb
)
2657 return !memcmp(sw
->uuid
, lookup
->uuid
, sizeof(*lookup
->uuid
));
2659 if (lookup
->route
) {
2660 return sw
->config
.route_lo
== lower_32_bits(lookup
->route
) &&
2661 sw
->config
.route_hi
== upper_32_bits(lookup
->route
);
2664 /* Root switch is matched only by depth */
2668 return sw
->link
== lookup
->link
&& sw
->depth
== lookup
->depth
;
2672 * tb_switch_find_by_link_depth() - Find switch by link and depth
2673 * @tb: Domain the switch belongs
2674 * @link: Link number the switch is connected
2675 * @depth: Depth of the switch in link
2677 * Returned switch has reference count increased so the caller needs to
2678 * call tb_switch_put() when done with the switch.
2680 struct tb_switch
*tb_switch_find_by_link_depth(struct tb
*tb
, u8 link
, u8 depth
)
2682 struct tb_sw_lookup lookup
;
2685 memset(&lookup
, 0, sizeof(lookup
));
2688 lookup
.depth
= depth
;
2690 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
2692 return tb_to_switch(dev
);
2698 * tb_switch_find_by_uuid() - Find switch by UUID
2699 * @tb: Domain the switch belongs
2700 * @uuid: UUID to look for
2702 * Returned switch has reference count increased so the caller needs to
2703 * call tb_switch_put() when done with the switch.
2705 struct tb_switch
*tb_switch_find_by_uuid(struct tb
*tb
, const uuid_t
*uuid
)
2707 struct tb_sw_lookup lookup
;
2710 memset(&lookup
, 0, sizeof(lookup
));
2714 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
2716 return tb_to_switch(dev
);
2722 * tb_switch_find_by_route() - Find switch by route string
2723 * @tb: Domain the switch belongs
2724 * @route: Route string to look for
2726 * Returned switch has reference count increased so the caller needs to
2727 * call tb_switch_put() when done with the switch.
2729 struct tb_switch
*tb_switch_find_by_route(struct tb
*tb
, u64 route
)
2731 struct tb_sw_lookup lookup
;
2735 return tb_switch_get(tb
->root_switch
);
2737 memset(&lookup
, 0, sizeof(lookup
));
2739 lookup
.route
= route
;
2741 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
2743 return tb_to_switch(dev
);
2749 * tb_switch_find_port() - return the first port of @type on @sw or NULL
2750 * @sw: Switch to find the port from
2751 * @type: Port type to look for
2753 struct tb_port
*tb_switch_find_port(struct tb_switch
*sw
,
2754 enum tb_port_type type
)
2756 struct tb_port
*port
;
2758 tb_switch_for_each_port(sw
, port
) {
2759 if (port
->config
.type
== type
)
2766 void tb_switch_exit(void)
2768 ida_destroy(&nvm_ida
);