// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"
/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K
static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}
static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}
static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}
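
/*
 * Illustrative walk through the checks above (sketch only, the numbers
 * are made up): if the first dword of a 512 KiB image reads 0x00004000,
 * then hdr_size = 0x4000 (16 KiB), which passes the 4 KiB alignment
 * check. The device ID is then compared against the u16 at offset
 * 0x4000 + NVM_DEVID, and for generation < 3 switches the CSS headers
 * at buf + NVM_CSS are written to DMA_PORT_CSS_ADDRESS before the
 * digital section starting at buf + 0x4000 is flashed to offset 0.
 */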
static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		return ret == -ETIMEDOUT ? 0 : ret;
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return 0;
}
static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;

		if (!ret) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&sw->tb->lock);

	return ret;
}
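
/*
 * Illustrative note on the caching above (not part of the driver logic):
 * because the image is only staged in sw->nvm->buf here, consecutive
 * nvmem writes simply grow the cached size. For example, two sequential
 * 64 KiB writes at offsets 0x0 and 0x10000 leave buf_data_size equal to
 * 0x20000; nothing is sent to the switch until the user later triggers
 * authentication.
 */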
static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}
static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
	if (IS_ERR(nvm_dev)) {
		ret = PTR_ERR(nvm_dev);
		goto err_nvm_active;
	}
	nvm->non_active = nvm_dev;

	sw->nvm = nvm;
	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}
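
/*
 * Worked example of the size calculation above (illustrative only): if
 * the NVM_FLASH_SIZE dword reads back with (val & 7) == 3, the raw
 * flash size is (SZ_1M << 3) / 8 = 1 MiB. For a generation 3 switch
 * hdr_size is SZ_16K, so the active NVM size exposed through nvmem is
 * (1048576 - 16384) / 2 = 516096 bytes.
 */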
static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);
}
/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		}
		return "unknown";
	default:
		return "unknown";
	}
}
static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", port->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}
/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}
/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_WARN(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}
/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu",
		    credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);

	port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}
/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
	if (ret)
		return ret;

	data &= ~TB_PORT_LCA_MASK;
	data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
}
/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}
/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res)
		return res;

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	return 0;
}
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/* HopIDs 0-7 are reserved */
	if (min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}
/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}
/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}
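
/*
 * Typical pairing of the HopID helpers above (illustrative sketch only):
 *
 *	int hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	tb_port_release_in_hopid(port, hopid);
 *
 * Passing a negative @max_hopid lets the allocator fall back to the
 * port's own max_in_hop_id as the upper bound.
 */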
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (start->sw->config.depth < end->sw->config.depth) {
		if (prev->remote &&
		    prev->remote->sw->config.depth > prev->sw->config.depth)
			next = prev->remote;
		else
			next = tb_port_at(tb_route(end->sw), prev->sw);
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next;
}
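
/*
 * Illustrative walk using the helper above (sketch only, src and dst are
 * placeholder ports): starting from @src and ending at @dst, each call
 * returns the next port on the path until %NULL signals the end:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
 *		tb_port_dbg(p, "on path\n");
 */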
/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	default:
		return false;
	}
}
/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
		return false;

	return !!(data & TB_PCI_EN);
}
/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? TB_PCI_EN : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
}
/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
	if (ret)
		return ret;

	return !!(data & TB_DP_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
	if (ret)
		return ret;

	data |= TB_DP_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
}
/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
			   ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
	data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);

	data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
			     ARRAY_SIZE(data));
}
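
/*
 * Illustrative layout of the two dwords programmed above: data[0] holds
 * the video HopID under TB_DP_VIDEO_HOPID_MASK, and data[1] holds the
 * AUX TX HopID in its low bits plus the AUX RX HopID shifted by
 * TB_DP_AUX_RX_HOPID_SHIFT. For example, tb_dp_port_set_hops(port, 9, 8, 8)
 * places 9 in the video field and 8 in both AUX fields.
 */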
/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
		return false;

	return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
	if (ret)
		return ret;

	if (enable)
		data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
	else
		data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);

	return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
}
/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       sw->vendor_id, sw->device_id, sw->revision,
	       sw->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       sw->upstream_port_number, sw->depth,
	       (((u64) sw->route_hi) << 32) | sw->route_lo,
	       sw->enabled, sw->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       sw->__unknown1, sw->__unknown4);
}
/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};
	tb_dbg(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}
/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}
static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	/*
	 * Make sure there is no PCIe rescan ongoing when a new PCIe
	 * tunnel is created. Otherwise the PCIe rescan code might find
	 * the new tunnel too early.
	 */
	pci_lock_rescan_remove();

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	pci_unlock_rescan_remove();

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}
static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);
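
/*
 * Illustrative sysfs usage of the attribute above (sketch, <device> is a
 * placeholder): the values are handled by tb_switch_set_authorized(),
 * writing 1 approves the switch and writing 2 challenges it using the
 * previously stored key, e.g. roughly
 *
 *	echo 1 > /sys/bus/thunderbolt/devices/<device>/authorized
 */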
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
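
/*
 * Illustrative key format for the attribute above: the key is
 * TB_SWITCH_KEY_SIZE bytes given as hex text (parsed with hex2bin()),
 * so a 32-byte key is written as 64 hex characters, e.g. roughly
 *
 *	echo <64 hex characters> > /sys/bus/thunderbolt/devices/<device>/key
 *
 * Writing a bare newline clears the stored key.
 */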
static void nvm_authenticate_start(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}
static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		if (!sw->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		ret = nvm_validate_and_write(sw);
		if (ret)
			goto exit_unlock;

		sw->nvm->authenticating = true;

		if (!tb_route(sw)) {
			/*
			 * Keep root port from suspending as long as the
			 * NVM upgrade process is running.
			 */
			nvm_authenticate_start(sw);
			ret = nvm_authenticate_host(sw);
			if (ret)
				nvm_authenticate_complete(sw);
		} else {
			ret = nvm_authenticate_device(sw);
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);
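
/*
 * Illustrative NVM upgrade flow from userspace built on the attributes
 * above (sketch only, device and file names are placeholders): the image
 * is first written to the non-active nvmem device, which lands in the
 * cache filled by tb_switch_nvm_write(), and authentication is then
 * triggered here:
 *
 *	dd if=firmware.bin of=/sys/bus/thunderbolt/devices/<device>/nvm_non_active0/nvmem
 *	echo 1 > /sys/bus/thunderbolt/devices/<device>/nvm_authenticate
 *
 * Reading nvm_authenticate afterwards reports the cached failure status,
 * with 0 meaning no failure.
 */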
static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);
static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};
static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr ||
		   attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}
static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};
static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int i;

	dma_port_free(sw->dma_port);

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!sw->ports[i].disabled) {
			ida_destroy(&sw->ports[i].in_hopids);
			ida_destroy(&sw->ports[i].out_hopids);
		}
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}
/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);

	return 0;
}
static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};
static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}
/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Make sure we do not exceed maximum topology limit */
	depth = tb_route_length(route);
	if (depth > TB_SWITCH_MAX_DEPTH)
		return ERR_PTR(-EADDRNOTAVAIL);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}
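
/*
 * Illustrative call sequence for the allocation above (sketch only,
 * error handling trimmed), mirroring what the kernel-doc describes:
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (!IS_ERR(sw)) {
 *		if (!tb_switch_configure(sw) && !tb_switch_add(sw))
 *			return sw;
 *		tb_switch_put(sw);
 *	}
 */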
/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}
/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
	       route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	ret = tb_lc_configure_link(sw);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}
static int tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers.
	 */
	ret = tb_lc_read_uuid(sw, uuid);
	if (ret) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}
static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}
/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding switch to the domain. It will read
 * identification information from DROM and initializes ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret)
				return ret;
		}
	}

	ret = device_add(&sw->dev);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		device_del(&sw->dev);
		return ret;
	}

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	return 0;
}
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after last
 * reference count drops to zero. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i])) {
			tb_switch_remove(sw->ports[i].remote->sw);
			sw->ports[i].remote = NULL;
		} else if (sw->ports[i].xdomain) {
			tb_xdomain_remove(sw->ports[i].xdomain);
			sw->ports[i].xdomain = NULL;
		}
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);
	tb_lc_unconfigure_link(sw);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}
/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
		else if (sw->ports[i].xdomain)
			sw->ports[i].xdomain->is_unplugged = true;
	}
}
int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_lc_configure_link(sw);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port)) {
			if (tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}
void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_switch_suspend(sw->ports[i].remote->sw);
	}

	tb_lc_set_sleep(sw);
}
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}
/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs
 * @link: Link number the switch is connected
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
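
/*
 * Illustrative lookup usage (sketch only): all three finders above
 * return a switch with its reference count raised, so callers drop it
 * when done:
 *
 *	struct tb_switch *sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		tb_sw_dbg(sw, "found\n");
 *		tb_switch_put(sw);
 *	}
 */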
void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}