// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */
10 #include <linux/delay.h>
11 #include <linux/ktime.h>
15 #define USB4_DATA_DWORDS 16
16 #define USB4_DATA_RETRIES 3
/*
 * USB4 router operation opcodes. The opcode is written to ROUTER_CS_26
 * together with the OV bit to start the operation.
 */
enum usb4_switch_op {
	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
	USB4_SWITCH_OP_NVM_WRITE = 0x20,
	USB4_SWITCH_OP_NVM_AUTH = 0x21,
	USB4_SWITCH_OP_NVM_READ = 0x22,
	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
	USB4_SWITCH_OP_DROM_READ = 0x24,
	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};
30 #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
31 #define USB4_NVM_READ_OFFSET_SHIFT 2
32 #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
33 #define USB4_NVM_READ_LENGTH_SHIFT 24
35 #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
36 #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
38 #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
39 #define USB4_DROM_ADDRESS_SHIFT 2
40 #define USB4_DROM_SIZE_MASK GENMASK(19, 15)
41 #define USB4_DROM_SIZE_SHIFT 15
43 #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
45 typedef int (*read_block_fn
)(struct tb_switch
*, unsigned int, void *, size_t);
46 typedef int (*write_block_fn
)(struct tb_switch
*, const void *, size_t);
48 static int usb4_switch_wait_for_bit(struct tb_switch
*sw
, u32 offset
, u32 bit
,
49 u32 value
, int timeout_msec
)
51 ktime_t timeout
= ktime_add_ms(ktime_get(), timeout_msec
);
57 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, offset
, 1);
61 if ((val
& bit
) == value
)
64 usleep_range(50, 100);
65 } while (ktime_before(ktime_get(), timeout
));
70 static int usb4_switch_op_read_data(struct tb_switch
*sw
, void *data
,
73 if (dwords
> USB4_DATA_DWORDS
)
76 return tb_sw_read(sw
, data
, TB_CFG_SWITCH
, ROUTER_CS_9
, dwords
);
79 static int usb4_switch_op_write_data(struct tb_switch
*sw
, const void *data
,
82 if (dwords
> USB4_DATA_DWORDS
)
85 return tb_sw_write(sw
, data
, TB_CFG_SWITCH
, ROUTER_CS_9
, dwords
);
88 static int usb4_switch_op_read_metadata(struct tb_switch
*sw
, u32
*metadata
)
90 return tb_sw_read(sw
, metadata
, TB_CFG_SWITCH
, ROUTER_CS_25
, 1);
93 static int usb4_switch_op_write_metadata(struct tb_switch
*sw
, u32 metadata
)
95 return tb_sw_write(sw
, &metadata
, TB_CFG_SWITCH
, ROUTER_CS_25
, 1);
98 static int usb4_switch_do_read_data(struct tb_switch
*sw
, u16 address
,
99 void *buf
, size_t size
, read_block_fn read_block
)
101 unsigned int retries
= USB4_DATA_RETRIES
;
104 offset
= address
& 3;
105 address
= address
& ~3;
108 size_t nbytes
= min_t(size_t, size
, USB4_DATA_DWORDS
* 4);
109 unsigned int dwaddress
, dwords
;
110 u8 data
[USB4_DATA_DWORDS
* 4];
113 dwaddress
= address
/ 4;
114 dwords
= ALIGN(nbytes
, 4) / 4;
116 ret
= read_block(sw
, dwaddress
, data
, dwords
);
118 if (ret
== -ETIMEDOUT
) {
126 memcpy(buf
, data
+ offset
, nbytes
);
136 static int usb4_switch_do_write_data(struct tb_switch
*sw
, u16 address
,
137 const void *buf
, size_t size
, write_block_fn write_next_block
)
139 unsigned int retries
= USB4_DATA_RETRIES
;
142 offset
= address
& 3;
143 address
= address
& ~3;
146 u32 nbytes
= min_t(u32
, size
, USB4_DATA_DWORDS
* 4);
147 u8 data
[USB4_DATA_DWORDS
* 4];
150 memcpy(data
+ offset
, buf
, nbytes
);
152 ret
= write_next_block(sw
, data
, nbytes
/ 4);
154 if (ret
== -ETIMEDOUT
) {
170 static int usb4_switch_op(struct tb_switch
*sw
, u16 opcode
, u8
*status
)
175 val
= opcode
| ROUTER_CS_26_OV
;
176 ret
= tb_sw_write(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_26
, 1);
180 ret
= usb4_switch_wait_for_bit(sw
, ROUTER_CS_26
, ROUTER_CS_26_OV
, 0, 500);
184 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_26
, 1);
185 if (val
& ROUTER_CS_26_ONS
)
188 *status
= (val
& ROUTER_CS_26_STATUS_MASK
) >> ROUTER_CS_26_STATUS_SHIFT
;
193 * usb4_switch_setup() - Additional setup for USB4 device
194 * @sw: USB4 router to setup
196 * USB4 routers need additional settings in order to enable all the
197 * tunneling. This function enables USB and PCIe tunneling if it can be
198 * enabled (e.g the parent switch also supports them). If USB tunneling
199 * is not available for some reason (like that there is Thunderbolt 3
200 * switch upstream) then the internal xHCI controller is enabled
203 int usb4_switch_setup(struct tb_switch
*sw
)
205 struct tb_switch
*parent
;
213 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_6
, 1);
217 xhci
= val
& ROUTER_CS_6_HCI
;
218 tbt3
= !(val
& ROUTER_CS_6_TNS
);
220 tb_sw_dbg(sw
, "TBT3 support: %s, xHCI: %s\n",
221 tbt3
? "yes" : "no", xhci
? "yes" : "no");
223 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
227 parent
= tb_switch_parent(sw
);
229 if (tb_switch_find_port(parent
, TB_TYPE_USB3_DOWN
)) {
230 val
|= ROUTER_CS_5_UTO
;
234 /* Only enable PCIe tunneling if the parent router supports it */
235 if (tb_switch_find_port(parent
, TB_TYPE_PCIE_DOWN
)) {
236 val
|= ROUTER_CS_5_PTO
;
238 * xHCI can be enabled if PCIe tunneling is supported
239 * and the parent does not have any USB3 dowstream
240 * adapters (so we cannot do USB 3.x tunneling).
243 val
|= ROUTER_CS_5_HCO
;
246 /* TBT3 supported by the CM */
247 val
|= ROUTER_CS_5_C3S
;
248 /* Tunneling configuration is ready now */
249 val
|= ROUTER_CS_5_CV
;
251 ret
= tb_sw_write(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
255 return usb4_switch_wait_for_bit(sw
, ROUTER_CS_6
, ROUTER_CS_6_CR
,
260 * usb4_switch_read_uid() - Read UID from USB4 router
263 * Reads 64-bit UID from USB4 router config space.
265 int usb4_switch_read_uid(struct tb_switch
*sw
, u64
*uid
)
267 return tb_sw_read(sw
, uid
, TB_CFG_SWITCH
, ROUTER_CS_7
, 2);
270 static int usb4_switch_drom_read_block(struct tb_switch
*sw
,
271 unsigned int dwaddress
, void *buf
,
278 metadata
= (dwords
<< USB4_DROM_SIZE_SHIFT
) & USB4_DROM_SIZE_MASK
;
279 metadata
|= (dwaddress
<< USB4_DROM_ADDRESS_SHIFT
) &
280 USB4_DROM_ADDRESS_MASK
;
282 ret
= usb4_switch_op_write_metadata(sw
, metadata
);
286 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_DROM_READ
, &status
);
293 return usb4_switch_op_read_data(sw
, buf
, dwords
);
297 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
300 * Uses USB4 router operations to read router DROM. For devices this
301 * should always work but for hosts it may return %-EOPNOTSUPP in which
302 * case the host router does not have DROM.
304 int usb4_switch_drom_read(struct tb_switch
*sw
, unsigned int address
, void *buf
,
307 return usb4_switch_do_read_data(sw
, address
, buf
, size
,
308 usb4_switch_drom_read_block
);
311 static int usb4_set_port_configured(struct tb_port
*port
, bool configured
)
316 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
317 port
->cap_usb4
+ PORT_CS_19
, 1);
322 val
|= PORT_CS_19_PC
;
324 val
&= ~PORT_CS_19_PC
;
326 return tb_port_write(port
, &val
, TB_CFG_PORT
,
327 port
->cap_usb4
+ PORT_CS_19
, 1);
331 * usb4_switch_configure_link() - Set upstream USB4 link configured
334 * Sets the upstream USB4 link to be configured for power management
337 int usb4_switch_configure_link(struct tb_switch
*sw
)
344 up
= tb_upstream_port(sw
);
345 return usb4_set_port_configured(up
, true);
349 * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
352 * Reverse of usb4_switch_configure_link().
354 void usb4_switch_unconfigure_link(struct tb_switch
*sw
)
358 if (sw
->is_unplugged
|| !tb_route(sw
))
361 up
= tb_upstream_port(sw
);
362 usb4_set_port_configured(up
, false);
366 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
369 * Checks whether conditions are met so that lane bonding can be
370 * established with the upstream router. Call only for device routers.
372 bool usb4_switch_lane_bonding_possible(struct tb_switch
*sw
)
378 up
= tb_upstream_port(sw
);
379 ret
= tb_port_read(up
, &val
, TB_CFG_PORT
, up
->cap_usb4
+ PORT_CS_18
, 1);
383 return !!(val
& PORT_CS_18_BE
);
387 * usb4_switch_set_sleep() - Prepare the router to enter sleep
390 * Enables wakes and sets sleep bit for the router. Returns when the
391 * router sleep ready bit has been asserted.
393 int usb4_switch_set_sleep(struct tb_switch
*sw
)
398 /* Set sleep bit and wait for sleep ready to be asserted */
399 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
403 val
|= ROUTER_CS_5_SLP
;
405 ret
= tb_sw_write(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
409 return usb4_switch_wait_for_bit(sw
, ROUTER_CS_6
, ROUTER_CS_6_SLPR
,
410 ROUTER_CS_6_SLPR
, 500);
414 * usb4_switch_nvm_sector_size() - Return router NVM sector size
417 * If the router supports NVM operations this function returns the NVM
418 * sector size in bytes. If NVM operations are not supported returns
421 int usb4_switch_nvm_sector_size(struct tb_switch
*sw
)
427 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_SECTOR_SIZE
, &status
);
432 return status
== 0x2 ? -EOPNOTSUPP
: -EIO
;
434 ret
= usb4_switch_op_read_metadata(sw
, &metadata
);
438 return metadata
& USB4_NVM_SECTOR_SIZE_MASK
;
441 static int usb4_switch_nvm_read_block(struct tb_switch
*sw
,
442 unsigned int dwaddress
, void *buf
, size_t dwords
)
448 metadata
= (dwords
<< USB4_NVM_READ_LENGTH_SHIFT
) &
449 USB4_NVM_READ_LENGTH_MASK
;
450 metadata
|= (dwaddress
<< USB4_NVM_READ_OFFSET_SHIFT
) &
451 USB4_NVM_READ_OFFSET_MASK
;
453 ret
= usb4_switch_op_write_metadata(sw
, metadata
);
457 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_READ
, &status
);
464 return usb4_switch_op_read_data(sw
, buf
, dwords
);
468 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
470 * @address: Starting address in bytes
471 * @buf: Read data is placed here
472 * @size: How many bytes to read
474 * Reads NVM contents of the router. If NVM is not supported returns
477 int usb4_switch_nvm_read(struct tb_switch
*sw
, unsigned int address
, void *buf
,
480 return usb4_switch_do_read_data(sw
, address
, buf
, size
,
481 usb4_switch_nvm_read_block
);
484 static int usb4_switch_nvm_set_offset(struct tb_switch
*sw
,
485 unsigned int address
)
487 u32 metadata
, dwaddress
;
491 dwaddress
= address
/ 4;
492 metadata
= (dwaddress
<< USB4_NVM_SET_OFFSET_SHIFT
) &
493 USB4_NVM_SET_OFFSET_MASK
;
495 ret
= usb4_switch_op_write_metadata(sw
, metadata
);
499 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_SET_OFFSET
, &status
);
503 return status
? -EIO
: 0;
506 static int usb4_switch_nvm_write_next_block(struct tb_switch
*sw
,
507 const void *buf
, size_t dwords
)
512 ret
= usb4_switch_op_write_data(sw
, buf
, dwords
);
516 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_WRITE
, &status
);
520 return status
? -EIO
: 0;
524 * usb4_switch_nvm_write() - Write to the router NVM
526 * @address: Start address where to write in bytes
527 * @buf: Pointer to the data to write
528 * @size: Size of @buf in bytes
530 * Writes @buf to the router NVM using USB4 router operations. If NVM
531 * write is not supported returns %-EOPNOTSUPP.
533 int usb4_switch_nvm_write(struct tb_switch
*sw
, unsigned int address
,
534 const void *buf
, size_t size
)
538 ret
= usb4_switch_nvm_set_offset(sw
, address
);
542 return usb4_switch_do_write_data(sw
, address
, buf
, size
,
543 usb4_switch_nvm_write_next_block
);
547 * usb4_switch_nvm_authenticate() - Authenticate new NVM
550 * After the new NVM has been written via usb4_switch_nvm_write(), this
551 * function triggers NVM authentication process. If the authentication
552 * is successful the router is power cycled and the new NVM starts
553 * running. In case of failure returns negative errno.
555 int usb4_switch_nvm_authenticate(struct tb_switch
*sw
)
560 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_AUTH
, &status
);
566 tb_sw_dbg(sw
, "NVM authentication successful\n");
580 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
584 * For DP tunneling this function can be used to query availability of
585 * DP IN resource. Returns true if the resource is available for DP
586 * tunneling, false otherwise.
588 bool usb4_switch_query_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
593 ret
= usb4_switch_op_write_metadata(sw
, in
->port
);
597 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_QUERY_DP_RESOURCE
, &status
);
599 * If DP resource allocation is not supported assume it is
602 if (ret
== -EOPNOTSUPP
)
611 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
615 * Allocates DP IN resource for DP tunneling using USB4 router
616 * operations. If the resource was allocated returns %0. Otherwise
617 * returns negative errno, in particular %-EBUSY if the resource is
620 int usb4_switch_alloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
625 ret
= usb4_switch_op_write_metadata(sw
, in
->port
);
629 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_ALLOC_DP_RESOURCE
, &status
);
630 if (ret
== -EOPNOTSUPP
)
635 return status
? -EBUSY
: 0;
639 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
643 * Releases the previously allocated DP IN resource.
645 int usb4_switch_dealloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
650 ret
= usb4_switch_op_write_metadata(sw
, in
->port
);
654 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE
, &status
);
655 if (ret
== -EOPNOTSUPP
)
660 return status
? -EIO
: 0;
663 static int usb4_port_idx(const struct tb_switch
*sw
, const struct tb_port
*port
)
668 /* Assume port is primary */
669 tb_switch_for_each_port(sw
, p
) {
670 if (!tb_port_is_null(p
))
672 if (tb_is_upstream_port(p
))
685 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
689 * USB4 routers have direct mapping between USB4 ports and PCIe
690 * downstream adapters where the PCIe topology is extended. This
691 * function returns the corresponding downstream PCIe adapter or %NULL
692 * if no such mapping was possible.
694 struct tb_port
*usb4_switch_map_pcie_down(struct tb_switch
*sw
,
695 const struct tb_port
*port
)
697 int usb4_idx
= usb4_port_idx(sw
, port
);
701 /* Find PCIe down port matching usb4_port */
702 tb_switch_for_each_port(sw
, p
) {
703 if (!tb_port_is_pcie_down(p
))
706 if (pcie_idx
== usb4_idx
&& !tb_pci_port_is_enabled(p
))
716 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
720 * USB4 routers have direct mapping between USB4 ports and USB 3.x
721 * downstream adapters where the USB 3.x topology is extended. This
722 * function returns the corresponding downstream USB 3.x adapter or
723 * %NULL if no such mapping was possible.
725 struct tb_port
*usb4_switch_map_usb3_down(struct tb_switch
*sw
,
726 const struct tb_port
*port
)
728 int usb4_idx
= usb4_port_idx(sw
, port
);
732 /* Find USB3 down port matching usb4_port */
733 tb_switch_for_each_port(sw
, p
) {
734 if (!tb_port_is_usb3_down(p
))
737 if (usb_idx
== usb4_idx
&& !tb_usb3_port_is_enabled(p
))
747 * usb4_port_unlock() - Unlock USB4 downstream port
748 * @port: USB4 port to unlock
750 * Unlocks USB4 downstream port so that the connection manager can
751 * access the router below this port.
753 int usb4_port_unlock(struct tb_port
*port
)
758 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
, ADP_CS_4
, 1);
762 val
&= ~ADP_CS_4_LCK
;
763 return tb_port_write(port
, &val
, TB_CFG_PORT
, ADP_CS_4
, 1);