// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>

#include "tb.h"

#define USB4_DATA_DWORDS		16
#define USB4_DATA_RETRIES		3

enum usb4_switch_op {
	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
	USB4_SWITCH_OP_NVM_WRITE = 0x20,
	USB4_SWITCH_OP_NVM_AUTH = 0x21,
	USB4_SWITCH_OP_NVM_READ = 0x22,
	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
	USB4_SWITCH_OP_DROM_READ = 0x24,
	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};

#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)
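
/*
 * Router operations pass their arguments through the metadata register
 * and bulk data through the router data area (see the helpers below).
 * The masks above describe how the dword address and length fields are
 * packed into the metadata value. As an illustrative example (not part
 * of the original sources): a 4-dword NVM read starting at dword
 * address 0x40 would encode the metadata as
 * (4 << USB4_NVM_READ_LENGTH_SHIFT) | (0x40 << USB4_NVM_READ_OFFSET_SHIFT),
 * i.e. 0x04000100.
 */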

typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t);
typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t);

static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
				    u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
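
/*
 * Up to USB4_DATA_DWORDS (16 dwords, 64 bytes) can be moved through the
 * router data area starting at ROUTER_CS_9 in a single router operation.
 * The two helpers below read and write that window; larger transfers are
 * split into chunks by usb4_switch_do_read_data()/usb4_switch_do_write_data().
 */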
static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
				    size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}

static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
				     size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}

static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
{
	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}

static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
{
	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
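
/*
 * usb4_switch_do_read_data() and usb4_switch_do_write_data() turn an
 * arbitrary byte-addressed transfer into a sequence of dword-aligned
 * block operations of at most 64 bytes each, using a bounce buffer to
 * handle unaligned start addresses. Blocks that time out are retried up
 * to USB4_DATA_RETRIES times before the transfer fails with -EIO.
 */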
static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
	void *buf, size_t size, read_block_fn read_block)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	offset = address & 3;
	address = address & ~3;

	do {
		size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
		unsigned int dwaddress, dwords;
		u8 data[USB4_DATA_DWORDS * 4];
		int ret;

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(sw, dwaddress, data, dwords);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address,
	const void *buf, size_t size, write_block_fn write_next_block)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	offset = address & 3;
	address = address & ~3;

	do {
		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
		u8 data[USB4_DATA_DWORDS * 4];
		int ret;

		memcpy(data + offset, buf, nbytes);

		ret = write_next_block(sw, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
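
/*
 * Router operations follow a simple handshake: the opcode is written to
 * ROUTER_CS_26 with the OV (operation valid) bit set, the router clears
 * OV when the operation completes (polled for up to 500 ms), the ONS bit
 * reports "operation not supported", and the result is extracted from the
 * status field of the same register. A minimal usage sketch (illustrative
 * only, not from the original sources):
 *
 *	u8 status;
 *	int ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
 *	if (ret)
 *		return ret;
 *	if (status)
 *		return -EIO;
 */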
static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
{
	u32 val;
	int ret;

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	*status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
	return 0;
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g. the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is a Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);

	/* Enable USB 3.x tunneling only if the parent has a USB3 downstream adapter */
	if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/* Only enable PCIe tunneling if the parent router supports it */
	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
					ROUTER_CS_6_CR, 50);
}

/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}

static int usb4_switch_drom_read_block(struct tb_switch *sw,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
	if (ret)
		return ret;

	if (status)
		return -EIO;

	return usb4_switch_op_read_data(sw, buf, dwords);
}

/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return usb4_switch_do_read_data(sw, address, buf, size,
					usb4_switch_drom_read_block);
}

static int usb4_set_port_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_switch_configure_link() - Set upstream USB4 link configured
 * @sw: USB4 router
 *
 * Sets the upstream USB4 link to be configured for power management
 * purposes.
 */
int usb4_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	return usb4_set_port_configured(up, true);
}

/**
 * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
 * @sw: USB4 router
 *
 * Reverse of usb4_switch_configure_link().
 */
void usb4_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up;

	if (sw->is_unplugged || !tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	usb4_set_port_configured(up, false);
}

/**
 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 * @sw: USB4 router
 *
 * Checks whether conditions are met so that lane bonding can be
 * established with the upstream router. Call only for device routers.
 */
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int ret;
	u32 val;

	up = tb_upstream_port(sw);
	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_BE);
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Enables wakes and sets sleep bit for the router. Returns when the
 * router sleep ready bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
					ROUTER_CS_6_SLPR, 500);
}

/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
	if (ret)
		return ret;

	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	ret = usb4_switch_op_read_metadata(sw, &metadata);
	if (ret)
		return ret;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
static int usb4_switch_nvm_read_block(struct tb_switch *sw,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
	if (ret)
		return ret;

	if (status)
		return -EIO;

	return usb4_switch_op_read_data(sw, buf, dwords);
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return usb4_switch_do_read_data(sw, address, buf, size,
					usb4_switch_nvm_read_block);
}
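
/*
 * NVM writes are a two step sequence: usb4_switch_nvm_set_offset() tells
 * the router where the data will land, after which the payload is
 * streamed in 64-byte blocks via usb4_switch_nvm_write_next_block(). A
 * typical firmware update then finishes with usb4_switch_nvm_authenticate().
 * This is a sketch of how the helpers below fit together; see the
 * kernel-doc of each function for the exact contract.
 */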
static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
				      unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_switch_nvm_write_next_block(struct tb_switch *sw,
					    const void *buf, size_t dwords)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_data(sw, buf, dwords);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return usb4_switch_do_write_data(sw, address, buf, size,
					 usb4_switch_nvm_write_next_block);
}
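
/*
 * Note that on successful authentication the router power cycles itself
 * (see the kernel-doc below), so the caller should expect the device to
 * drop off the bus and re-enumerate with the new NVM image. The switch
 * statement in the function maps the returned operation status to errno
 * values.
 */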
/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. If the authentication
 * is successful the router is power cycled and the new NVM starts
 * running. In case of failure returns negative errno.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	u8 status = 0;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
	if (ret)
		return ret;

	switch (status) {
	case 0x0:
		tb_sw_dbg(sw, "NVM authentication successful\n");
		return 0;
	case 0x1:
		return -EINVAL;
	case 0x2:
		return -EAGAIN;
	case 0x3:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}

/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return false;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	else if (ret)
		return false;

	return !status;
}
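
/*
 * The query/alloc/dealloc DP resource operations all identify the DP IN
 * adapter by writing its adapter number (in->port) to the metadata
 * register before issuing the operation. A connection manager would
 * typically pair them along these lines (illustrative sketch only):
 *
 *	if (usb4_switch_alloc_dp_resource(sw, in))
 *		return;	// resource busy, pick another DP IN adapter
 *	...set up the DP tunnel...
 *	usb4_switch_dealloc_dp_resource(sw, in);
 */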
/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EBUSY : 0;
}

/**
 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Releases the previously allocated DP IN resource.
 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EIO : 0;
}
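
/*
 * usb4_port_idx() computes the zero-based index of a USB4 (lane 0) port
 * among the router's downstream ports. The mapping functions further
 * down rely on the direct mapping between the Nth USB4 port and the Nth
 * PCIe or USB 3.x downstream adapter of the same router.
 */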
static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}

/**
 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and PCIe
 * downstream adapters where the PCIe topology is extended. This
 * function returns the corresponding downstream PCIe adapter or %NULL
 * if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int pcie_idx = 0;

	/* Find PCIe down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_pcie_down(p))
			continue;

		if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p))
			return p;

		pcie_idx++;
	}

	return NULL;
}

/**
 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 * downstream adapters where the USB 3.x topology is extended. This
 * function returns the corresponding downstream USB 3.x adapter or
 * %NULL if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int usb_idx = 0;

	/* Find USB3 down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_usb3_down(p))
			continue;

		if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p))
			return p;

		usb_idx++;
	}

	return NULL;
}

/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}