/*
 * Internal Thunderbolt Connection Manager. This is a firmware running on
 * the Thunderbolt host controller performing most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	size_t max_boot_acl;
	int vnd_cap;
	bool safe_mode;
	bool rpm;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*save_devices)(struct tb *tb);
	int (*driver_ready)(struct tb *tb,
			    enum tb_security_level *security_level,
			    size_t *nboot_acl, bool *rpm);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};

struct ep_name_entry {
	u8 len;
	u8 type;
	u8 data[0];
};

#define EP_NAME_INTEL_VSS	0x10

/* Intel Vendor specific structure */
struct intel_vss {
	u16 vendor;
	u16 model;
	u8 mc;
	u8 flags;
	u16 pci_devid;
	u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3	BIT(0)

static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
	const void *end = ep_name + size;

	while (ep_name < end) {
		const struct ep_name_entry *ep = ep_name;

		if (!ep->len)
			break;
		if (ep_name + ep->len > end)
			break;

		if (ep->type == EP_NAME_INTEL_VSS)
			return (const struct intel_vss *)ep->data;

		ep_name += ep->len;
	}

	return NULL;
}

static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	u8 link;

	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);
}

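/*
 * Worked example: ICM numbers the two links of a physical port as pairs
 * starting from 1, so links 1 and 2 belong to phy port 0 and links 3
 * and 4 to phy port 1. With the XOR trick below,
 * dual_link_from_link(1) == 2, dual_link_from_link(2) == 1,
 * dual_link_from_link(3) == 4, and 0 (no link) stays 0.
 */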
static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

static inline u64 get_parent_route(u64 route)
{
	int depth = tb_route_length(route);
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

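/*
 * Worked example: a route string carries one port number per hop,
 * TB_ROUTE_SHIFT (8) bits each. Route 0x0301 therefore has depth 2:
 * port 1 at the first hop and port 3 at the second, so
 * get_parent_route(0x0301) == 0x0301 & ~(0xffULL << 8) == 0x01,
 * the route of the switch one level up.
 */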
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}

static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		/* Retry the whole request if the previous one timed out */
		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}

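/*
 * Usage sketch (mirrors the driver_ready helpers below): a caller builds
 * a request/reply pair and lets icm_request() do the matching:
 *
 *	struct icm_pkg_driver_ready request = {
 *		.hdr.code = ICM_DRIVER_READY,
 *	};
 *	struct icm_fr_pkg_driver_ready_response reply;
 *
 *	memset(&reply, 0, sizeof(reply));
 *	ret = icm_request(tb, &request, sizeof(request), &reply,
 *			  sizeof(reply), 1, ICM_TIMEOUT);
 *
 * icm_match() pairs the response with the request by packet type and
 * code, and icm_copy() scatters multi-packet responses (npackets > 1)
 * into the response buffer by packet_id.
 */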
static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}

static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];

	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_fr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

	return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	u8 phy_port;
	u8 cmd;

	phy_port = tb_phy_port_from_link(xd->link);
	if (phy_port == 0)
		cmd = NHI_MAILBOX_DISCONNECT_PA;
	else
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	/* Two-stage disconnect through the NHI mailbox */
	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
	return 0;
}

static void add_switch(struct tb_switch *parent_sw, u64 route,
		       const uuid_t *uuid, const u8 *ep_name,
		       size_t ep_name_size, u8 connection_id, u8 connection_key,
		       u8 link, u8 depth, enum tb_security_level security_level,
		       bool authorized, bool boot)
{
	const struct intel_vss *vss;
	struct tb_switch *sw;

	pm_runtime_get_sync(&parent_sw->dev);

	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
	if (!sw)
		goto out;

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
	if (!sw->uuid) {
		tb_sw_warn(sw, "cannot allocate memory for switch\n");
		tb_switch_put(sw);
		goto out;
	}
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->authorized = authorized;
	sw->security_level = security_level;
	sw->boot = boot;

	vss = parse_intel_vss(ep_name, ep_name_size);
	if (vss)
		sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	if (tb_switch_add(sw)) {
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
		tb_switch_put(sw);
	}

out:
	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);
}

static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
			  u64 route, u8 connection_id, u8 connection_key,
			  u8 link, u8 depth, bool boot)
{
	/* Disconnect from parent */
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->boot = boot;

	/* This switch still exists */
	sw->is_unplugged = false;
}

static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	pm_runtime_get_sync(&sw->dev);

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		goto out;

	xd->link = link;
	xd->depth = depth;

	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;
	u8 link, depth;
	bool boot;
	u64 route;
	int ret;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
			link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = tb_phy_port_from_link(sw->link);
		phy_port = tb_phy_port_from_link(link);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that still are present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our book keeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			/*
			 * It was enumerated through another link so update
			 * route string accordingly.
			 */
			if (sw->link != link) {
				ret = icm->get_route(tb, link, depth, &route);
				if (ret) {
					tb_err(tb, "failed to update route string for switch at %u.%u\n",
					       link, depth);
					tb_switch_put(sw);
					return;
				}
			} else {
				route = tb_route(sw);
			}

			update_switch(parent_sw, sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we found one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		tb_switch_put(parent_sw);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   pkg->connection_key, link, depth, security_level,
		   authorized, boot);

	tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u8 link, depth;
	u64 route;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		/*
		 * If we find an existing XDomain connection remove it
		 * now. We need to go through login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * Look if there already exists an XDomain in the same place
	 * as the new one and in that case remove it because it is
	 * most likely another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection is through one or multiple devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 20000);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
	if (nboot_acl)
		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
				ICM_TR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

	return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_approve_device request;
	struct icm_tr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_add_device_key_response reply;
	struct icm_tr_pkg_add_device_key request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_tr_pkg_challenge_device_response reply;
	struct icm_tr_pkg_challenge_device request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
				    int stage)
{
	struct icm_tr_pkg_disconnect_xdomain_response reply;
	struct icm_tr_pkg_disconnect_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
	request.stage = stage;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	int ret;

	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
	if (ret)
		return ret;

	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_connected *pkg =
		(const struct icm_tr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct tb_xdomain *xd;
	bool authorized, boot;
	u64 route;

	/*
	 * Currently we don't use the QoS information coming with the
	 * device connected message so simply just ignore that extra
	 * packet for now.
	 */
	if (pkg->hdr.packet_id)
		return;

	route = get_route(pkg->route_hi, pkg->route_lo);
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
			route);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		/* Update the switch if it is still in the same place */
		if (tb_route(sw) == route && !!sw->authorized == authorized) {
			parent_sw = tb_to_switch(sw->dev.parent);
			update_switch(parent_sw, sw, route, pkg->connection_id,
				      0, 0, 0, boot);
			tb_switch_put(sw);
			return;
		}

		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Another switch with the same address */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* XDomain connection with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %llx\n", route);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   0, 0, 0, security_level, authorized, boot);

	tb_switch_put(parent_sw);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_disconnected *pkg =
		(const struct icm_tr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	sw = tb_switch_find_by_route(tb, route);
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_connected *pkg =
		(const struct icm_tr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u64 route;

	if (!tb->root_switch)
		return;

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		if (xd->route == route) {
			update_xdomain(xd, route, 0);
			tb_xdomain_put(xd);
			return;
		}

		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/* An existing xdomain with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_disconnected *pkg =
		(const struct icm_tr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!x86_apple_machine)
		return true;

	/*
	 * Find the upstream PCIe port in case we need to do reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 60;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(50);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_ar_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
				ICM_AR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

	return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = { .code = ICM_PREBOOT_ACL },
	};
	int ret, i;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	for (i = 0; i < nuuids; i++) {
		u32 *uuid = (u32 *)&uuids[i];

		uuid[0] = reply.acl[i].uuid_lo;
		uuid[1] = reply.acl[i].uuid_hi;

		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
			/* Map empty entries to null UUID */
			uuid[0] = 0;
			uuid[1] = 0;
		} else if (uuid[0] != 0 || uuid[1] != 0) {
			/* Upper two DWs are always one's */
			uuid[2] = 0xffffffff;
			uuid[3] = 0xffffffff;
		}
	}

	return ret;
}

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
			       size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = {
			.code = ICM_PREBOOT_ACL,
			.flags = ICM_FLAGS_WRITE,
		},
	};
	int ret, i;

	for (i = 0; i < nuuids; i++) {
		const u32 *uuid = (const u32 *)&uuids[i];

		if (uuid_is_null(&uuids[i])) {
			/*
			 * Map null UUID to the empty (all one) entries
			 * for ICM.
			 */
			request.acl[i].uuid_lo = 0xffffffff;
			request.acl[i].uuid_hi = 0xffffffff;
		} else {
			/* Two high DWs need to be set to all one */
			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
				return -EINVAL;

			request.acl[i].uuid_lo = uuid[0];
			request.acl[i].uuid_hi = uuid[1];
		}
	}

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	/*
	 * When the domain is stopped we flush its workqueue but before
	 * that the root switch is removed. In that case we should treat
	 * the queued events as being canceled.
	 */
	if (tb->root_switch) {
		switch (n->pkg->code) {
		case ICM_EVENT_DEVICE_CONNECTED:
			icm->device_connected(tb, n->pkg);
			break;
		case ICM_EVENT_DEVICE_DISCONNECTED:
			icm->device_disconnected(tb, n->pkg);
			break;
		case ICM_EVENT_XDOMAIN_CONNECTED:
			icm->xdomain_connected(tb, n->pkg);
			break;
		case ICM_EVENT_XDOMAIN_DISCONNECTED:
			icm->xdomain_disconnected(tb, n->pkg);
			break;
		}
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}

static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		   size_t *nboot_acl, bool *rpm)
{
	struct icm *icm = tb_priv(tb);
	unsigned int retries = 50;
	int ret;

	ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
	if (ret) {
		tb_err(tb, "failed to send driver ready to ICM\n");
		return ret;
	}

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	tb_err(tb, "failed to read root switch config space, giving up\n");
	return -ETIMEDOUT;
}

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

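/*
 * The two helpers above implement the PCIe2CIO mailbox found in the
 * vendor specific capability: data moves through PCIE2CIO_WRDATA and
 * PCIE2CIO_RDDATA, the CMD register encodes config space, port and
 * index, and setting PCIE2CIO_CMD_START kicks the access.
 * pci2cio_wait_completion() then polls for the START bit to clear,
 * with PCIE2CIO_CMD_TIMEOUT flagging failure. For example,
 * icm_firmware_reset() below uses the write side to poke offset 0x50
 * of the switch config space and trigger a CIO reset.
 */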
static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	if (!icm->upstream_port)
		return -ENODEV;

	/* Put ARC to wait for CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}

static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			if (ret < 0)
				return ret;

			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}

static int icm_driver_ready(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	ret = icm_firmware_init(tb);
	if (ret)
		return ret;

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
		return 0;
	}

	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
				 &icm->rpm);
	if (ret)
		return ret;

	/*
	 * Make sure the number of supported preboot ACL matches what we
	 * expect or disable the whole feature.
	 */
	if (tb->nboot_acl > icm->max_boot_acl)
		tb->nboot_acl = 0;

	return 0;
}

static int icm_suspend(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (icm->save_devices)
		icm->save_devices(tb);

	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	return 0;
}

/*
 * Mark all switches (except root switch) below this one unplugged. ICM
 * firmware will send us an updated list of switches after we have sent
 * it the driver ready command. If a switch is not in that list it will
 * be removed when we perform rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
	unsigned int i;

	if (tb_route(sw))
		sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			continue;
		}
		if (!port->remote)
			continue;

		icm_unplug_children(port->remote->sw);
	}
}

static void icm_free_unplugged_children(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			continue;
		}

		if (!port->remote)
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			icm_free_unplugged_children(port->remote->sw);
		}
	}
}

static void icm_rescan_work(struct work_struct *work)
{
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static void icm_complete(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)
		return;

	icm_unplug_children(tb->root_switch);

	/*
	 * Now all existing children should be resumed, start events
	 * from ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL, NULL, NULL);

	/*
	 * We do not get notifications of devices that have been
	 * unplugged during suspend so schedule rescan to clean them up
	 * if any.
	 */
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_runtime_suspend(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	return 0;
}

static int icm_runtime_resume(struct tb *tb)
{
	/*
	 * We can reuse the same resume functionality as with system
	 * suspend.
	 */
	icm_complete(tb);
	return 0;
}

static int icm_start(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	if (icm->safe_mode)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
	tb->root_switch->rpm = icm->rpm;

	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		tb->root_switch = NULL;
	}

	return ret;
}

static void icm_stop(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge */
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Alpine Ridge */
static const struct tb_cm_ops icm_ar_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Titan Ridge */
static const struct tb_cm_ops icm_tr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_tr_approve_switch,
	.add_switch_key = icm_tr_add_switch_key,
	.challenge_switch_key = icm_tr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
};

struct tb *icm_probe(struct tb_nhi *nhi)
{
	struct icm *icm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct icm));
	if (!tb)
		return NULL;

	icm = tb_priv(tb);
	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_fr_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_ar_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_ar_ops;
		break;

	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->driver_ready = icm_tr_driver_ready;
		icm->device_connected = icm_tr_device_connected;
		icm->device_disconnected = icm_tr_device_disconnected;
		icm->xdomain_connected = icm_tr_xdomain_connected;
		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
		tb->cm_ops = &icm_tr_ops;
		break;
	}

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
		tb_domain_put(tb);
		return NULL;
	}

	return tb;
}