// SPDX-License-Identifier: GPL-2.0
/*
 * CLx support
 *
 * Copyright (C) 2020 - 2023, Intel Corporation
 * Authors: Gil Fine <gil.fine@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/module.h>

#include "tb.h"
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
static const char *clx_name(unsigned int clx)
{
	switch (clx) {
	case TB_CL0S | TB_CL1 | TB_CL2:
		return "CL0s/CL1/CL2";
	case TB_CL1 | TB_CL2:
		return "CL1/CL2";
	case TB_CL0S | TB_CL2:
		return "CL0s/CL2";
	case TB_CL0S | TB_CL1:
		return "CL0s/CL1";
	case TB_CL0S:
		return "CL0s";
	case 0:
		return "disabled";
	default:
		return "unknown";
	}
}
static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 phy;
	int ret;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (secondary)
		phy |= LANE_ADP_CS_1_PMS;
	else
		phy &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, true);
}

static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, false);
}
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
{
	u32 val, mask = 0;
	int ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	if (clx & TB_CL0S)
		mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
	if (clx & TB_CL1)
		mask |= LANE_ADP_CS_0_CL1_SUPPORT;
	if (clx & TB_CL2)
		mask |= LANE_ADP_CS_0_CL2_SUPPORT;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}
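
/*
 * Note on the register layout used by the helpers above and below: support
 * for the individual CL states is reported in LANE_ADP_CS_0 (read in
 * tb_port_clx_supported()), while the corresponding enable bits live in
 * LANE_ADP_CS_1, which tb_port_clx_set() read-modify-writes.
 */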
static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
{
	u32 phy, mask = 0;
	int ret;

	if (clx & TB_CL0S)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE;
	if (clx & TB_CL1)
		mask |= LANE_ADP_CS_1_CL1_ENABLE;
	if (clx & TB_CL2)
		mask |= LANE_ADP_CS_1_CL2_ENABLE;

	if (!mask)
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy |= mask;
	else
		phy &= ~mask;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, false);
}

static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, true);
}
static int tb_port_clx(struct tb_port *port)
{
	unsigned int clx = 0;
	u32 val;
	int ret;

	if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
		return 0;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (val & LANE_ADP_CS_1_CL0S_ENABLE)
		clx |= TB_CL0S;
	if (val & LANE_ADP_CS_1_CL1_ENABLE)
		clx |= TB_CL1;
	if (val & LANE_ADP_CS_1_CL2_ENABLE)
		clx |= TB_CL2;

	return clx;
}
/**
 * tb_port_clx_is_enabled() - Is given CL state enabled
 * @port: USB4 port to check
 * @clx: Mask of CL states to check
 *
 * Returns true if any of the given CL states is enabled for @port.
 */
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
{
	return !!(tb_port_clx(port) & clx);
}
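
/*
 * Illustrative (hypothetical) caller: a path that must not race with low
 * power states could bail out when any of them is active, e.g.
 *
 *	if (tb_port_clx_is_enabled(port, TB_CL0S | TB_CL1 | TB_CL2))
 *		return -EBUSY;
 */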
/**
 * tb_switch_clx_is_supported() - Is CLx supported on this type of router
 * @sw: The router to check CLx support for
 */
static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
{
	if (!clx_enabled)
		return false;

	if (sw->quirks & QUIRK_NO_CLX)
		return false;

	/*
	 * CLx is not enabled and validated on Intel USB4 platforms
	 * before Alder Lake.
	 */
	if (tb_switch_is_tiger_lake(sw))
		return false;

	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
/**
 * tb_switch_clx_init() - Initialize router CL states
 * @sw: Router
 *
 * Can be called for any router. Initializes the current CL state by
 * reading it from the hardware.
 *
 * Returns %0 in case of success and negative errno in case of failure.
 */
int tb_switch_clx_init(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	unsigned int clx, tmp;

	if (tb_switch_is_icm(sw))
		return 0;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_clx_is_supported(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	clx = tb_port_clx(up);
	tmp = tb_port_clx(down);
	if (clx != tmp)
		tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
			   clx, tmp);

	tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));

	sw->clx = clx;
	return 0;
}
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	ret = tb_port_pm_secondary_enable(up);
	if (ret)
		return ret;

	return tb_port_pm_secondary_disable(down);
}
static int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
	int up_port = sw->config.upstream_port_number;
	u32 offset, val[2], mask_obj, unmask_obj;
	int ret, i;

	/* Only Titan Ridge of pre-USB4 devices supports CLx states */
	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	if (!sw->cap_lp)
		return 0;

	/*
	 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
	 * Port A consists of lane adapters 1,2 and
	 * Port B consists of lane adapters 3,4.
	 * If the upstream port is A (lanes 1,2), we mask objections from
	 * port B (lanes 3,4) and unmask objections from port A, and vice versa.
	 */
	if (up_port == 1) {
		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		offset = TB_LOW_PWR_C1_CL1;
	} else {
		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		offset = TB_LOW_PWR_C3_CL1;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lp + offset, ARRAY_SIZE(val));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] |= mask_obj;
		val[i] &= ~unmask_obj;
	}

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_lp + offset, ARRAY_SIZE(val));
}
static bool validate_mask(unsigned int clx)
{
	/* Previous states need to be enabled */
	if (clx & TB_CL1)
		return (clx & TB_CL0S) == TB_CL0S;
	return true;
}
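
/*
 * For example, a mask of TB_CL1 alone fails validate_mask() because CL1
 * requires CL0s to be enabled as well, whereas TB_CL0S | TB_CL1 passes.
 */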
/**
 * tb_switch_clx_enable() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * CLx is enabled only if both sides of the link support CLx, if both sides
 * of the link are not configured as two single lane links, and only if the
 * link is not an inter-domain link. The complete set of conditions is
 * described in CM Guide 1.0 section 8.1.
 *
 * Returns %0 on success or an error code on failure.
 */
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
	bool up_clx_support, down_clx_support;
	struct tb_switch *parent_sw;
	struct tb_port *up, *down;
	int ret;

	if (!clx || sw->clx == clx)
		return 0;

	if (!validate_mask(clx))
		return -EINVAL;

	parent_sw = tb_switch_parent(sw);
	if (!parent_sw)
		return 0;

	if (!tb_switch_clx_is_supported(parent_sw) ||
	    !tb_switch_clx_is_supported(sw))
		return 0;

	/* Only support CL2 for v2 routers */
	if ((clx & TB_CL2) &&
	    (usb4_switch_version(parent_sw) < 2 ||
	     usb4_switch_version(sw) < 2))
		return -EOPNOTSUPP;

	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	sw->clx |= clx;

	tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
	return 0;
}
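
/*
 * Hypothetical usage sketch (not part of this file): a connection manager
 * would typically request all the states it wants in one call and treat a
 * missing capability as non-fatal, e.g.
 *
 *	ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1);
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;
 */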
/**
 * tb_switch_clx_disable() - Disable CLx on upstream port of specified router
 * @sw: Router to disable CLx for
 *
 * Disables all CL states of the given router. Can be called on any router;
 * if the states were not already enabled, it does nothing.
 *
 * Returns the CL states that were disabled or negative errno in case of
 * failure.
 */
int tb_switch_clx_disable(struct tb_switch *sw)
{
	unsigned int clx = sw->clx;
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_clx_is_supported(sw))
		return 0;

	if (!clx)
		return 0;

	if (sw->is_unplugged)
		return clx;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_clx_disable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_disable(down, clx);
	if (ret)
		return ret;

	sw->clx = 0;

	tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
	return clx;
}
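
/*
 * Hypothetical pairing sketch: since tb_switch_clx_disable() returns the CL
 * states that were disabled, a caller that needs CLx off only temporarily
 * (for instance while reconfiguring the link) could save the return value
 * and hand it back to tb_switch_clx_enable() afterwards:
 *
 *	ret = tb_switch_clx_disable(sw);
 *	if (ret >= 0) {
 *		reconfigure_the_link(sw);	// placeholder step
 *		tb_switch_clx_enable(sw, ret);
 *	}
 */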