// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}
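
/* Read the link controller descriptor (TB_LC_DESC) of the switch */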
static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}
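
/*
 * Return the config space offset of the per-port link controller
 * registers for @port, or a negative errno on failure.
 */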
static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}
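
/*
 * Set or clear the lane configured bit (and the upstream bit for the
 * upstream port) in the TB_LC_SX_CTRL register of the port.
 */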
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}
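
/*
 * Set or clear the lane XDomain configured bit in the TB_LC_SX_CTRL
 * register of the port.
 */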
static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 on success and negative errno on failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}
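
/*
 * Program the wake bits of a single link controller at @offset
 * according to the TB_WAKE_ON_* bits in @flags.
 */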
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/*
	 * Clear all wake bits first, then enable the ones requested in
	 * @flags: connect/disconnect, PCIe and USB4 (wake coming from
	 * another router).
	 */
	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
		  TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * For each LC sets wake bits accordingly.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether the link from the parent to @sw meets the conditions
 * for lane bonding.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}
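
/*
 * Return the DP sink number (0 or 1) used by DP IN port @in, or
 * %-EINVAL if @sw has no DP IN ports.
 */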
static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}
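
/*
 * Return %0 if DP sink @sink is unallocated or already owned by the
 * connection manager, %-EBUSY otherwise.
 */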
static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned if
 * the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}