// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - path/tunnel functionality
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include "tb.h"
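
/* Print the contents of a programmed hop register entry for debugging */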
static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
{
	const struct tb_port *port = hop->in_port;

	tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
		    hop->in_hop_index, regs->out_port, regs->next_hop);
	tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
		    regs->weight, regs->priority,
		    regs->initial_credits, regs->drop_packages);
	tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
		    regs->counter_enable, regs->counter);
	tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
		    regs->ingress_fc, regs->egress_fc,
		    regs->ingress_shared_buffer, regs->egress_shared_buffer);
	tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
		    regs->unknown1, regs->unknown2, regs->unknown3);
}
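
/*
 * Follow the programmed hop entries starting from @src and @src_hopid.
 * Returns the last output port if the walk ends with @dst_hopid,
 * otherwise returns NULL.
 */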
static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
					     int dst_hopid)
{
	struct tb_port *port, *out_port = NULL;
	struct tb_regs_hop hop;
	struct tb_switch *sw;
	int i, ret, hopid;

	hopid = src_hopid;
	port = src;

	for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
		sw = port->sw;

		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
		if (ret) {
			tb_port_warn(port, "failed to read path at %d\n", hopid);
			return NULL;
		}

		if (!hop.enable)
			return NULL;

		out_port = &sw->ports[hop.out_port];
		hopid = hop.next_hop;
		port = out_port->remote;
	}

	return out_port && hopid == dst_hopid ? out_port : NULL;
}
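
/*
 * Scan the input HopIDs of @src and return the one whose programmed
 * path ends at @dst with @dst_hopid, or 0 if no such path is found.
 */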
static int tb_path_find_src_hopid(struct tb_port *src,
				  const struct tb_port *dst, int dst_hopid)
{
	struct tb_port *out;
	int i;

	for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
		out = tb_path_find_dst_port(src, i, dst_hopid);
		if (out == dst)
			return i;
	}

	return 0;
}

/**
 * tb_path_discover() - Discover a path
 * @src: First input port of a path
 * @src_hopid: Starting HopID of a path (%-1 if don't care)
 * @dst: Expected destination port of the path (%NULL if don't care)
 * @dst_hopid: HopID to the @dst (%-1 if don't care)
 * @last: Last port is filled here if not %NULL
 * @name: Name of the path
 *
 * Follows a path starting from @src and @src_hopid to the last output
 * port of the path. Allocates HopIDs for the visited ports. Call
 * tb_path_free() to release the path and the allocated HopIDs when the
 * path is not needed anymore.
 *
 * Note that the function also discovers incomplete paths, so the caller
 * should check that the @dst port is the expected one. If it is not, the
 * path can be cleaned up by calling tb_path_deactivate() before
 * tb_path_free().
 *
 * Return: Discovered path on success, %NULL in case of failure
 */
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name)
{
	struct tb_port *out_port;
	struct tb_regs_hop hop;
	struct tb_path *path;
	struct tb_switch *sw;
	struct tb_port *p;
	size_t num_hops;
	int ret, i, h;

	if (src_hopid < 0 && dst) {
		/*
		 * For incomplete paths the intermediate HopID can be
		 * different from the one used by the protocol adapter
		 * so in that case find a path that ends on @dst with
		 * matching @dst_hopid. That should give us the correct
		 * HopID for the @src.
		 */
		src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
		if (!src_hopid)
			return NULL;
	}
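
	/* Walk the path once to count the hops and find the last port */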
	p = src;
	h = src_hopid;
	num_hops = 0;

	for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			return NULL;
		}

		/* If the hop is not enabled we got an incomplete path */
		if (!hop.enable)
			break;

		out_port = &sw->ports[hop.out_port];
		if (last)
			*last = out_port;

		h = hop.next_hop;
		p = out_port->remote;
		num_hops++;
	}

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	path->name = name;
	path->tb = src->sw->tb;
	path->path_length = num_hops;
	path->activated = true;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	p = src;
	h = src_hopid;
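
	/* Walk the path again, now reserving the HopIDs and filling in the hop entries */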
	for (i = 0; i < num_hops; i++) {
		int next_hop;

		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			goto err;
		}

		if (tb_port_alloc_in_hopid(p, h, h) < 0)
			goto err;

		out_port = &sw->ports[hop.out_port];
		next_hop = hop.next_hop;

		if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
			tb_port_release_in_hopid(p, h);
			goto err;
		}

		path->hops[i].in_port = p;
		path->hops[i].in_hop_index = h;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = next_hop;

		h = next_hop;
		p = out_port->remote;
	}

	return path;

err:
	tb_port_warn(src, "failed to discover path starting at HopID %d\n",
		     src_hopid);
	tb_path_free(path);
	return NULL;
}

/**
 * tb_path_alloc() - allocate a thunderbolt path between two ports
 * @tb: Domain pointer
 * @src: Source port of the path
 * @src_hopid: HopID used for the first ingress port in the path
 * @dst: Destination port of the path
 * @dst_hopid: HopID used for the last egress port in the path
 * @link_nr: Preferred link if there are dual links on the path
 * @name: Name of the path
 *
 * Creates a path between two ports starting with given @src_hopid. Reserves
 * HopIDs for each port (they can be different from @src_hopid depending on
 * how many HopIDs each port has already reserved). If there are dual
 * links on the path, prioritizes using @link_nr but takes into account
 * that the lanes may be bonded.
 *
 * Return: Returns a tb_path on success or NULL on failure.
 */
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name)
{
	struct tb_port *in_port, *out_port;
	int in_hopid, out_hopid;
	struct tb_path *path;
	size_t num_hops;
	int i, ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	/*
	 * Number of hops on a path is the distance between the two
	 * switches plus the source adapter port.
	 */
	num_hops = abs(tb_route_length(tb_route(src->sw)) -
		       tb_route_length(tb_route(dst->sw))) + 1;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	in_hopid = src_hopid;
	out_port = NULL;
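
	/*
	 * Walk from @src towards @dst one hop at a time, reserving an
	 * input HopID on the ingress port and an output HopID on the
	 * egress port of every hop.
	 */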
	for (i = 0; i < num_hops; i++) {
		in_port = tb_next_port_on_path(src, dst, out_port);
		if (!in_port)
			goto err;

		/* When lanes are bonded primary link must be used */
		if (!in_port->bonded && in_port->dual_link_port &&
		    in_port->link_nr != link_nr)
			in_port = in_port->dual_link_port;

		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
		if (ret < 0)
			goto err;
		in_hopid = ret;

		out_port = tb_next_port_on_path(src, dst, in_port);
		if (!out_port)
			goto err;

		/*
		 * Pick up right port when going from non-bonded to
		 * bonded or from bonded to non-bonded.
		 */
		if (out_port->dual_link_port) {
			if (!in_port->bonded && out_port->bonded &&
			    out_port->link_nr) {
				/*
				 * Use primary link when going from
				 * non-bonded to bonded.
				 */
				out_port = out_port->dual_link_port;
			} else if (!out_port->bonded &&
				   out_port->link_nr != link_nr) {
				/*
				 * If out port is not bonded follow
				 * link_nr.
				 */
				out_port = out_port->dual_link_port;
			}
		}

		if (i == num_hops - 1)
			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
						      dst_hopid);
		else
			ret = tb_port_alloc_out_hopid(out_port, -1, -1);

		if (ret < 0)
			goto err;
		out_hopid = ret;

		path->hops[i].in_hop_index = in_hopid;
		path->hops[i].in_port = in_port;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = out_hopid;

		in_hopid = out_hopid;
	}

	path->tb = tb;
	path->path_length = num_hops;
	path->name = name;

	return path;

err:
	tb_path_free(path);
	return NULL;
}

/**
 * tb_path_free() - free a path
 * @path: Path to free
 *
 * Frees a path. The path does not need to be deactivated.
 */
void tb_path_free(struct tb_path *path)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		const struct tb_path_hop *hop = &path->hops[i];

		if (hop->in_port)
			tb_port_release_in_hopid(hop->in_port,
						 hop->in_hop_index);
		if (hop->out_port)
			tb_port_release_out_hopid(hop->out_port,
						  hop->next_hop_index);
	}

	kfree(path->hops);
	kfree(path);
}
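
/*
 * Remove the non-flow-controlled credits added by tb_path_activate()
 * from the ingress ports of the hops starting at @first_hop.
 */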
static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
{
	int i, res;
	for (i = first_hop; i < path->path_length; i++) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      -path->nfc_credits);
		if (res)
			tb_port_warn(path->hops[i].in_port,
				     "nfc credits deallocation failed for hop %d\n",
				     i);
	}
}
static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
				    bool clear_fc)
{
	struct tb_regs_hop hop;
	ktime_t timeout;
	int ret;

	/* Disable the path */
	ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Already disabled */
	if (!hop.enable)
		return 0;

	hop.enable = 0;

	ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Wait until it is drained */
	timeout = ktime_add_ms(ktime_get(), 500);
	do {
		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
		if (ret)
			return ret;

		if (!hop.pending) {
			if (clear_fc) {
				/* Clear flow control */
				hop.ingress_fc = 0;
				hop.egress_fc = 0;
				hop.ingress_shared_buffer = 0;
				hop.egress_shared_buffer = 0;

				return tb_port_write(port, &hop, TB_CFG_HOPS,
						     2 * hop_index, 2);
			}

			return 0;
		}

		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = __tb_path_deactivate_hop(path->hops[i].in_port,
					       path->hops[i].in_hop_index,
					       path->clear_fc);
		if (res && res != -ENODEV)
			tb_port_warn(path->hops[i].in_port,
				     "hop deactivation failed for hop %d, index %d\n",
				     i, path->hops[i].in_hop_index);
	}
}

void tb_path_deactivate(struct tb_path *path)
{
	if (!path->activated) {
		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
		return;
	}
	tb_dbg(path->tb,
	       "deactivating %s path from %llx:%x to %llx:%x\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);
	__tb_path_deactivate_hops(path, 0);
	__tb_path_deallocate_nfc(path, 0);
	path->activated = false;
}

/**
 * tb_path_activate() - activate a path
 * @path: Path to activate
 *
 * Activate a path starting with the last hop and iterating backwards. The
 * caller must fill path->hops before calling tb_path_activate().
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_path_activate(struct tb_path *path)
{
	int i, res;
	enum tb_path_port out_mask, in_mask;
	if (path->activated) {
		tb_WARN(path->tb, "trying to activate already activated path\n");
		return -EINVAL;
	}

	tb_dbg(path->tb,
	       "activating %s path from %llx:%x to %llx:%x\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);

	/* Clear counters. */
	for (i = path->path_length - 1; i >= 0; i--) {
		if (path->hops[i].in_counter_index == -1)
			continue;
		res = tb_port_clear_counter(path->hops[i].in_port,
					    path->hops[i].in_counter_index);
		if (res)
			goto err;
	}

	/* Add non flow controlled credits. */
	for (i = path->path_length - 1; i >= 0; i--) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      path->nfc_credits);
		if (res) {
			__tb_path_deallocate_nfc(path, i);
			goto err;
		}
	}

	/* Activate hops. */
	for (i = path->path_length - 1; i >= 0; i--) {
		struct tb_regs_hop hop = { 0 };

		/* If it is left active deactivate it first */
		__tb_path_deactivate_hop(path->hops[i].in_port,
					 path->hops[i].in_hop_index, path->clear_fc);

		/* dword 0 */
		hop.next_hop = path->hops[i].next_hop_index;
		hop.out_port = path->hops[i].out_port->port;
		hop.initial_credits = path->hops[i].initial_credits;
		hop.unknown1 = 0;
		hop.enable = 1;

		/* dword 1 */
		out_mask = (i == path->path_length - 1) ?
				TB_PATH_DESTINATION : TB_PATH_INTERNAL;
		in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
		hop.weight = path->weight;
		hop.unknown2 = 0;
		hop.priority = path->priority;
		hop.drop_packages = path->drop_packages;
		hop.counter = path->hops[i].in_counter_index;
		hop.counter_enable = path->hops[i].in_counter_index != -1;
		hop.ingress_fc = path->ingress_fc_enable & in_mask;
		hop.egress_fc = path->egress_fc_enable & out_mask;
		hop.ingress_shared_buffer = path->ingress_shared_buffer
					    & in_mask;
		hop.egress_shared_buffer = path->egress_shared_buffer
					    & out_mask;
		hop.unknown3 = 0;

		tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
		tb_dump_hop(&path->hops[i], &hop);
		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
				    2 * path->hops[i].in_hop_index, 2);
		if (res) {
			__tb_path_deactivate_hops(path, i);
			__tb_path_deallocate_nfc(path, 0);
			goto err;
		}
	}
	path->activated = true;
	tb_dbg(path->tb, "path activation complete\n");
	return 0;
err:
	tb_WARN(path->tb, "path activation failed\n");
	return res;
}

/**
 * tb_path_is_invalid() - check whether any ports on the path are invalid
 * @path: Path to check
 *
 * Return: Returns true if the path is invalid, false otherwise.
 */
bool tb_path_is_invalid(struct tb_path *path)
{
	int i = 0;
	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port->sw->is_unplugged)
			return true;
		if (path->hops[i].out_port->sw->is_unplugged)
			return true;
	}
	return false;
}

/**
 * tb_path_switch_on_path() - Does the path go through certain switch
 * @path: Path to check
 * @sw: Switch to check
 *
 * Goes over all hops on the path and checks if @sw is any of them.
 * Direction does not matter.
 */
bool tb_path_switch_on_path(const struct tb_path *path,
			    const struct tb_switch *sw)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port->sw == sw ||
		    path->hops[i].out_port->sw == sw)
			return true;
	}

	return false;
}