// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"
#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)

#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0
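
/*
 * Bit layout assumed by the masks above (self-ID packet #0): bits 29..24
 * PHY ID, bit 23 extended flag, bit 22 link active, bits 21..16 gap count,
 * bits 15..14 PHY speed, bit 11 contender, bits 7..2 port status p0..p2
 * (two bits per port), bit 1 initiated reset, bit 0 more packets.
 * Extended self-ID packets carry a sequence number in bits 22..20 and up
 * to eight further two-bit port fields in bits 17..2.
 */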

static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	shift = 6;
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			fallthrough;
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			(*total_port_count)++;
			fallthrough;
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			shift = 16;
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */
			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}

static int get_port_type(u32 *sid, int port_index)
{
	int index, shift;

	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) & 7) * 2;
	return (sid[index] >> shift) & 0x03;
}
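
/*
 * For example, port_index 0 yields index 0 and shift 6, i.e. bits 7..6 of
 * self-ID packet #0, while port_index 3 yields index 1 and shift 16, i.e.
 * bits 17..16 of the first extended self-ID packet.  Ports 0..2 therefore
 * live in packet #0 and each extended packet adds up to eight more ports.
 */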

static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
	struct fw_node *node;

	/* GFP_ATOMIC: called with card->lock held in the bus reset path. */
	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
	if (node == NULL)
		return NULL;

	node->color = color;
	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
	node->link_on = SELF_ID_LINK_ON(sid);
	node->phy_speed = SELF_ID_PHY_SPEED(sid);
	node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
	node->port_count = port_count;

	refcount_set(&node->ref_count, 1);
	INIT_LIST_HEAD(&node->link);

	return node;
}

/*
 * Compute the maximum hop count for this node and its children.  The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node.  We need this for
 * setting the gap count.  As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of hops
 * to the furthest leaf.  Computing the max hop count breaks down into
 * two cases: either the path goes through this node, in which case
 * the hop count is the sum of the two biggest child depths plus 2.
 * Or it could be the case that the max hop path is entirely
 * contained in a child tree, in which case the max hop count is just
 * the max hop count of this child.
 */
static void update_hop_count(struct fw_node *node)
{
	int depths[2] = { -1, -1 };
	int max_child_hops = 0;
	int i;

	for (i = 0; i < node->port_count; i++) {
		if (node->ports[i] == NULL)
			continue;

		if (node->ports[i]->max_hops > max_child_hops)
			max_child_hops = node->ports[i]->max_hops;

		if (node->ports[i]->max_depth > depths[0]) {
			depths[1] = depths[0];
			depths[0] = node->ports[i]->max_depth;
		} else if (node->ports[i]->max_depth > depths[1])
			depths[1] = node->ports[i]->max_depth;
	}

	node->max_depth = depths[0] + 1;
	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}
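
/*
 * For example, a node whose only connections are two leaf children sees
 * depths[] = { 0, 0 } and max_child_hops = 0, so it ends up with
 * max_depth = 1 and max_hops = max(0, 0 + 0 + 2) = 2, i.e. the two-hop
 * path from one leaf through this node to the other leaf.
 */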

static inline struct fw_node *fw_node(struct list_head *l)
{
	return list_entry(l, struct fw_node, link);
}

/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card, otherwise NULL.
 */
static struct fw_node *build_tree(struct fw_card *card,
				  u32 *sid, int self_id_count)
{
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack, *h;
	u32 *next_sid, *end, q;
	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	end = sid + self_id_count;
	phy_id = 0;
	irm_node = NULL;
	gap_count = SELF_ID_GAP_COUNT(*sid);
	beta_repeaters_present = false;

	while (sid < end) {
		next_sid = count_ports(sid, &port_count, &child_port_count);

		if (next_sid == NULL) {
			fw_err(card, "inconsistent extended self IDs\n");
			return NULL;
		}

		q = *sid;
		if (phy_id != SELF_ID_PHY_ID(q)) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, SELF_ID_PHY_ID(q));
			return NULL;
		}

		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(q, port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		if (SELF_ID_CONTENDER(q))
			irm_node = node;

		parent_count = 0;

		for (i = 0; i < port_count; i++) {
			switch (get_port_type(sid, i)) {
			case SELFID_PORT_PARENT:
				/*
				 * Who's your daddy?  We don't know the
				 * parent node at this time, so we
				 * temporarily abuse node->color for
				 * remembering the entry in the
				 * node->ports array where the parent
				 * node should be.  Later, when we
				 * handle the parent node, we fix up
				 * the reference.
				 */
				parent_count++;
				node->color = i;
				break;

			case SELFID_PORT_CHILD:
				node->ports[i] = child;
				/*
				 * Fix up parent reference for this
				 * child node.
				 */
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;
			}
		}

		/*
		 * Check that the node reports exactly one parent
		 * port, except for the root, which of course should
		 * have no parents.
		 */
		if ((next_sid == end && parent_count != 0) ||
		    (next_sid < end && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: parent_count=%d\n",
			       phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

		if (node->phy_speed == SCODE_BETA &&
		    parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		/*
		 * If PHYs report different gap counts, set an invalid count
		 * which will force a gap count reconfiguration and a reset.
		 */
		if (SELF_ID_GAP_COUNT(q) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		sid = next_sid;
		phy_id++;
	}

	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}

typedef void (*fw_node_callback_t)(struct fw_card *card,
				   struct fw_node *node,
				   struct fw_node *parent);
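
/*
 * Walk the tree rooted at 'root' breadth first and invoke 'callback' once
 * for each node together with its parent.  The list doubles as the work
 * queue; a neighbour that already carries the current card->color is the
 * node we arrived from, i.e. the parent, everything else gets queued.
 */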
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}

static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

static void report_found_node(struct fw_card *card,
			      struct fw_node *node, struct fw_node *parent)
{
	int b_path = (node->phy_speed == SCODE_BETA);

	if (parent != NULL) {
		/* min() macro doesn't work here with gcc 3.4 */
		node->max_speed = parent->max_speed < node->phy_speed ?
				  parent->max_speed : node->phy_speed;
		node->b_path = parent->b_path && b_path;
	} else {
		node->max_speed = node->phy_speed;
		node->b_path = b_path;
	}

	fw_node_event(card, node, FW_NODE_CREATED);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

void fw_destroy_nodes(struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	card->color++;
	if (card->local_node != NULL)
		for_each_fw_node(card, card->local_node, report_lost_node);
	card->local_node = NULL;
	spin_unlock_irqrestore(&card->lock, flags);
}
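
/*
 * Graft the subtree hanging off node1's given port onto the same port of
 * node0, and patch the subtree root's back pointer so that it points at
 * node0 (the node retained from the old tree) instead of node1.
 */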
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
	struct fw_node *tree;
	int i;

	tree = node1->ports[port];
	node0->ports[port] = tree;
	for (i = 0; i < tree->port_count; i++) {
		if (tree->ports[i] == node1) {
			tree->ports[i] = node0;
			break;
		}
	}
}

/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	while (&node0->link != &list0) {
		WARN_ON(node0->port_count != node1->port_count);

		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_DESTROYED callbacks for
				 * them.
				 */
				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more nodes were connected to
				 * this port. Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		fw_node_put(node1);
		node1 = next1;
	}
}
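
/*
 * Refresh the card's topology map buffer from the raw self IDs.  The three
 * header quadlets written below are a length field (whose CRC half is
 * filled in by fw_compute_block_crc()), a generation counter that is
 * bumped on every update, and a quadlet packing the node count together
 * with the number of self-ID quadlets; the self IDs themselves follow.
 */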
static void update_topology_map(struct fw_card *card,
				u32 *self_ids, int self_id_count)
{
	int node_count = (card->root_node->node_id & 0x3f) + 1;
	__be32 *map = card->topology_map;

	*map++ = cpu_to_be32((self_id_count + 2) << 16);
	*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
	*map++ = cpu_to_be32((node_count << 16) | self_id_count);

	while (self_id_count--)
		*map++ = cpu_to_be32p(self_ids++);

	fw_compute_block_crc(card->topology_map);
}
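
/*
 * Called by the card driver once the self IDs for a bus reset have been
 * received: drop the old topology if this generation does not immediately
 * follow the previous one, update the card's bus state, build a new tree
 * from the self IDs and then either install it wholesale (first reset) or
 * diff it against the existing tree via update_tree().
 */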
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;
	unsigned long flags;

	/*
	 * If the self-ID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	spin_lock_irqsave(&card->lock, flags);

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id = 0xffff;
	card->bm_abdicate = bm_abdicate;
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count);

	update_topology_map(card, self_ids, self_id_count);

	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		update_tree(card, local_node);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);