// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */
9 #include <linux/errno.h>
10 #include <linux/firewire.h>
11 #include <linux/firewire-constants.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
19 #include <linux/atomic.h>
20 #include <asm/byteorder.h>
23 #include "phy-packet-definitions.h"
24 #include <trace/events/firewire.h>
26 static struct fw_node
*fw_node_create(u32 sid
, int port_count
, int color
)
30 node
= kzalloc(struct_size(node
, ports
, port_count
), GFP_ATOMIC
);
35 node
->node_id
= LOCAL_BUS
| phy_packet_self_id_get_phy_id(sid
);
36 node
->link_on
= phy_packet_self_id_zero_get_link_active(sid
);
37 // NOTE: Only two bits, thus only for SCODE_100, SCODE_200, SCODE_400, and SCODE_BETA.
38 node
->phy_speed
= phy_packet_self_id_zero_get_scode(sid
);
39 node
->initiated_reset
= phy_packet_self_id_zero_get_initiated_reset(sid
);
40 node
->port_count
= port_count
;
42 kref_init(&node
->kref
);
43 INIT_LIST_HEAD(&node
->link
);
/*
 * Compute the maximum hop count for this node and its children.  The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node.  We need this for
 * setting the gap count.  As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of hops
 * to the furthest leaf.  Computing the max hop count breaks down into
 * two cases: either the path goes through this node, in which case
 * the hop count is the sum of the two biggest child depths plus 2.
 * Or it could be the case that the max hop path is entirely
 * contained in a child tree, in which case the max hop count is just
 * the max hop count of this child.
 */
62 static void update_hop_count(struct fw_node
*node
)
64 int depths
[2] = { -1, -1 };
65 int max_child_hops
= 0;
68 for (i
= 0; i
< node
->port_count
; i
++) {
69 if (node
->ports
[i
] == NULL
)
72 if (node
->ports
[i
]->max_hops
> max_child_hops
)
73 max_child_hops
= node
->ports
[i
]->max_hops
;
75 if (node
->ports
[i
]->max_depth
> depths
[0]) {
76 depths
[1] = depths
[0];
77 depths
[0] = node
->ports
[i
]->max_depth
;
78 } else if (node
->ports
[i
]->max_depth
> depths
[1])
79 depths
[1] = node
->ports
[i
]->max_depth
;
82 node
->max_depth
= depths
[0] + 1;
83 node
->max_hops
= max(max_child_hops
, depths
[0] + depths
[1] + 2);
86 static inline struct fw_node
*fw_node(struct list_head
*l
)
88 return list_entry(l
, struct fw_node
, link
);
/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card; otherwise NULL.
 */
98 static struct fw_node
*build_tree(struct fw_card
*card
, const u32
*sid
, int self_id_count
,
99 unsigned int generation
)
101 struct self_id_sequence_enumerator enumerator
= {
103 .quadlet_count
= self_id_count
,
105 struct fw_node
*node
, *child
, *local_node
, *irm_node
;
106 struct list_head stack
;
107 int phy_id
, stack_depth
;
109 bool beta_repeaters_present
;
113 INIT_LIST_HEAD(&stack
);
117 gap_count
= phy_packet_self_id_zero_get_gap_count(*sid
);
118 beta_repeaters_present
= false;
120 while (enumerator
.quadlet_count
> 0) {
121 unsigned int child_port_count
= 0;
122 unsigned int total_port_count
= 0;
123 unsigned int parent_count
= 0;
124 unsigned int quadlet_count
;
125 const u32
*self_id_sequence
;
126 unsigned int port_capacity
;
127 enum phy_packet_self_id_port_status port_status
;
128 unsigned int port_index
;
132 self_id_sequence
= self_id_sequence_enumerator_next(&enumerator
, &quadlet_count
);
133 if (IS_ERR(self_id_sequence
)) {
134 if (PTR_ERR(self_id_sequence
) != -ENODATA
) {
135 fw_err(card
, "inconsistent extended self IDs: %ld\n",
136 PTR_ERR(self_id_sequence
));
142 port_capacity
= self_id_sequence_get_port_capacity(quadlet_count
);
143 trace_self_id_sequence(card
->index
, self_id_sequence
, quadlet_count
, generation
);
145 for (port_index
= 0; port_index
< port_capacity
; ++port_index
) {
146 port_status
= self_id_sequence_get_port_status(self_id_sequence
, quadlet_count
,
148 switch (port_status
) {
149 case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD
:
152 case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT
:
153 case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN
:
156 case PHY_PACKET_SELF_ID_PORT_STATUS_NONE
:
162 if (phy_id
!= phy_packet_self_id_get_phy_id(self_id_sequence
[0])) {
163 fw_err(card
, "PHY ID mismatch in self ID: %d != %d\n",
164 phy_id
, phy_packet_self_id_get_phy_id(self_id_sequence
[0]));
168 if (child_port_count
> stack_depth
) {
169 fw_err(card
, "topology stack underflow\n");
174 * Seek back from the top of our stack to find the
175 * start of the child nodes for this node.
177 for (i
= 0, h
= &stack
; i
< child_port_count
; i
++)
180 * When the stack is empty, this yields an invalid value,
181 * but that pointer will never be dereferenced.
185 node
= fw_node_create(self_id_sequence
[0], total_port_count
, card
->color
);
187 fw_err(card
, "out of memory while building topology\n");
191 if (phy_id
== (card
->node_id
& 0x3f))
194 if (phy_packet_self_id_zero_get_contender(self_id_sequence
[0]))
197 for (port_index
= 0; port_index
< total_port_count
; ++port_index
) {
198 port_status
= self_id_sequence_get_port_status(self_id_sequence
, quadlet_count
,
200 switch (port_status
) {
201 case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT
:
202 // Who's your daddy? We dont know the parent node at this time, so
203 // we temporarily abuse node->color for remembering the entry in
204 // the node->ports array where the parent node should be. Later,
205 // when we handle the parent node, we fix up the reference.
207 node
->color
= port_index
;
210 case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD
:
211 node
->ports
[port_index
] = child
;
212 // Fix up parent reference for this child node.
213 child
->ports
[child
->color
] = node
;
214 child
->color
= card
->color
;
215 child
= fw_node(child
->link
.next
);
217 case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN
:
218 case PHY_PACKET_SELF_ID_PORT_STATUS_NONE
:
224 // Check that the node reports exactly one parent port, except for the root, which
225 // of course should have no parents.
226 if ((enumerator
.quadlet_count
== 0 && parent_count
!= 0) ||
227 (enumerator
.quadlet_count
> 0 && parent_count
!= 1)) {
228 fw_err(card
, "parent port inconsistency for node %d: "
229 "parent_count=%d\n", phy_id
, parent_count
);
233 /* Pop the child nodes off the stack and push the new node. */
234 __list_del(h
->prev
, &stack
);
235 list_add_tail(&node
->link
, &stack
);
236 stack_depth
+= 1 - child_port_count
;
238 if (node
->phy_speed
== SCODE_BETA
&& parent_count
+ child_port_count
> 1)
239 beta_repeaters_present
= true;
241 // If PHYs report different gap counts, set an invalid count which will force a gap
242 // count reconfiguration and a reset.
243 if (phy_packet_self_id_zero_get_gap_count(self_id_sequence
[0]) != gap_count
)
246 update_hop_count(node
);
251 card
->root_node
= node
;
252 card
->irm_node
= irm_node
;
253 card
->gap_count
= gap_count
;
254 card
->beta_repeaters_present
= beta_repeaters_present
;
259 typedef void (*fw_node_callback_t
)(struct fw_card
* card
,
260 struct fw_node
* node
,
261 struct fw_node
* parent
);
263 static void for_each_fw_node(struct fw_card
*card
, struct fw_node
*root
,
264 fw_node_callback_t callback
)
266 struct list_head list
;
267 struct fw_node
*node
, *next
, *child
, *parent
;
270 INIT_LIST_HEAD(&list
);
273 list_add_tail(&root
->link
, &list
);
275 list_for_each_entry(node
, &list
, link
) {
276 node
->color
= card
->color
;
278 for (i
= 0; i
< node
->port_count
; i
++) {
279 child
= node
->ports
[i
];
282 if (child
->color
== card
->color
)
286 list_add_tail(&child
->link
, &list
);
290 callback(card
, node
, parent
);
293 list_for_each_entry_safe(node
, next
, &list
, link
)
297 static void report_lost_node(struct fw_card
*card
,
298 struct fw_node
*node
, struct fw_node
*parent
)
300 fw_node_event(card
, node
, FW_NODE_DESTROYED
);
303 /* Topology has changed - reset bus manager retry counter */
304 card
->bm_retries
= 0;
307 static void report_found_node(struct fw_card
*card
,
308 struct fw_node
*node
, struct fw_node
*parent
)
310 int b_path
= (node
->phy_speed
== SCODE_BETA
);
312 if (parent
!= NULL
) {
313 /* min() macro doesn't work here with gcc 3.4 */
314 node
->max_speed
= parent
->max_speed
< node
->phy_speed
?
315 parent
->max_speed
: node
->phy_speed
;
316 node
->b_path
= parent
->b_path
&& b_path
;
318 node
->max_speed
= node
->phy_speed
;
319 node
->b_path
= b_path
;
322 fw_node_event(card
, node
, FW_NODE_CREATED
);
324 /* Topology has changed - reset bus manager retry counter */
325 card
->bm_retries
= 0;
328 /* Must be called with card->lock held */
329 void fw_destroy_nodes(struct fw_card
*card
)
332 if (card
->local_node
!= NULL
)
333 for_each_fw_node(card
, card
->local_node
, report_lost_node
);
334 card
->local_node
= NULL
;
337 static void move_tree(struct fw_node
*node0
, struct fw_node
*node1
, int port
)
339 struct fw_node
*tree
;
342 tree
= node1
->ports
[port
];
343 node0
->ports
[port
] = tree
;
344 for (i
= 0; i
< tree
->port_count
; i
++) {
345 if (tree
->ports
[i
] == node1
) {
346 tree
->ports
[i
] = node0
;
/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
357 static void update_tree(struct fw_card
*card
, struct fw_node
*root
)
359 struct list_head list0
, list1
;
360 struct fw_node
*node0
, *node1
, *next1
;
363 INIT_LIST_HEAD(&list0
);
364 list_add_tail(&card
->local_node
->link
, &list0
);
365 INIT_LIST_HEAD(&list1
);
366 list_add_tail(&root
->link
, &list1
);
368 node0
= fw_node(list0
.next
);
369 node1
= fw_node(list1
.next
);
371 while (&node0
->link
!= &list0
) {
372 WARN_ON(node0
->port_count
!= node1
->port_count
);
374 if (node0
->link_on
&& !node1
->link_on
)
375 event
= FW_NODE_LINK_OFF
;
376 else if (!node0
->link_on
&& node1
->link_on
)
377 event
= FW_NODE_LINK_ON
;
378 else if (node1
->initiated_reset
&& node1
->link_on
)
379 event
= FW_NODE_INITIATED_RESET
;
381 event
= FW_NODE_UPDATED
;
383 node0
->node_id
= node1
->node_id
;
384 node0
->color
= card
->color
;
385 node0
->link_on
= node1
->link_on
;
386 node0
->initiated_reset
= node1
->initiated_reset
;
387 node0
->max_hops
= node1
->max_hops
;
388 node1
->color
= card
->color
;
389 fw_node_event(card
, node0
, event
);
391 if (card
->root_node
== node1
)
392 card
->root_node
= node0
;
393 if (card
->irm_node
== node1
)
394 card
->irm_node
= node0
;
396 for (i
= 0; i
< node0
->port_count
; i
++) {
397 if (node0
->ports
[i
] && node1
->ports
[i
]) {
399 * This port didn't change, queue the
400 * connected node for further
403 if (node0
->ports
[i
]->color
== card
->color
)
405 list_add_tail(&node0
->ports
[i
]->link
, &list0
);
406 list_add_tail(&node1
->ports
[i
]->link
, &list1
);
407 } else if (node0
->ports
[i
]) {
409 * The nodes connected here were
410 * unplugged; unref the lost nodes and
411 * queue FW_NODE_LOST callbacks for
415 for_each_fw_node(card
, node0
->ports
[i
],
417 node0
->ports
[i
] = NULL
;
418 } else if (node1
->ports
[i
]) {
420 * One or more node were connected to
421 * this port. Move the new nodes into
422 * the tree and queue FW_NODE_CREATED
423 * callbacks for them.
425 move_tree(node0
, node1
, i
);
426 for_each_fw_node(card
, node0
->ports
[i
],
431 node0
= fw_node(node0
->link
.next
);
432 next1
= fw_node(node1
->link
.next
);
438 static void update_topology_map(struct fw_card
*card
,
439 u32
*self_ids
, int self_id_count
)
441 int node_count
= (card
->root_node
->node_id
& 0x3f) + 1;
442 __be32
*map
= card
->topology_map
;
444 *map
++ = cpu_to_be32((self_id_count
+ 2) << 16);
445 *map
++ = cpu_to_be32(be32_to_cpu(card
->topology_map
[1]) + 1);
446 *map
++ = cpu_to_be32((node_count
<< 16) | self_id_count
);
448 while (self_id_count
--)
449 *map
++ = cpu_to_be32p(self_ids
++);
451 fw_compute_block_crc(card
->topology_map
);
454 void fw_core_handle_bus_reset(struct fw_card
*card
, int node_id
, int generation
,
455 int self_id_count
, u32
*self_ids
, bool bm_abdicate
)
457 struct fw_node
*local_node
;
459 trace_bus_reset_handle(card
->index
, generation
, node_id
, bm_abdicate
, self_ids
, self_id_count
);
461 guard(spinlock_irqsave
)(&card
->lock
);
464 * If the selfID buffer is not the immediate successor of the
465 * previously processed one, we cannot reliably compare the
466 * old and new topologies.
468 if (!is_next_generation(generation
, card
->generation
) &&
469 card
->local_node
!= NULL
) {
470 fw_destroy_nodes(card
);
471 card
->bm_retries
= 0;
474 card
->broadcast_channel_allocated
= card
->broadcast_channel_auto_allocated
;
475 card
->node_id
= node_id
;
477 * Update node_id before generation to prevent anybody from using
478 * a stale node_id together with a current generation.
481 card
->generation
= generation
;
482 card
->reset_jiffies
= get_jiffies_64();
483 card
->bm_node_id
= 0xffff;
484 card
->bm_abdicate
= bm_abdicate
;
485 fw_schedule_bm_work(card
, 0);
487 local_node
= build_tree(card
, self_ids
, self_id_count
, generation
);
489 update_topology_map(card
, self_ids
, self_id_count
);
493 if (local_node
== NULL
) {
494 fw_err(card
, "topology build failed\n");
495 /* FIXME: We need to issue a bus reset in this case. */
496 } else if (card
->local_node
== NULL
) {
497 card
->local_node
= local_node
;
498 for_each_fw_node(card
, local_node
, report_found_node
);
500 update_tree(card
, local_node
);
503 EXPORT_SYMBOL(fw_core_handle_bus_reset
);